Mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-03-09 14:34:43 +00:00
This is a backport of #54803 for 7.x. This pull request cherry-picks the squashed commit from #54803 with the following additional commits:

- 6f50c92 which adjusts master code to 7.x
- a114549 to mute a failing ILM test (#54818)
- 48cbca1 and 50186b2 which clean up and fix the previous test
- aae12bb which adds a missing feature flag (#54861)
- 6f330e3 which adds missing serialization bits (#54864)
- bf72c02 which adjusts the version in YAML tests
- a51955f which adds some plumbing for the transport client used in integration tests

Co-authored-by: David Turner <david.turner@elastic.co>
Co-authored-by: Yannick Welsch <yannick@welsch.lu>
Co-authored-by: Lee Hinman <dakrone@users.noreply.github.com>
Co-authored-by: Andrei Dan <andrei.dan@elastic.co>
Parent: 8f2ddaee1a
Commit: 4d36917e52
@@ -47,17 +47,21 @@ public final class Allocators {
         public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();

         @Override
-        public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
+        public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
             // noop
         }

         @Override
-        public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
+        public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
             // noop
         }

         @Override
-        public void allocateUnassigned(RoutingAllocation allocation) {
+        public void allocateUnassigned(
+            ShardRouting shardRouting,
+            RoutingAllocation allocation,
+            UnassignedAllocationHandler unassignedAllocationHandler
+        ) {
             // noop
         }
     }
@@ -54,6 +54,7 @@ testClusters.integTest {
   if (BuildParams.isSnapshotBuild() == false) {
     systemProperty 'es.autoscaling_feature_flag_registered', 'true'
     systemProperty 'es.eql_feature_flag_registered', 'true'
+    systemProperty 'es.searchable_snapshots_feature_enabled', 'true'
   }
   setting 'xpack.autoscaling.enabled', 'true'
   setting 'xpack.eql.enabled', 'true'
@@ -100,7 +100,9 @@ If the request succeeds, the body of the response contains the policy definition
       "delete": {
         "min_age": "30d",
         "actions": {
-          "delete": {}
+          "delete": {
+            "delete_searchable_snapshot": true
+          }
         }
       }
     }
@@ -112,6 +112,7 @@ policy definition.
 - <<ilm-unfollow-action,Unfollow>>
 - <<ilm-allocate-action,Allocate>>
 - <<ilm-freeze-action,Freeze>>
+- <<ilm-searchable-snapshot-action, Searchable Snapshot>>
 * Delete
 - <<ilm-wait-for-snapshot-action,Wait For Snapshot>>
 - <<ilm-delete-action,Delete>>
@@ -266,7 +267,15 @@ Phases allowed: delete.

 The Delete Action does just that: it deletes the index.

-This action does not have any options associated with it.
+[[ilm-delete-action-options]]
+.Delete
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `delete_searchable_snapshot` | no | true | Deletes the searchable snapshot created in the cold phase, if a
+                                             snapshot was created (i.e. if the <<ilm-searchable-snapshot-action,
+                                             Searchable Snapshot Action>> was used in the cold phase)
+|======

 [source,console]
 --------------------------------------------------
@@ -580,6 +589,43 @@ The above example illustrates a policy which attempts to delete an
 index one day after the index has been rolled over. It does not
 delete the index one day after it has been created.

+[[ilm-searchable-snapshot-action]]
+==== Searchable Snapshot
+
+Phases allowed: cold.
+
+This action takes a snapshot of the managed index in the configured repository
+and mounts it as a searchable snapshot.
+
+[[ilm-searchable-snapshot-options]]
+.Searchable Snapshot Options
+[options="header"]
+|======
+| Name | Required | Default | Description
+| `snapshot_repository` | yes | - | Repository used to store the snapshot created by this action. The snapshot will be,
+                                    by default, deleted by the <<ilm-delete-action, Delete Action>> in the delete phase, if
+                                    configured, but the user can configure the <<ilm-delete-action, Delete Action>> to keep
+                                    the snapshot using the `delete_searchable_snapshot` option.
+
+|======
+
+[source,console]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "cold": {
+        "actions": {
+          "searchable_snapshot" : {
+            "snapshot_repository" : "backing_repo"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
+
 [[ilm-set-priority-action]]
 ==== Set Priority
@@ -113,7 +113,9 @@ with its version bumped to 2.
       "delete": {
         "min_age": "10d",
         "actions": {
-          "delete": {}
+          "delete": {
+            "delete_searchable_snapshot": true
+          }
         }
       }
     }
@@ -31,6 +31,7 @@ endif::[]
 * <<indices-reload-analyzers,Reload Search Analyzers API>>
 * <<rollup-apis,Rollup APIs>>
 * <<search, Search APIs>>
+* <<searchable-snapshots-apis, Searchable snapshots APIs>>
 * <<security-api,Security APIs>>
 * <<snapshot-lifecycle-management-api,Snapshot lifecycle management APIs>>
 * <<transform-apis,{transform-cap} APIs>>
@@ -59,6 +60,7 @@ include::{es-repo-dir}/migration/migration.asciidoc[]
 include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[]
 include::{es-repo-dir}/rollup/rollup-api.asciidoc[]
 include::{es-repo-dir}/search.asciidoc[]
+include::{es-repo-dir}/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc[]
 include::{xes-repo-dir}/rest-api/security.asciidoc[]
 include::{es-repo-dir}/slm/apis/slm-api.asciidoc[]
 include::{es-repo-dir}/transform/apis/index.asciidoc[]
docs/reference/searchable-snapshots/apis/clear-cache.asciidoc (new file, 76 lines)

@@ -0,0 +1,76 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-api-clear-cache]]
+=== Clear cache API
+++++
+<titleabbrev>Clear cache</titleabbrev>
+++++
+
+experimental[]
+
+Clear the cache of searchable snapshots.
+
+[[searchable-snapshots-api-clear-cache-request]]
+==== {api-request-title}
+
+`POST /_searchable_snapshots/cache/clear`
+
+`POST /<index>/_searchable_snapshots/cache/clear`
+
+[[searchable-snapshots-api-clear-cache-prereqs]]
+==== {api-prereq-title}
+
+If the {es} {security-features} are enabled, you must have the
+`manage` cluster privilege and the `manage` index privilege
+for any included indices to use this API.
+For more information, see <<security-privileges>>.
+
+[[searchable-snapshots-api-clear-cache-desc]]
+==== {api-description-title}
+
+
+[[searchable-snapshots-api-clear-cache-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Optional, string)
+A comma-separated list of index names for which the
+searchable snapshots cache must be cleared.
+
+
+[[searchable-snapshots-api-clear-cache-example]]
+==== {api-examples-title}
+////
+[source,console]
+-----------------------------------
+PUT /docs
+{
+  "settings" : {
+    "index.number_of_shards" : 1,
+    "index.number_of_replicas" : 0
+  }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+{
+  "include_global_state": false,
+  "indices": "docs"
+}
+
+DELETE /docs
+
+POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true
+{
+  "index": "docs"
+}
+-----------------------------------
+// TEST[setup:setup-repository]
+////
+
+Clears the cache of the index `docs`:
+
+[source,console]
+--------------------------------------------------
+POST /docs/_searchable_snapshots/cache/clear
+--------------------------------------------------
+// TEST[continued]
docs/reference/searchable-snapshots/apis/get-stats.asciidoc (new file, 76 lines)
@@ -0,0 +1,76 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-api-stats]]
+=== Searchable snapshot statistics API
+++++
+<titleabbrev>Searchable snapshot statistics</titleabbrev>
+++++
+
+experimental[]
+
+Retrieve various statistics about searchable snapshots.
+
+[[searchable-snapshots-api-stats-request]]
+==== {api-request-title}
+
+`GET /_searchable_snapshots/stats`
+
+`GET /<index>/_searchable_snapshots/stats`
+
+[[searchable-snapshots-api-stats-prereqs]]
+==== {api-prereq-title}
+
+If the {es} {security-features} are enabled, you must have the
+`manage` cluster privilege and the `manage` index privilege
+for any included indices to use this API.
+For more information, see <<security-privileges>>.
+
+[[searchable-snapshots-api-stats-desc]]
+==== {api-description-title}
+
+
+[[searchable-snapshots-api-stats-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Optional, string)
+A comma-separated list of index names for which the
+statistics must be retrieved.
+
+
+[[searchable-snapshots-api-stats-example]]
+==== {api-examples-title}
+////
+[source,console]
+-----------------------------------
+PUT /docs
+{
+  "settings" : {
+    "index.number_of_shards" : 1,
+    "index.number_of_replicas" : 0
+  }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+{
+  "include_global_state": false,
+  "indices": "docs"
+}
+
+DELETE /docs
+
+POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true
+{
+  "index": "docs"
+}
+-----------------------------------
+// TEST[setup:setup-repository]
+////
+
+Retrieves the statistics of the index `docs`:
+
+[source,console]
+--------------------------------------------------
+GET /docs/_searchable_snapshots/stats
+--------------------------------------------------
+// TEST[continued]
docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc (new file, 126 lines)
@@ -0,0 +1,126 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-api-mount-snapshot]]
+=== Mount snapshot API
+++++
+<titleabbrev>Mount snapshot</titleabbrev>
+++++
+
+experimental[]
+
+Mount a snapshot as a searchable index.
+
+[[searchable-snapshots-api-mount-request]]
+==== {api-request-title}
+
+`POST /_snapshot/<repository>/<snapshot>/_mount`
+
+[[searchable-snapshots-api-mount-prereqs]]
+==== {api-prereq-title}
+
+If the {es} {security-features} are enabled, you must have the
+`manage` cluster privilege and the `manage` index privilege
+for any included indices to use this API.
+For more information, see <<security-privileges>>.
+
+[[searchable-snapshots-api-mount-desc]]
+==== {api-description-title}
+
+
+[[searchable-snapshots-api-mount-path-params]]
+==== {api-path-parms-title}
+
+`<repository>`::
+(Required, string)
+The name of the repository containing
+the snapshot of the index to mount.
+
+`<snapshot>`::
+(Required, string)
+The name of the snapshot of the index
+to mount.
+
+[[searchable-snapshots-api-mount-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_completion]
+
+[[searchable-snapshots-api-mount-request-body]]
+==== {api-request-body-title}
+
+`index`::
+(Required, string)
+Name of the index contained in the snapshot
+whose data is to be mounted.
+
+If no `renamed_index` is specified this name
+will also be used to create the new index.
+
+`renamed_index`::
++
+--
+(Optional, string)
+Name of the index that will be created.
+--
+
+`index_settings`::
++
+--
+(Optional, object)
+Settings that should be added to the index when it is mounted.
+--
+
+`ignore_index_settings`::
++
+--
+(Optional, array of strings)
+Names of settings that should be removed from the index when it is mounted.
+--
+
+[[searchable-snapshots-api-mount-example]]
+==== {api-examples-title}
+////
+[source,console]
+-----------------------------------
+PUT /my_docs
+{
+  "settings" : {
+    "index.number_of_shards" : 1,
+    "index.number_of_replicas" : 0
+  }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+{
+  "include_global_state": false,
+  "indices": "my_docs"
+}
+
+DELETE /my_docs
+-----------------------------------
+// TEST[setup:setup-repository]
+////
+
+Mounts the index `my_docs` from an existing snapshot named `my_snapshot` stored
+in the `my_repository` repository as a new index `docs`:
+
+[source,console]
+--------------------------------------------------
+POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true
+{
+  "index": "my_docs", <1>
+  "renamed_index": "docs", <2>
+  "index_settings": { <3>
+    "index.number_of_replicas": 0
+  },
+  "ignore_index_settings": [ "index.refresh_interval" ] <4>
+}
+--------------------------------------------------
+// TEST[continued]
+
+<1> The name of the index in the snapshot to mount
+<2> The name of the index to create
+<3> Any index settings to add to the new index
+<4> List of index settings to ignore when mounting the snapshotted index
docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc (new file, 16 lines)

@@ -0,0 +1,16 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-apis]]
+== Searchable snapshots APIs
+
+experimental[]
+
+You can use the following APIs to perform searchable snapshots operations.
+
+* <<searchable-snapshots-api-mount-snapshot,Mount snapshot>>
+* <<searchable-snapshots-api-clear-cache,Clear cache>>
+* <<searchable-snapshots-api-stats,Get stats>>
+
+include::mount-snapshot.asciidoc[]
+include::clear-cache.asciidoc[]
+include::get-stats.asciidoc[]
@@ -19,6 +19,7 @@

 package org.elasticsearch.repositories.azure;

+import com.microsoft.azure.storage.Constants;
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.StorageException;
 import org.apache.logging.log4j.LogManager;
@@ -68,10 +69,8 @@ public class AzureBlobContainer extends AbstractBlobContainer {
         return false;
     }

-    @Override
-    public InputStream readBlob(String blobName) throws IOException {
-        logger.trace("readBlob({})", blobName);
-
+    private InputStream openInputStream(String blobName, long position, @Nullable Long length) throws IOException {
+        logger.trace("readBlob({}) from position [{}] with length [{}]", blobName, position, length != null ? length : "unlimited");
         if (blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY && !blobExists(blobName)) {
             // On Azure, if the location path is a secondary location, and the blob does not
             // exist, instead of returning immediately from the getInputStream call below
@@ -81,9 +80,8 @@ public class AzureBlobContainer extends AbstractBlobContainer {
             // stream to it.
             throw new NoSuchFileException("Blob [" + blobName + "] does not exist");
         }
-
         try {
-            return blobStore.getInputStream(buildKey(blobName));
+            return blobStore.getInputStream(buildKey(blobName), position, length);
         } catch (StorageException e) {
             if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) {
                 throw new NoSuchFileException(e.getMessage());
@@ -94,6 +92,21 @@ public class AzureBlobContainer extends AbstractBlobContainer {
         }
     }

+    @Override
+    public InputStream readBlob(String blobName) throws IOException {
+        return openInputStream(blobName, 0L, null);
+    }
+
+    @Override
+    public InputStream readBlob(String blobName, long position, long length) throws IOException {
+        return openInputStream(blobName, position, length);
+    }
+
+    @Override
+    public long readBlobPreferredLength() {
+        return Constants.DEFAULT_MINIMUM_READ_SIZE_IN_BYTES;
+    }
+
     @Override
     public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         logger.trace("writeBlob({}, stream, {})", buildKey(blobName), blobSize);
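For illustration (a sketch, not part of the commit): the change above funnels both `readBlob` overloads through a single `openInputStream(blobName, position, length)` helper, where a null length means "read to the end of the blob". A minimal, self-contained Java version of that delegation pattern, with an in-memory `blob` array standing in for the Azure SDK call:

[source,java]
--------------------------------------------------
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Hypothetical stand-in for AzureBlobContainer's new structure: both public
// overloads delegate to one ranged helper; a null length means "to the end".
class RangedReadSketch {
    private final byte[] blob = "hello, searchable snapshots".getBytes();

    public InputStream readBlob(String name) throws IOException {
        return openInputStream(name, 0L, null);          // whole blob
    }

    public InputStream readBlob(String name, long position, long length) throws IOException {
        return openInputStream(name, position, length);  // byte range
    }

    private InputStream openInputStream(String name, long position, Long length) throws IOException {
        final int start = Math.toIntExact(position);
        final int len = length == null ? blob.length - start : Math.toIntExact(length);
        return new ByteArrayInputStream(blob, start, len); // replaces blobStore.getInputStream(...)
    }
}
--------------------------------------------------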
@@ -22,6 +22,7 @@ package org.elasticsearch.repositories.azure;
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.StorageException;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetadata;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -100,8 +101,8 @@ public class AzureBlobStore implements BlobStore {
         return service.deleteBlobDirectory(clientName, container, path, executor);
     }

-    public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException {
-        return service.getInputStream(clientName, container, blob);
+    public InputStream getInputStream(String blob, long position, @Nullable Long length) throws URISyntaxException, StorageException {
+        return service.getInputStream(clientName, container, blob, position, length);
     }

     public Map<String, BlobMetadata> listBlobsByPrefix(String keyPath, String prefix)
@@ -43,6 +43,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobMetadata;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.DeleteResult;
@@ -257,13 +258,13 @@ public class AzureStorageService {
         return new DeleteResult(blobsDeleted.get(), bytesDeleted.get());
     }

-    public InputStream getInputStream(String account, String container, String blob)
-        throws URISyntaxException, StorageException, IOException {
+    public InputStream getInputStream(String account, String container, String blob, long position, @Nullable Long length)
+        throws URISyntaxException, StorageException {
         final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
         final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob);
         logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob));
         final BlobInputStream is = SocketAccess.doPrivilegedException(() ->
-            blockBlobReference.openInputStream(null, null, client.v2().get()));
+            blockBlobReference.openInputStream(position, length, null, null, client.v2().get()));
         return giveSocketPermissionsToStream(is);
     }

@@ -25,12 +25,14 @@ import com.microsoft.azure.storage.blob.BlobRequestOptions;
 import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpServer;
 import fixture.azure.AzureHttpHandler;
+import org.apache.http.HttpStatus;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -63,6 +65,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -81,8 +84,10 @@ import static org.elasticsearch.repositories.azure.AzureStorageSettings.TIMEOUT_
 import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;

 /**
  * This class tests how a {@link AzureBlobContainer} and its underlying SDK client are retrying requests when reading or writing blobs.
@@ -90,6 +95,8 @@ import static org.hamcrest.Matchers.lessThan;
 @SuppressForbidden(reason = "use a http server")
 public class AzureBlobContainerRetriesTests extends ESTestCase {

+    private static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1L;
+
     private HttpServer httpServer;
     private ThreadPool threadPool;
@@ -128,7 +135,7 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         final AzureStorageService service = new AzureStorageService(clientSettings.build()) {
             @Override
             RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) {
-                return new RetryExponentialRetry(1, 100, 500, azureStorageSettings.getMaxRetries());
+                return new RetryExponentialRetry(1, 10, 100, azureStorageSettings.getMaxRetries());
             }

             @Override
@@ -150,7 +157,16 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {

     public void testReadNonexistentBlobThrowsNoSuchFileException() {
         final BlobContainer blobContainer = createBlobContainer(between(1, 5));
-        final Exception exception = expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob"));
+        final Exception exception = expectThrows(NoSuchFileException.class,
+            () -> {
+                if (randomBoolean()) {
+                    blobContainer.readBlob("read_nonexistent_blob");
+                } else {
+                    final long position = randomLongBetween(0, MAX_RANGE_VAL - 1L);
+                    final long length = randomLongBetween(1, MAX_RANGE_VAL - position);
+                    blobContainer.readBlob("read_nonexistent_blob", position, length);
+                }
+            });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("not found"));
     }

@@ -160,34 +176,35 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         final CountDown countDownGet = new CountDown(maxRetries);
         final byte[] bytes = randomBlobContent();
         httpServer.createContext("/container/read_blob_max_retries", exchange -> {
-            Streams.readFully(exchange.getRequestBody());
-            if ("HEAD".equals(exchange.getRequestMethod())) {
-                if (countDownHead.countDown()) {
-                    exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
-                    exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length));
-                    exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
-                    exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
-                    exchange.close();
-                    return;
-                }
-            } else if ("GET".equals(exchange.getRequestMethod())) {
-                if (countDownGet.countDown()) {
-                    final int rangeStart = getRangeStart(exchange);
-                    assertThat(rangeStart, lessThan(bytes.length));
-                    final int length = bytes.length - rangeStart;
-                    exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
-                    exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length));
-                    exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
-                    exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length);
-                    exchange.getResponseBody().write(bytes, rangeStart, length);
-                    exchange.close();
-                    return;
-                }
-            }
-            if (randomBoolean()) {
-                AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE));
-            }
-            exchange.close();
+            try {
+                Streams.readFully(exchange.getRequestBody());
+                if ("HEAD".equals(exchange.getRequestMethod())) {
+                    if (countDownHead.countDown()) {
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
+                        return;
+                    }
+                } else if ("GET".equals(exchange.getRequestMethod())) {
+                    if (countDownGet.countDown()) {
+                        final int rangeStart = getRangeStart(exchange);
+                        assertThat(rangeStart, lessThan(bytes.length));
+                        final int length = bytes.length - rangeStart;
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length);
+                        exchange.getResponseBody().write(bytes, rangeStart, length);
+                        return;
+                    }
+                }
+                if (randomBoolean()) {
+                    AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE));
+                }
+            } finally {
+                exchange.close();
+            }
         });

         final BlobContainer blobContainer = createBlobContainer(maxRetries);
@@ -198,6 +215,58 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         }
     }

+    public void testReadRangeBlobWithRetries() throws Exception {
+        final int maxRetries = randomIntBetween(1, 5);
+        final CountDown countDownHead = new CountDown(maxRetries);
+        final CountDown countDownGet = new CountDown(maxRetries);
+        final byte[] bytes = randomBlobContent();
+        httpServer.createContext("/container/read_range_blob_max_retries", exchange -> {
+            try {
+                Streams.readFully(exchange.getRequestBody());
+                if ("HEAD".equals(exchange.getRequestMethod())) {
+                    if (countDownHead.countDown()) {
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
+                        return;
+                    }
+                } else if ("GET".equals(exchange.getRequestMethod())) {
+                    if (countDownGet.countDown()) {
+                        final int rangeStart = getRangeStart(exchange);
+                        assertThat(rangeStart, lessThan(bytes.length));
+                        final Optional<Integer> rangeEnd = getRangeEnd(exchange);
+                        assertThat(rangeEnd.isPresent(), is(true));
+                        assertThat(rangeEnd.get(), greaterThanOrEqualTo(rangeStart));
+                        final int length = (rangeEnd.get() - rangeStart) + 1;
+                        assertThat(length, lessThanOrEqualTo(bytes.length - rangeStart));
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length);
+                        exchange.getResponseBody().write(bytes, rangeStart, length);
+                        return;
+                    }
+                }
+                if (randomBoolean()) {
+                    AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE));
+                }
+            } finally {
+                exchange.close();
+            }
+        });
+
+        final BlobContainer blobContainer = createBlobContainer(maxRetries);
+        final int position = randomIntBetween(0, bytes.length - 1);
+        final int length = randomIntBetween(1, bytes.length - position);
+        try (InputStream inputStream = blobContainer.readBlob("read_range_blob_max_retries", position, length)) {
+            final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream));
+            assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + length)), bytesRead);
+            assertThat(countDownHead.isCountedDown(), is(true));
+            assertThat(countDownGet.isCountedDown(), is(true));
+        }
+    }
+
     public void testWriteBlobWithRetries() throws Exception {
         final int maxRetries = randomIntBetween(1, 5);
         final CountDown countDown = new CountDown(maxRetries);
@@ -339,14 +408,56 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb
     }

-    private static int getRangeStart(final HttpExchange exchange) {
+    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");
+
+    private static Tuple<Long, Long> getRanges(HttpExchange exchange) {
         final String rangeHeader = exchange.getRequestHeaders().getFirst("X-ms-range");
         if (rangeHeader == null) {
-            return 0;
+            return Tuple.tuple(0L, MAX_RANGE_VAL);
         }

-        final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(rangeHeader);
+        final Matcher matcher = RANGE_PATTERN.matcher(rangeHeader);
         assertTrue(rangeHeader + " matches expected pattern", matcher.matches());
-        return Math.toIntExact(Long.parseLong(matcher.group(1)));
+        final long rangeStart = Long.parseLong(matcher.group(1));
+        final long rangeEnd = Long.parseLong(matcher.group(2));
+        assertThat(rangeStart, lessThanOrEqualTo(rangeEnd));
+        return Tuple.tuple(rangeStart, rangeEnd);
+    }
+
+    private static int getRangeStart(HttpExchange exchange) {
+        return Math.toIntExact(getRanges(exchange).v1());
+    }
+
+    private static Optional<Integer> getRangeEnd(HttpExchange exchange) {
+        final long rangeEnd = getRanges(exchange).v2();
+        if (rangeEnd == MAX_RANGE_VAL) {
+            return Optional.empty();
+        }
+        return Optional.of(Math.toIntExact(rangeEnd));
+    }
+
+    private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException {
+        final int rangeStart = getRangeStart(exchange);
+        assertThat(rangeStart, lessThan(bytes.length));
+        final Optional<Integer> rangeEnd = getRangeEnd(exchange);
+        final int length;
+        if (rangeEnd.isPresent()) {
+            // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+            final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1);
+            length = effectiveRangeEnd - rangeStart;
+        } else {
+            length = bytes.length - rangeStart - 1;
+        }
+        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length));
+        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+        exchange.sendResponseHeaders(HttpStatus.SC_OK, length);
+        final int bytesToSend = randomIntBetween(0, length - 1);
+        if (bytesToSend > 0) {
+            exchange.getResponseBody().write(bytes, rangeStart, bytesToSend);
+        }
+        if (randomBoolean()) {
+            exchange.getResponseBody().flush();
+        }
+    }
 }
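For reference (an illustrative sketch, not part of the commit): the test helpers above parse Azure's `x-ms-range` header, whose `bytes=<start>-<end>` form carries two inclusive bounds; a missing header means the whole blob. A standalone version of that parsing logic:

[source,java]
--------------------------------------------------
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Sketch of the "bytes=<start>-<end>" parsing used by getRanges above;
// both bounds are inclusive, and a missing header means the whole blob.
final class RangeHeaderSketch {
    static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1L;
    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");

    static long[] parse(String rangeHeader) {
        if (rangeHeader == null) {
            return new long[] { 0L, MAX_RANGE_VAL }; // no range: read everything
        }
        final Matcher matcher = RANGE_PATTERN.matcher(rangeHeader);
        if (matcher.matches() == false) {
            throw new IllegalArgumentException("unexpected range header: " + rangeHeader);
        }
        return new long[] { Long.parseLong(matcher.group(1)), Long.parseLong(matcher.group(2)) };
    }

    public static void main(String[] args) {
        final long[] range = parse("bytes=0-1023"); // first kilobyte: start=0, end=1023
        assert range[0] == 0L && range[1] == 1023L;
    }
}
--------------------------------------------------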
@@ -221,7 +221,12 @@ processTestResources {
   MavenFilteringHack.filter(it, expansions)
 }

-testFixtures.useFixture(':test:fixtures:s3-fixture')
+[
+  's3-fixture',
+  's3-fixture-with-session-token',
+  's3-fixture-with-ec2',
+  's3-fixture-with-ecs',
+].forEach { fixture -> testFixtures.useFixture(':test:fixtures:s3-fixture', fixture) }

 def fixtureAddress = { fixture ->
   assert useFixture: 'closure should not be used without a fixture'
@@ -46,7 +46,10 @@ import org.elasticsearch.common.blobstore.DeleteResult;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetadata;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;

+import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
@@ -86,6 +89,27 @@ class S3BlobContainer extends AbstractBlobContainer {
         return new S3RetryingInputStream(blobStore, buildKey(blobName));
     }

+    @Override
+    public InputStream readBlob(String blobName, long position, long length) throws IOException {
+        if (position < 0L) {
+            throw new IllegalArgumentException("position must be non-negative");
+        }
+        if (length < 0) {
+            throw new IllegalArgumentException("length must be non-negative");
+        }
+        if (length == 0) {
+            return new ByteArrayInputStream(new byte[0]);
+        } else {
+            return new S3RetryingInputStream(blobStore, buildKey(blobName), position, Math.addExact(position, length - 1));
+        }
+    }
+
+    @Override
+    public long readBlobPreferredLength() {
+        // This container returns streams that must be fully consumed, so we tell consumers to make bounded requests.
+        return new ByteSizeValue(32, ByteSizeUnit.MB).getBytes();
+    }
+
     /**
      * This implementation ignores the failIfAlreadyExists flag as the S3 API has no way to enforce this due to its weak consistency model.
      */
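A small worked sketch (not part of the commit) of the arithmetic in the new `readBlob(String, long, long)` override: callers pass a position and a length, while `GetObjectRequest.setRange` and `S3RetryingInputStream` expect inclusive start/end bounds.

[source,java]
--------------------------------------------------
// Sketch of the (position, length) -> inclusive [start, end] mapping used
// above; Math.addExact guards against long overflow, as in the real code.
static long[] toInclusiveRange(long position, long length) {
    if (position < 0L || length <= 0L) {
        throw new IllegalArgumentException("position must be non-negative and length positive");
    }
    // reading `length` bytes starting at `position` ends at position + length - 1
    return new long[] { position, Math.addExact(position, length - 1) };
}
// e.g. toInclusiveRange(128, 4096) returns [128, 4223], i.e. bytes [128, 4224)
--------------------------------------------------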
@@ -25,6 +25,7 @@ import com.amazonaws.services.s3.model.S3Object;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Version;
@@ -49,6 +50,8 @@ class S3RetryingInputStream extends InputStream {

     private final S3BlobStore blobStore;
     private final String blobKey;
+    private final long start;
+    private final long end;
     private final int maxAttempts;

     private InputStream currentStream;
@@ -58,17 +61,32 @@ class S3RetryingInputStream extends InputStream {
     private boolean closed;

     S3RetryingInputStream(S3BlobStore blobStore, String blobKey) throws IOException {
+        this(blobStore, blobKey, 0, Long.MAX_VALUE - 1);
+    }
+
+    // both start and end are inclusive bounds, following the definition in GetObjectRequest.setRange
+    S3RetryingInputStream(S3BlobStore blobStore, String blobKey, long start, long end) throws IOException {
+        if (start < 0L) {
+            throw new IllegalArgumentException("start must be non-negative");
+        }
+        if (end < start || end == Long.MAX_VALUE) {
+            throw new IllegalArgumentException("end must be >= start and not Long.MAX_VALUE");
+        }
         this.blobStore = blobStore;
         this.blobKey = blobKey;
         this.maxAttempts = blobStore.getMaxRetries() + 1;
+        this.start = start;
+        this.end = end;
         currentStream = openStream();
     }

     private InputStream openStream() throws IOException {
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
             final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey);
-            if (currentOffset > 0) {
-                getObjectRequest.setRange(currentOffset);
+            if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) {
+                assert start + currentOffset <= end :
+                    "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end;
+                getObjectRequest.setRange(Math.addExact(start, currentOffset), end);
             }
             final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest));
             return s3Object.getObjectContent();
@@ -122,20 +140,32 @@ class S3RetryingInputStream extends InputStream {

     private void reopenStreamOrFail(IOException e) throws IOException {
         if (attempt >= maxAttempts) {
+            logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], giving up",
+                blobStore.bucket(), blobKey, start + currentOffset, attempt, maxAttempts), e);
             throw addSuppressedExceptions(e);
         }
         logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], retrying",
-            blobStore.bucket(), blobKey, currentOffset, attempt, maxAttempts), e);
+            blobStore.bucket(), blobKey, start + currentOffset, attempt, maxAttempts), e);
         attempt += 1;
         if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) {
             failures.add(e);
         }
+        try {
+            Streams.consumeFully(currentStream);
+        } catch (Exception e2) {
+            logger.trace("Failed to fully consume stream on close", e);
+        }
         IOUtils.closeWhileHandlingException(currentStream);
         currentStream = openStream();
     }

     @Override
     public void close() throws IOException {
+        try {
+            Streams.consumeFully(currentStream);
+        } catch (Exception e) {
+            logger.trace("Failed to fully consume stream on close", e);
+        }
         currentStream.close();
         closed = true;
     }
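The retry bookkeeping above deserves a note: when a read dies after `currentOffset` bytes of the requested range were consumed, the reopened request must ask only for the inclusive remainder. A sketch (not part of the commit) of that resume arithmetic:

[source,java]
--------------------------------------------------
// Sketch of openStream()'s resume arithmetic: the original request covers the
// inclusive range [start, end]; after currentOffset consumed bytes, a retried
// GET asks for the inclusive remainder [start + currentOffset, end].
static long[] resumeRange(long start, long end, long currentOffset) {
    final long resumeStart = Math.addExact(start, currentOffset);
    assert resumeStart <= end : "requesting beyond end";
    return new long[] { resumeStart, end };
}
// e.g. a request for [100, 199] that failed after 40 bytes resumes at [140, 199]
--------------------------------------------------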
@@ -32,6 +32,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -58,6 +59,7 @@ import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.Locale;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -70,10 +72,13 @@ import static org.elasticsearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SET
 import static org.elasticsearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;

 /**
  * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs.
@@ -81,6 +86,8 @@ import static org.hamcrest.Matchers.lessThan;
 @SuppressForbidden(reason = "use a http server")
 public class S3BlobContainerRetriesTests extends ESTestCase {

+    private static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1;
+
     private HttpServer httpServer;
     private S3Service service;

@@ -139,8 +146,19 @@ public class S3BlobContainerRetriesTests extends ESTestCase {

     public void testReadNonexistentBlobThrowsNoSuchFileException() {
         final BlobContainer blobContainer = createBlobContainer(between(1, 5), null, null, null);
-        final Exception exception = expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob"));
+        final long position = randomLongBetween(0, MAX_RANGE_VAL);
+        final int length = randomIntBetween(0, Math.toIntExact(Math.min(Integer.MAX_VALUE, MAX_RANGE_VAL - position)));
+        final Exception exception = expectThrows(NoSuchFileException.class,
+            () -> {
+                if (randomBoolean()) {
+                    blobContainer.readBlob("read_nonexistent_blob");
+                } else {
+                    blobContainer.readBlob("read_nonexistent_blob", 0, 1);
+                }
+            });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found"));
+        assertThat(expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob", position, length))
+            .getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found"));
     }

     public void testReadBlobWithRetries() throws Exception {
@@ -153,6 +171,7 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
             if (countDown.countDown()) {
                 final int rangeStart = getRangeStart(exchange);
                 assertThat(rangeStart, lessThan(bytes.length));
+                assertEquals(Optional.empty(), getRangeEnd(exchange));
                 exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
                 exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length - rangeStart);
                 exchange.getResponseBody().write(bytes, rangeStart, bytes.length - rangeStart);
@@ -173,8 +192,85 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         final TimeValue readTimeout = TimeValue.timeValueSeconds(between(1, 3));
         final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null);
         try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) {
-            assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream)));
-            assertThat(countDown.isCountedDown(), is(true));
+            final int readLimit;
+            final InputStream wrappedStream;
+            if (randomBoolean()) {
+                // read stream only partly
+                readLimit = randomIntBetween(0, bytes.length);
+                wrappedStream = Streams.limitStream(inputStream, readLimit);
+            } else {
+                readLimit = bytes.length;
+                wrappedStream = inputStream;
+            }
+            final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
+            logger.info("maxRetries={}, readLimit={}, byteSize={}, bytesRead={}",
+                maxRetries, readLimit, bytes.length, bytesRead.length);
+            assertArrayEquals(Arrays.copyOfRange(bytes, 0, readLimit), bytesRead);
+            if (readLimit < bytes.length) {
+                // we might have completed things based on an incomplete response, and we're happy with that
+            } else {
+                assertTrue(countDown.isCountedDown());
+            }
         }
     }

+    public void testReadRangeBlobWithRetries() throws Exception {
+        final int maxRetries = randomInt(5);
+        final CountDown countDown = new CountDown(maxRetries + 1);
+
+        final byte[] bytes = randomBlobContent();
+        httpServer.createContext("/bucket/read_range_blob_max_retries", exchange -> {
+            Streams.readFully(exchange.getRequestBody());
+            if (countDown.countDown()) {
+                final int rangeStart = getRangeStart(exchange);
+                assertThat(rangeStart, lessThan(bytes.length));
+                assertTrue(getRangeEnd(exchange).isPresent());
+                final int rangeEnd = getRangeEnd(exchange).get();
+                assertThat(rangeEnd, greaterThanOrEqualTo(rangeStart));
+                // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+                final int effectiveRangeEnd = Math.min(bytes.length - 1, rangeEnd);
+                final int length = (effectiveRangeEnd - rangeStart) + 1;
+                exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
+                exchange.sendResponseHeaders(HttpStatus.SC_OK, length);
+                exchange.getResponseBody().write(bytes, rangeStart, length);
+                exchange.close();
+                return;
+            }
+            if (randomBoolean()) {
+                exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
+                    HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
+            } else if (randomBoolean()) {
+                sendIncompleteContent(exchange, bytes);
+            }
+            if (randomBoolean()) {
+                exchange.close();
+            }
+        });
+
+        final TimeValue readTimeout = TimeValue.timeValueMillis(between(100, 500));
+        final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null);
+        final int position = randomIntBetween(0, bytes.length - 1);
+        final int length = randomIntBetween(0, randomBoolean() ? bytes.length : Integer.MAX_VALUE);
+        try (InputStream inputStream = blobContainer.readBlob("read_range_blob_max_retries", position, length)) {
+            final int readLimit;
+            final InputStream wrappedStream;
+            if (randomBoolean()) {
+                // read stream only partly
+                readLimit = randomIntBetween(0, length);
+                wrappedStream = Streams.limitStream(inputStream, readLimit);
+            } else {
+                readLimit = length;
+                wrappedStream = inputStream;
+            }
+            final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
+            logger.info("maxRetries={}, position={}, length={}, readLimit={}, byteSize={}, bytesRead={}",
+                maxRetries, position, length, readLimit, bytes.length, bytesRead.length);
+            assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + readLimit)), bytesRead);
+            if (readLimit == 0 || (readLimit < length && readLimit == bytesRead.length)) {
+                // we might have completed things based on an incomplete response, and we're happy with that
+            } else {
+                assertTrue(countDown.isCountedDown());
+            }
+        }
+    }
+
@@ -194,12 +290,18 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         final byte[] bytes = randomBlobContent();
         httpServer.createContext("/bucket/read_blob_incomplete", exchange -> sendIncompleteContent(exchange, bytes));

-        exception = expectThrows(SocketTimeoutException.class, () -> {
-            try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) {
+        final int position = randomIntBetween(0, bytes.length - 1);
+        final int length = randomIntBetween(1, randomBoolean() ? bytes.length : Integer.MAX_VALUE);
+        exception = expectThrows(IOException.class, () -> {
+            try (InputStream stream = randomBoolean() ?
+                    blobContainer.readBlob("read_blob_incomplete") :
+                    blobContainer.readBlob("read_blob_incomplete", position, length)) {
                 Streams.readFully(stream);
             }
         });
-        assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+        assertThat(exception, either(instanceOf(SocketTimeoutException.class)).or(instanceOf(ConnectionClosedException.class)));
+        assertThat(exception.getMessage().toLowerCase(Locale.ROOT), either(containsString("read timed out")).or(
+            containsString("premature end of chunk coded message body: closing chunk expected")));
         assertThat(exception.getSuppressed().length, equalTo(maxRetries));
     }

@@ -209,7 +311,14 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         // HTTP server closes connection immediately
         httpServer.createContext("/bucket/read_blob_no_response", HttpExchange::close);

-        Exception exception = expectThrows(SdkClientException.class, () -> blobContainer.readBlob("read_blob_no_response"));
+        Exception exception = expectThrows(SdkClientException.class,
+            () -> {
+                if (randomBoolean()) {
+                    blobContainer.readBlob("read_blob_no_response");
+                } else {
+                    blobContainer.readBlob("read_blob_no_response", 0, 1);
+                }
+            });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("the target server failed to respond"));
         assertThat(exception.getCause(), instanceOf(NoHttpResponseException.class));
         assertThat(exception.getSuppressed().length, equalTo(0));
@@ -227,12 +336,15 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         });

         final Exception exception = expectThrows(ConnectionClosedException.class, () -> {
-            try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) {
+            try (InputStream stream = randomBoolean() ?
+                    blobContainer.readBlob("read_blob_incomplete", 0, 1):
+                    blobContainer.readBlob("read_blob_incomplete")) {
                 Streams.readFully(stream);
             }
         });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT),
-            containsString("premature end of content-length delimited message body"));
+            either(containsString("premature end of chunk coded message body: closing chunk expected"))
+                .or(containsString("premature end of content-length delimited message body")));
+        assertThat(exception.getSuppressed().length, equalTo(Math.min(S3RetryingInputStream.MAX_SUPPRESSED_EXCEPTIONS, maxRetries)));
     }

@@ -397,25 +509,49 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb
     }

-    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-9223372036854775806$");
+    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");

-    private static int getRangeStart(HttpExchange exchange) {
+    private static Tuple<Long, Long> getRange(HttpExchange exchange) {
         final String rangeHeader = exchange.getRequestHeaders().getFirst("Range");
         if (rangeHeader == null) {
-            return 0;
+            return Tuple.tuple(0L, MAX_RANGE_VAL);
         }

         final Matcher matcher = RANGE_PATTERN.matcher(rangeHeader);
         assertTrue(rangeHeader + " matches expected pattern", matcher.matches());
-        return Math.toIntExact(Long.parseLong(matcher.group(1)));
+        long rangeStart = Long.parseLong(matcher.group(1));
+        long rangeEnd = Long.parseLong(matcher.group(2));
+        assertThat(rangeStart, lessThanOrEqualTo(rangeEnd));
+        return Tuple.tuple(rangeStart, rangeEnd);
+    }
+
+    private static int getRangeStart(HttpExchange exchange) {
+        return Math.toIntExact(getRange(exchange).v1());
+    }
+
+    private static Optional<Integer> getRangeEnd(HttpExchange exchange) {
+        final long rangeEnd = getRange(exchange).v2();
+        if (rangeEnd == MAX_RANGE_VAL) {
+            return Optional.empty();
+        }
+        return Optional.of(Math.toIntExact(rangeEnd));
     }

     private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException {
         final int rangeStart = getRangeStart(exchange);
         assertThat(rangeStart, lessThan(bytes.length));
+        final Optional<Integer> rangeEnd = getRangeEnd(exchange);
+        final int length;
+        if (rangeEnd.isPresent()) {
+            // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+            final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1);
+            length = effectiveRangeEnd - rangeStart;
+        } else {
+            length = bytes.length - rangeStart - 1;
+        }
         exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
-        exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length - rangeStart);
-        final int bytesToSend = randomIntBetween(0, bytes.length - rangeStart - 1);
+        exchange.sendResponseHeaders(HttpStatus.SC_OK, length);
+        final int bytesToSend = randomIntBetween(0, length - 1);
         if (bytesToSend > 0) {
             exchange.getResponseBody().write(bytes, rangeStart, bytesToSend);
         }
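The clamping in `sendIncompleteContent` and `testReadRangeBlobWithRetries` follows RFC 2616 section 14.35: a range end past the last byte of the representation is still satisfiable, truncated to the end. A sketch (not part of the commit) of the length computation for a complete response; the incomplete helper then deliberately sends fewer bytes than this:

[source,java]
--------------------------------------------------
// Sketch of the RFC 2616 §14.35 clamping used by the range tests: an
// inclusive range end beyond the last byte is truncated to size - 1 before
// the length of a complete response is computed.
static int completeResponseLength(int rangeStart, int rangeEnd, int blobSize) {
    final int effectiveRangeEnd = Math.min(rangeEnd, blobSize - 1);
    return (effectiveRangeEnd - rangeStart) + 1; // both bounds inclusive
}
// e.g. completeResponseLength(0, 10_000, 512) == 512: the whole 512-byte blob
--------------------------------------------------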
@@ -33,8 +33,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
-import org.elasticsearch.cluster.routing.allocation.MoveDecision;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode;
 import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
@@ -43,7 +42,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -62,20 +60,20 @@ public class TransportClusterAllocationExplainAction
     private final ClusterInfoService clusterInfoService;
     private final AllocationDeciders allocationDeciders;
     private final ShardsAllocator shardAllocator;
-    private final GatewayAllocator gatewayAllocator;
+    private final AllocationService allocationService;

     @Inject
     public TransportClusterAllocationExplainAction(TransportService transportService, ClusterService clusterService,
                                                    ThreadPool threadPool, ActionFilters actionFilters,
                                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                                    ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
-                                                   ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) {
+                                                   ShardsAllocator shardAllocator, AllocationService allocationService) {
         super(ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
             ClusterAllocationExplainRequest::new, indexNameExpressionResolver);
         this.clusterInfoService = clusterInfoService;
         this.allocationDeciders = allocationDeciders;
         this.shardAllocator = shardAllocator;
-        this.gatewayAllocator = gatewayAllocator;
+        this.allocationService = allocationService;
     }

     @Override
@@ -105,27 +103,21 @@ public class TransportClusterAllocationExplainAction
         logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);

         ClusterAllocationExplanation cae = explainShard(shardRouting, allocation,
-            request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator);
+            request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), allocationService);
         listener.onResponse(new ClusterAllocationExplainResponse(cae));
     }

     // public for testing
     public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation,
                                                             ClusterInfo clusterInfo, boolean includeYesDecisions,
-                                                            GatewayAllocator gatewayAllocator, ShardsAllocator shardAllocator) {
+                                                            AllocationService allocationService) {
         allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS);

         ShardAllocationDecision shardDecision;
         if (shardRouting.initializing() || shardRouting.relocating()) {
             shardDecision = ShardAllocationDecision.NOT_TAKEN;
         } else {
-            AllocateUnassignedDecision allocateDecision = shardRouting.unassigned() ?
-                gatewayAllocator.decideUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN;
-            if (allocateDecision.isDecisionTaken() == false) {
-                shardDecision = shardAllocator.decideShardAllocation(shardRouting, allocation);
-            } else {
-                shardDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
-            }
+            shardDecision = allocationService.explainShardAllocation(shardRouting, allocation);
         }

         return new ClusterAllocationExplanation(shardRouting,
@ -36,12 +36,12 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
@ -55,15 +55,15 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<

private static final Logger logger = LogManager.getLogger(TransportClusterHealthAction.class);

private final GatewayAllocator gatewayAllocator;
private final AllocationService allocationService;

@Inject
public TransportClusterHealthAction(TransportService transportService, ClusterService clusterService,
ThreadPool threadPool, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) {
IndexNameExpressionResolver indexNameExpressionResolver, AllocationService allocationService) {
super(ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters,
ClusterHealthRequest::new, indexNameExpressionResolver);
this.gatewayAllocator = gatewayAllocator;
this.allocationService = allocationService;
}

@Override
@ -236,14 +236,14 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<

private boolean validateRequest(final ClusterHealthRequest request, ClusterState clusterState, final int waitCount) {
ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime());
allocationService.getNumberOfInFlightFetches(), clusterService.getMasterService().getMaxTaskWaitTime());
return prepareResponse(request, response, clusterState, indexNameExpressionResolver) == waitCount;
}

private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState,
final int waitFor, boolean timedOut) {
ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime());
allocationService.getNumberOfInFlightFetches(), clusterService.getMasterService().getMaxTaskWaitTime());
int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver);
boolean valid = (readyCounter == waitFor);
assert valid || timedOut;
@ -45,7 +45,7 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten
@Nullable
private RestoreInfo restoreInfo;

RestoreSnapshotResponse(@Nullable RestoreInfo restoreInfo) {
public RestoreSnapshotResponse(@Nullable RestoreInfo restoreInfo) {
this.restoreInfo = restoreInfo;
}

@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService;
import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
import org.elasticsearch.cluster.routing.DelayedAllocationService;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
@ -103,12 +104,14 @@ public class ClusterModule extends AbstractModule {
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final AllocationDeciders allocationDeciders;
private final AllocationService allocationService;
private final List<ClusterPlugin> clusterPlugins;
// pkg private for tests
final Collection<AllocationDecider> deciderList;
final ShardsAllocator shardsAllocator;

public ClusterModule(Settings settings, ClusterService clusterService, List<ClusterPlugin> clusterPlugins,
ClusterInfoService clusterInfoService) {
this.clusterPlugins = clusterPlugins;
this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins);
this.allocationDeciders = new AllocationDeciders(deciderList);
this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins);
@ -297,4 +300,22 @@ public class ClusterModule extends AbstractModule {
bind(AllocationDeciders.class).toInstance(allocationDeciders);
bind(ShardsAllocator.class).toInstance(shardsAllocator);
}

public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator) {
final Map<String, ExistingShardsAllocator> existingShardsAllocators = new HashMap<>();
existingShardsAllocators.put(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator);

for (ClusterPlugin clusterPlugin : clusterPlugins) {
for (Map.Entry<String, ExistingShardsAllocator> existingShardsAllocatorEntry
: clusterPlugin.getExistingShardsAllocators().entrySet()) {
final String allocatorName = existingShardsAllocatorEntry.getKey();
if (existingShardsAllocators.put(allocatorName, existingShardsAllocatorEntry.getValue()) != null) {
throw new IllegalArgumentException("ExistingShardsAllocator [" + allocatorName + "] from [" +
clusterPlugin.getClass().getName() + "] was already defined");
}
}
}
allocationService.setExistingShardsAllocators(existingShardsAllocators);
}

}
@ -29,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.collect.Tuple;
@ -883,7 +884,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
ignored.add(shard);
}

public class UnassignedIterator implements Iterator<ShardRouting> {
public class UnassignedIterator implements Iterator<ShardRouting>, ExistingShardsAllocator.UnassignedAllocationHandler {

private final ListIterator<ShardRouting> iterator;
private ShardRouting current;
@ -907,6 +908,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
@Override
public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize,
RoutingChangesObserver routingChangesObserver) {
nodes.ensureMutable();
@ -922,6 +924,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
*
* @param attempt the result of the allocation attempt
*/
@Override
public void removeAndIgnore(AllocationStatus attempt, RoutingChangesObserver changes) {
nodes.ensureMutable();
innerRemove();
@ -940,6 +943,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
* @param recoverySource the new recovery source to use
* @return the shard with unassigned info updated
*/
@Override
public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource,
RoutingChangesObserver changes) {
nodes.ensureMutable();
@ -30,6 +30,7 @@ import org.elasticsearch.cluster.health.ClusterStateHealth;
import org.elasticsearch.cluster.metadata.AutoExpandReplicas;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
@ -39,8 +40,10 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.PriorityComparator;

import java.util.ArrayList;
import java.util.Collections;
@ -57,7 +60,6 @@ import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING;


/**
* This service manages the node allocation of a cluster. For this reason the
* {@link AllocationService} keeps {@link AllocationDeciders} to choose nodes
@ -69,26 +71,31 @@ public class AllocationService {
private static final Logger logger = LogManager.getLogger(AllocationService.class);

private final AllocationDeciders allocationDeciders;
private GatewayAllocator gatewayAllocator;
private Map<String, ExistingShardsAllocator> existingShardsAllocators;
private final ShardsAllocator shardsAllocator;
private final ClusterInfoService clusterInfoService;

public AllocationService(AllocationDeciders allocationDeciders,
GatewayAllocator gatewayAllocator,
// only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator
public AllocationService(AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator,
ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
this(allocationDeciders, shardsAllocator, clusterInfoService);
setGatewayAllocator(gatewayAllocator);
setExistingShardsAllocators(Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator));
}

public AllocationService(AllocationDeciders allocationDeciders,
ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) {
public AllocationService(AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator,
ClusterInfoService clusterInfoService) {
this.allocationDeciders = allocationDeciders;
this.shardsAllocator = shardsAllocator;
this.clusterInfoService = clusterInfoService;
}

public void setGatewayAllocator(GatewayAllocator gatewayAllocator) {
this.gatewayAllocator = gatewayAllocator;
/**
* Inject the {@link ExistingShardsAllocator}s to use. May only be called once.
*/
public void setExistingShardsAllocators(Map<String, ExistingShardsAllocator> existingShardsAllocators) {
assert this.existingShardsAllocators == null : "cannot set allocators " + existingShardsAllocators + " twice";
assert existingShardsAllocators.isEmpty() == false : "must add at least one ExistingShardsAllocator";
this.existingShardsAllocators = Collections.unmodifiableMap(existingShardsAllocators);
}

/**
@ -98,6 +105,7 @@ public class AllocationService {
* If the same instance of the {@link ClusterState} is returned, then no change has been made.</p>
*/
public ClusterState applyStartedShards(ClusterState clusterState, List<ShardRouting> startedShards) {
assert assertInitialized();
if (startedShards.isEmpty()) {
return clusterState;
}
@ -108,9 +116,11 @@
clusterInfoService.getClusterInfo(), currentNanoTime());
// as starting a primary relocation target can reinitialize replica shards, start replicas first
startedShards = new ArrayList<>(startedShards);
Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary));
startedShards.sort(Comparator.comparing(ShardRouting::primary));
applyStartedShards(allocation, startedShards);
gatewayAllocator.applyStartedShards(allocation, startedShards);
for (final ExistingShardsAllocator allocator : existingShardsAllocators.values()) {
allocator.applyStartedShards(startedShards, allocation);
}
assert RoutingNodes.assertShardStats(allocation.routingNodes());
String startedShardsAsString
= firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString(), logger.isDebugEnabled());
@ -171,6 +181,7 @@
*/
public ClusterState applyFailedShards(final ClusterState clusterState, final List<FailedShard> failedShards,
final List<StaleShard> staleShards) {
assert assertInitialized();
if (staleShards.isEmpty() && failedShards.isEmpty()) {
return clusterState;
}
@ -216,7 +227,9 @@
logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail);
}
}
gatewayAllocator.applyFailedShards(allocation, failedShards);
for (final ExistingShardsAllocator allocator : existingShardsAllocators.values()) {
allocator.applyFailedShards(failedShards, allocation);
}

reroute(allocation);
String failedShardsAsString
@ -408,15 +421,43 @@
assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. See disassociateDeadNodes";
assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() :
"auto-expand replicas out of sync with number of nodes in the cluster";
assert assertInitialized();

removeDelayMarkers(allocation);
// try to allocate existing shard copies first
gatewayAllocator.allocateUnassigned(allocation);

allocateExistingUnassignedShards(allocation); // try to allocate existing shard copies first
shardsAllocator.allocate(allocation);
assert RoutingNodes.assertShardStats(allocation.routingNodes());
}

private void allocateExistingUnassignedShards(RoutingAllocation allocation) {
allocation.routingNodes().unassigned().sort(PriorityComparator.getAllocationComparator(allocation)); // sort for priority ordering

for (final ExistingShardsAllocator existingShardsAllocator : existingShardsAllocators.values()) {
existingShardsAllocator.beforeAllocation(allocation);
}

final RoutingNodes.UnassignedShards.UnassignedIterator primaryIterator = allocation.routingNodes().unassigned().iterator();
while (primaryIterator.hasNext()) {
final ShardRouting shardRouting = primaryIterator.next();
if (shardRouting.primary()) {
getAllocatorForShard(shardRouting, allocation).allocateUnassigned(shardRouting, allocation, primaryIterator);
}
}

for (final ExistingShardsAllocator existingShardsAllocator : existingShardsAllocators.values()) {
existingShardsAllocator.afterPrimariesBeforeReplicas(allocation);
}

final RoutingNodes.UnassignedShards.UnassignedIterator replicaIterator = allocation.routingNodes().unassigned().iterator();
while (replicaIterator.hasNext()) {
final ShardRouting shardRouting = replicaIterator.next();
if (shardRouting.primary() == false) {
getAllocatorForShard(shardRouting, allocation).allocateUnassigned(shardRouting, allocation, replicaIterator);
}
}
}

private void disassociateDeadNodes(RoutingAllocation allocation) {
for (Iterator<RoutingNode> it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) {
RoutingNode node = it.next();
@ -454,9 +495,11 @@
}
}

/**
* Create a mutable {@link RoutingNodes}. This is a costly operation so this must only be called once!
*/
private RoutingNodes getMutableRoutingNodes(ClusterState clusterState) {
RoutingNodes routingNodes = new RoutingNodes(clusterState, false); // this is a costly operation - only call this once!
return routingNodes;
return new RoutingNodes(clusterState, false);
}

/** override this to control time based decisions during allocation */
@ -465,7 +508,103 @@
}

public void cleanCaches() {
gatewayAllocator.cleanCaches();
assert assertInitialized();
existingShardsAllocators.values().forEach(ExistingShardsAllocator::cleanCaches);
}

public int getNumberOfInFlightFetches() {
assert assertInitialized();
return existingShardsAllocators.values().stream().mapToInt(ExistingShardsAllocator::getNumberOfInFlightFetches).sum();
}

public ShardAllocationDecision explainShardAllocation(ShardRouting shardRouting, RoutingAllocation allocation) {
assert allocation.debugDecision();
AllocateUnassignedDecision allocateDecision
= shardRouting.unassigned() ? explainUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN;
if (allocateDecision.isDecisionTaken()) {
return new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
} else {
return shardsAllocator.decideShardAllocation(shardRouting, allocation);
}
}

private AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting shardRouting, RoutingAllocation routingAllocation) {
assert shardRouting.unassigned();
assert routingAllocation.debugDecision();
assert assertInitialized();
final ExistingShardsAllocator existingShardsAllocator = getAllocatorForShard(shardRouting, routingAllocation);
final AllocateUnassignedDecision decision
= existingShardsAllocator.explainUnassignedShardAllocation(shardRouting, routingAllocation);
if (decision.isDecisionTaken()) {
return decision;
}
return AllocateUnassignedDecision.NOT_TAKEN;
}

private ExistingShardsAllocator getAllocatorForShard(ShardRouting shardRouting, RoutingAllocation routingAllocation) {
assert assertInitialized();
final String allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get(
routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings());
final ExistingShardsAllocator existingShardsAllocator = existingShardsAllocators.get(allocatorName);
return existingShardsAllocator != null ? existingShardsAllocator : new NotFoundAllocator(allocatorName);
}

private boolean assertInitialized() {
assert existingShardsAllocators != null: "must have set allocators first";
return true;
}

private static class NotFoundAllocator implements ExistingShardsAllocator {
private final String allocatorName;

private NotFoundAllocator(String allocatorName) {
this.allocatorName = allocatorName;
}

@Override
public void beforeAllocation(RoutingAllocation allocation) {
}

@Override
public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
}

@Override
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
unassignedAllocationHandler.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes());
}

@Override
public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation allocation) {
assert unassignedShard.unassigned();
assert allocation.debugDecision();
final List<NodeAllocationResult> nodeAllocationResults = new ArrayList<>(allocation.nodes().getSize());
for (DiscoveryNode discoveryNode : allocation.nodes()) {
nodeAllocationResults.add(new NodeAllocationResult(discoveryNode, null, allocation.decision(Decision.NO,
"allocator_plugin", "finding the previous copies of this shard requires an allocator called [%s] but " +
"that allocator was not found; perhaps the corresponding plugin is not installed",
allocatorName)));
}
return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, nodeAllocationResults);
}

@Override
public void cleanCaches() {
}

@Override
public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
}

@Override
public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
}

@Override
public int getNumberOfInFlightFetches() {
return 0;
}
}

/**
@ -0,0 +1,123 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingChangesObserver;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.gateway.GatewayAllocator;

import java.util.List;

/**
* Searches for, and allocates, shards for which there is an existing on-disk copy somewhere in the cluster. The default implementation is
* {@link GatewayAllocator}, but plugins can supply their own implementations too.
*/
public interface ExistingShardsAllocator {

/**
* Allows plugins to override how we allocate shards that may already exist on disk in the cluster.
*/
Setting<String> EXISTING_SHARDS_ALLOCATOR_SETTING = Setting.simpleString(
"index.allocation.existing_shards_allocator", GatewayAllocator.ALLOCATOR_NAME,
Setting.Property.IndexScope, Setting.Property.PrivateIndex);

/**
* Called before starting a round of allocation, allowing the allocator to invalidate some caches if appropriate.
*/
void beforeAllocation(RoutingAllocation allocation);

/**
* Called during a round of allocation after attempting to allocate all the primaries but before any replicas, allowing the allocator
* to prepare for replica allocation.
*/
void afterPrimariesBeforeReplicas(RoutingAllocation allocation);

/**
* Allocate any unassigned shards in the given {@link RoutingAllocation} for which this {@link ExistingShardsAllocator} is responsible.
*/
void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler);

/**
* Returns an explanation for a single unassigned shard.
*/
AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation);

/**
* Called when this node becomes the elected master and when it stops being the elected master, so that implementations can clean up any
* in-flight activity from an earlier mastership.
*/
void cleanCaches();

/**
* Called when the given shards have started, so that implementations can invalidate caches and clean up any in-flight activity for
* those shards.
*/
void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation);

/**
* Called when the given shards have failed, so that implementations can invalidate caches and clean up any in-flight activity for
* those shards.
*/
void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation);

/**
* @return the number of in-flight fetches under this allocator's control.
*/
int getNumberOfInFlightFetches();

/**
* Used by {@link ExistingShardsAllocator#allocateUnassigned} to handle its allocation decisions. A restricted interface to
* {@link RoutingNodes.UnassignedShards.UnassignedIterator} to limit what allocators can do.
*/
interface UnassignedAllocationHandler {

/**
* Initializes the current unassigned shard and moves it from the unassigned list.
*
* @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated.
*/
ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize,
RoutingChangesObserver routingChangesObserver);

/**
* Removes and ignores the unassigned shard (will be ignored for this run, but
* will be added back to unassigned once the metadata is constructed again).
* Typically this is used when an allocation decision prevents a shard from being allocated such
* that subsequent consumers of this API won't try to allocate this shard again.
*
* @param attempt the result of the allocation attempt
*/
void removeAndIgnore(UnassignedInfo.AllocationStatus attempt, RoutingChangesObserver changes);

/**
* updates the unassigned info and recovery source on the current unassigned shard
*
* @param unassignedInfo the new unassigned info to use
* @param recoverySource the new recovery source to use
* @return the shard with unassigned info updated
*/
ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource, RoutingChangesObserver changes);
}
}
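To make the new extension point concrete, here is a minimal sketch of a plugin-supplied allocator, assuming only the interface above (the class and allocator name are hypothetical, not part of this change). It declines every shard, leaving it unassigned until the next reroute; a real implementation would search for existing on-disk copies much like GatewayAllocator does.

import java.util.List;

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

public class NoopExistingShardsAllocator implements ExistingShardsAllocator {

    public static final String NAME = "noop_existing_shards_allocator"; // hypothetical name

    @Override
    public void beforeAllocation(RoutingAllocation allocation) {
        // nothing to prepare or invalidate
    }

    @Override
    public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
        // nothing to do between the primary and replica rounds
    }

    @Override
    public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
                                   UnassignedAllocationHandler unassignedAllocationHandler) {
        // Decline the shard: it is ignored for this round and retried on the next reroute.
        unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
    }

    @Override
    public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard,
                                                                       RoutingAllocation routingAllocation) {
        return AllocateUnassignedDecision.NOT_TAKEN;
    }

    @Override
    public void cleanCaches() {
    }

    @Override
    public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
    }

    @Override
    public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
    }

    @Override
    public int getNumberOfInFlightFetches() {
        return 0;
    }
}

A ClusterPlugin would expose such an allocator from getExistingShardsAllocators(), for example as Collections.singletonMap(NoopExistingShardsAllocator.NAME, new NoopExistingShardsAllocator()); as shown earlier, ClusterModule.setExistingShardsAllocators rejects any name that collides with an already-registered allocator.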
@ -49,6 +49,40 @@ public interface BlobContainer {
*/
InputStream readBlob(String blobName) throws IOException;

/**
* Creates a new {@link InputStream} that can be used to read the given blob starting from
* a specific {@code position} in the blob. The {@code length} is an indication of the
* number of bytes that are expected to be read from the {@link InputStream}.
*
* @param blobName The name of the blob to get an {@link InputStream} for.
* @param position The position in the blob where the next byte will be read.
* @param length An indication of the number of bytes to be read.
* @return The {@code InputStream} to read the blob.
* @throws NoSuchFileException if the blob does not exist
* @throws IOException if the blob can not be read.
*/
default InputStream readBlob(final String blobName, final long position, final long length) throws IOException {
throw new UnsupportedOperationException(); // NORELEASE
}

/**
* Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}.
*
* Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively
* request more data than they need right now and to re-use this stream for future needs if possible.
*
* Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may
* depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by
* bounding the length of the data requested.
*
* @return a hint to consumers regarding the length of data to request if there is a good chance that future reads can be satisfied from
* the same stream.
*
*/
default long readBlobPreferredLength() {
throw new UnsupportedOperationException(); // NORELEASE
}

/**
* Reads blob content from the input stream and writes it to the container in a new blob with the given name.
* This method assumes the container does not already contain a blob of the same blobName. If a blob by the
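A sketch of how a consumer might drive the new ranged-read API (the helper class is hypothetical, not part of this change); it bounds each request by the container's preferred length, in the spirit of the javadoc above:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;

import org.elasticsearch.common.blobstore.BlobContainer;

public final class RangedReadExample {

    /** Reads up to {@code length} bytes of the blob starting at {@code position}. */
    static byte[] readRange(BlobContainer container, String blobName, long position, long length) throws IOException {
        // Bound the request so containers with expensive-to-close streams are not left
        // with a large unconsumed tail (see readBlobPreferredLength above).
        final long bounded = Math.min(length, container.readBlobPreferredLength());
        try (InputStream in = container.readBlob(blobName, position, bounded)) {
            final ByteArrayOutputStream out = new ByteArrayOutputStream();
            final byte[] buffer = new byte[8192];
            int read;
            while ((read = in.read(buffer)) != -1) {
                out.write(buffer, 0, read);
            }
            return out.toByteArray();
        }
    }
}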
@ -34,6 +34,8 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.nio.channels.Channels;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.DirectoryStream;
import java.nio.file.FileAlreadyExistsException;
import java.nio.file.FileVisitResult;
@ -142,16 +144,36 @@ public class FsBlobContainer extends AbstractBlobContainer {
IOUtils.rm(blobNames.stream().map(path::resolve).toArray(Path[]::new));
}

private InputStream bufferedInputStream(InputStream inputStream) {
return new BufferedInputStream(inputStream, blobStore.bufferSizeInBytes());
}

@Override
public InputStream readBlob(String name) throws IOException {
final Path resolvedPath = path.resolve(name);
try {
return new BufferedInputStream(Files.newInputStream(resolvedPath), blobStore.bufferSizeInBytes());
return bufferedInputStream(Files.newInputStream(resolvedPath));
} catch (FileNotFoundException fnfe) {
throw new NoSuchFileException("[" + name + "] blob not found");
}
}

@Override
public InputStream readBlob(String blobName, long position, long length) throws IOException {
final SeekableByteChannel channel = Files.newByteChannel(path.resolve(blobName));
if (position > 0L) {
channel.position(position);
}
assert channel.position() == position;
return bufferedInputStream(org.elasticsearch.common.io.Streams.limitStream(Channels.newInputStream(channel), length));
}

@Override
public long readBlobPreferredLength() {
// This container returns streams that are cheap to close early, so we can tell consumers to request as much data as possible.
return Long.MAX_VALUE;
}

@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
if (failIfAlreadyExists == false) {
@ -205,6 +205,13 @@ public abstract class Streams {
return read;
}

/**
* Fully consumes the input stream, throwing the bytes away. Returns the number of bytes consumed.
*/
public static long consumeFully(InputStream inputStream) throws IOException {
return copy(inputStream, new NullOutputStream());
}

public static List<String> readAllLines(InputStream input) throws IOException {
final List<String> lines = new ArrayList<>();
readAllLines(input, lines::add);
@ -254,6 +261,13 @@ public abstract class Streams {
}
}

/**
* Limits the given input stream to the provided number of bytes
*/
public static InputStream limitStream(InputStream in, long limit) {
return new LimitedInputStream(in, limit);
}

/**
* A wrapper around a {@link BytesStream} that makes the close operation a flush. This is
* needed as sometimes a stream will be closed but the bytes that the stream holds still need
@ -297,4 +311,93 @@ public abstract class Streams {
return delegate.bytes();
}
}

/**
* A wrapper around an {@link InputStream} that limits the number of bytes that can be read from the stream.
*/
static class LimitedInputStream extends FilterInputStream {

private static final long NO_MARK = -1L;

private long currentLimit; // is always non-negative
private long limitOnLastMark;

LimitedInputStream(InputStream in, long limit) {
super(in);
if (limit < 0L) {
throw new IllegalArgumentException("limit must be non-negative");
}
this.currentLimit = limit;
this.limitOnLastMark = NO_MARK;
}

@Override
public int read() throws IOException {
final int result;
if (currentLimit == 0 || (result = in.read()) == -1) {
return -1;
} else {
currentLimit--;
return result;
}
}

@Override
public int read(byte[] b, int off, int len) throws IOException {
final int result;
if (currentLimit == 0 || (result = in.read(b, off, Math.toIntExact(Math.min(len, currentLimit)))) == -1) {
return -1;
} else {
currentLimit -= result;
return result;
}
}

@Override
public long skip(long n) throws IOException {
final long skipped = in.skip(Math.min(n, currentLimit));
currentLimit -= skipped;
return skipped;
}

@Override
public int available() throws IOException {
return Math.toIntExact(Math.min(in.available(), currentLimit));
}

@Override
public void close() throws IOException {
in.close();
}

@Override
public synchronized void mark(int readlimit) {
in.mark(readlimit);
limitOnLastMark = currentLimit;
}

@Override
public synchronized void reset() throws IOException {
in.reset();
if (limitOnLastMark != NO_MARK) {
currentLimit = limitOnLastMark;
}
}
}

/**
* OutputStream that just throws all the bytes away
*/
static class NullOutputStream extends OutputStream {

@Override
public void write(int b) {

}

@Override
public void write(byte[] b, int off, int len) {

}
}
}
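The two new helpers compose naturally; a small sketch using limitStream and consumeFully together (the example class is hypothetical, not part of this change):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.elasticsearch.common.io.Streams;

public final class LimitStreamExample {
    public static void main(String[] args) throws IOException {
        final InputStream source = new ByteArrayInputStream(new byte[100]);
        // Expose only the first 10 bytes of the underlying 100-byte stream.
        final InputStream limited = Streams.limitStream(source, 10);
        // consumeFully drains the stream and returns the number of bytes it read.
        System.out.println(Streams.consumeFully(limited)); // prints 10
    }
}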
@ -21,6 +21,7 @@ package org.elasticsearch.common.settings;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.MetadataIndexStateService;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
@ -171,6 +172,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
IndexSettings.FINAL_PIPELINE,
MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING,
IndexSettings.ON_HEAP_ID_TERMS_INDEX,
ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING,

// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", (s) -> {
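Because the setting is registered with Property.PrivateIndex, it is set by system code (for instance a plugin that creates or restores indices) rather than directly by users. A hedged sketch, with a hypothetical allocator name:

import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.common.settings.Settings;

public final class AllocatorSettingExample {
    /** Builds index settings that point an index at a plugin-provided allocator. */
    static Settings customAllocatorSettings() {
        return Settings.builder()
            .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "my_custom_allocator")
            .build();
    }
}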
@ -22,10 +22,10 @@ package org.elasticsearch.gateway;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
@ -45,40 +45,37 @@ public abstract class BaseGatewayShardAllocator {
protected final Logger logger = LogManager.getLogger(this.getClass());

/**
* Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist.
* Allocate an unassigned shard to nodes (if any) where valid copies of the shard already exist.
* It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)}
* to make decisions on assigning shards to nodes.
*
* @param shardRouting the shard to allocate
* @param allocation the allocation state container object
* @param unassignedAllocationHandler handles the allocation of the current shard
*/
public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
final ShardRouting shard = unassignedIterator.next();
final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shard, allocation, logger);
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
ExistingShardsAllocator.UnassignedAllocationHandler unassignedAllocationHandler) {
final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shardRouting, allocation, logger);

if (allocateUnassignedDecision.isDecisionTaken() == false) {
// no decision was taken by this allocator
continue;
}
if (allocateUnassignedDecision.isDecisionTaken() == false) {
// no decision was taken by this allocator
return;
}

if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) {
unassignedIterator.initialize(allocateUnassignedDecision.getTargetNode().getId(),
allocateUnassignedDecision.getAllocationId(),
shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
allocation.changes());
} else {
unassignedIterator.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
}
if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) {
unassignedAllocationHandler.initialize(allocateUnassignedDecision.getTargetNode().getId(),
allocateUnassignedDecision.getAllocationId(),
shardRouting.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
allocation.changes());
} else {
unassignedAllocationHandler.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
}
}

/**
* Make a decision on the allocation of an unassigned shard. This method is used by
* {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not
* the shard can be allocated by this allocator and if so, to which node it will be allocated.
* {@link #allocateUnassigned(ShardRouting, RoutingAllocation, ExistingShardsAllocator.UnassignedAllocationHandler)} to make decisions
* about whether or not the shard can be allocated by this allocator and if so, to which node it will be allocated.
*
* @param unassignedShard the unassigned shard to allocate
* @param allocation the current routing state
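For orientation, a minimal sketch of the makeAllocationDecision contract that the refactored allocateUnassigned above relies on (the subclass is hypothetical; returning NOT_TAKEN leaves the shard for other allocators and ultimately the balanced allocator):

import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.gateway.BaseGatewayShardAllocator;

public class DecliningShardAllocator extends BaseGatewayShardAllocator {
    @Override
    public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                             RoutingAllocation allocation,
                                                             Logger logger) {
        // Taking no decision means allocateUnassigned() neither initializes nor ignores
        // the shard, so it stays available for other allocators to handle.
        return AllocateUnassignedDecision.NOT_TAKEN;
    }
}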
@ -30,9 +30,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RerouteService;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.Priority;
@ -50,7 +50,9 @@ import java.util.concurrent.ConcurrentMap;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;

public class GatewayAllocator {
public class GatewayAllocator implements ExistingShardsAllocator {

public static final String ALLOCATOR_NAME = "gateway_allocator";

private static final Logger logger = LogManager.getLogger(GatewayAllocator.class);

@ -74,6 +76,7 @@ public class GatewayAllocator {
this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction);
}

@Override
public void cleanCaches() {
Releasables.close(asyncFetchStarted.values());
asyncFetchStarted.clear();
@ -88,7 +91,8 @@ public class GatewayAllocator {
this.replicaShardAllocator = null;
}

public int getNumberOfInFlightFetch() {
@Override
public int getNumberOfInFlightFetches() {
int count = 0;
for (AsyncShardFetch<TransportNodesListGatewayStartedShards.NodeGatewayStartedShards> fetch : asyncFetchStarted.values()) {
count += fetch.getNumberOfInFlightFetches();
@ -99,47 +103,64 @@ public class GatewayAllocator {
return count;
}

public void applyStartedShards(final RoutingAllocation allocation, final List<ShardRouting> startedShards) {
@Override
public void applyStartedShards(final List<ShardRouting> startedShards, final RoutingAllocation allocation) {
for (ShardRouting startedShard : startedShards) {
Releasables.close(asyncFetchStarted.remove(startedShard.shardId()));
Releasables.close(asyncFetchStore.remove(startedShard.shardId()));
}
}

public void applyFailedShards(final RoutingAllocation allocation, final List<FailedShard> failedShards) {
@Override
public void applyFailedShards(final List<FailedShard> failedShards, final RoutingAllocation allocation) {
for (FailedShard failedShard : failedShards) {
Releasables.close(asyncFetchStarted.remove(failedShard.getRoutingEntry().shardId()));
Releasables.close(asyncFetchStore.remove(failedShard.getRoutingEntry().shardId()));
}
}

public void allocateUnassigned(final RoutingAllocation allocation) {
@Override
public void beforeAllocation(final RoutingAllocation allocation) {
assert primaryShardAllocator != null;
assert replicaShardAllocator != null;
ensureAsyncFetchStorePrimaryRecency(allocation);
innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator);
}

@Override
public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
assert replicaShardAllocator != null;
if (allocation.routingNodes().hasInactiveShards()) {
// cancel existing recoveries if we have a better match
replicaShardAllocator.processExistingRecoveries(allocation);
}
}

@Override
public void allocateUnassigned(ShardRouting shardRouting, final RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
assert primaryShardAllocator != null;
assert replicaShardAllocator != null;
innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator, shardRouting, unassignedAllocationHandler);
}

// allow for testing infra to change shard allocators implementation
protected static void innerAllocatedUnassigned(RoutingAllocation allocation,
PrimaryShardAllocator primaryShardAllocator,
ReplicaShardAllocator replicaShardAllocator) {
RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
unassigned.sort(PriorityComparator.getAllocationComparator(allocation)); // sort for priority ordering

primaryShardAllocator.allocateUnassigned(allocation);
if (allocation.routingNodes().hasInactiveShards()) {
// cancel existing recoveries if we have a better match
replicaShardAllocator.processExistingRecoveries(allocation);
ReplicaShardAllocator replicaShardAllocator,
ShardRouting shardRouting,
ExistingShardsAllocator.UnassignedAllocationHandler unassignedAllocationHandler) {
assert shardRouting.unassigned();
if (shardRouting.primary()) {
primaryShardAllocator.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler);
} else {
replicaShardAllocator.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler);
}
replicaShardAllocator.allocateUnassigned(allocation);
}

/**
* Computes and returns the decision for allocating a single unassigned shard. If called on an assigned shard,
* {@link AllocateUnassignedDecision#NOT_TAKEN} is returned.
*/
public AllocateUnassignedDecision decideUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) {
@Override
public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) {
assert unassignedShard.unassigned();
assert routingAllocation.debugDecision();
if (unassignedShard.primary()) {
assert primaryShardAllocator != null;
return primaryShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger);
@ -271,4 +271,17 @@ final class CompositeIndexEventListener implements IndexEventListener {
}
}
}

@Override
public void beforeIndexShardRecovery(final IndexShard indexShard, final IndexSettings indexSettings) {
for (IndexEventListener listener : listeners) {
try {
listener.beforeIndexShardRecovery(indexShard, indexSettings);
} catch (Exception e) {
logger.warn(() -> new ParameterizedMessage("failed to invoke the listener before the shard recovery starts for {}",
indexShard.shardId()), e);
throw e;
}
}
}
}
@ -174,4 +174,15 @@ public interface IndexEventListener {
* @param shardId the shard ID the store belongs to
*/
default void onStoreClosed(ShardId shardId) {}

/**
* Called before the index shard starts to recover.
* Note: unlike all other methods in this interface, this method is not called using the cluster state update thread. When this method
* is called, the shard has already transitioned to the RECOVERING state.
*
* @param indexShard the shard that is about to recover
* @param indexSettings the shard's index settings
*/
default void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings) {
}
}
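A sketch of hooking the new callback (the listener is hypothetical; registration would typically happen via IndexModule#addIndexEventListener in a plugin's onIndexModule hook):

import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;

public class RecoveryLoggingListener implements IndexEventListener {
    @Override
    public void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings) {
        // Runs once the shard is in the RECOVERING state but before any recovery work starts;
        // an exception thrown here is rethrown by CompositeIndexEventListener and fails the recovery.
        System.out.println("about to recover " + indexShard.shardId());
    }
}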
@ -1354,6 +1354,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl
}
}

public void preRecovery() {
final IndexShardState currentState = this.state; // single volatile read
if (currentState == IndexShardState.CLOSED) {
throw new IndexShardNotRecoveringException(shardId, currentState);
}
assert currentState == IndexShardState.RECOVERING : "expected a recovering shard " + shardId + " but got " + currentState;
indexEventListener.beforeIndexShardRecovery(this, indexSettings);
}

public void postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException {
synchronized (postRecoveryMutex) {
// we need to refresh again to expose all operations that were indexed until now. Otherwise
@ -369,6 +369,7 @@ final class StoreRecovery {
* Recovers the state of the shard from the store.
*/
private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRecoveryException {
indexShard.preRecovery();
final RecoveryState recoveryState = indexShard.recoveryState();
final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE;
indexShard.prepareForIndexRecovery();
@ -460,6 +461,7 @@ final class StoreRecovery {
private void restore(IndexShard indexShard, Repository repository, SnapshotRecoverySource restoreSource,
ActionListener<Boolean> listener) {
logger.debug("restoring from {} ...", indexShard.recoveryState().getRecoverySource());
indexShard.preRecovery();
final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog();
if (restoreSource == null) {
listener.onFailure(new IndexShardRestoreFailedException(shardId, "empty restore source"));
@ -23,6 +23,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
@ -1852,20 +1854,58 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC

static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId,
ChannelFactory channelFactory, long primaryTerm) throws IOException {
return createEmptyTranslog(location, shardId, initialGlobalCheckpoint, primaryTerm, null, channelFactory);
}

/**
* Creates a new empty translog within the specified {@code location} that contains the given {@code initialGlobalCheckpoint},
* {@code primaryTerm} and {@code translogUUID}.
*
* This method should be used directly under specific circumstances like for shards that will see no indexing. Specifying a non-unique
* translog UUID could cause a lot of issues and that's why in all (but one) cases the method
* {@link #createEmptyTranslog(Path, long, ShardId, long)} should be used instead.
*
* @param location a {@link Path} to the directory that will contain the translog files (translog + translog checkpoint)
* @param shardId the {@link ShardId}
* @param initialGlobalCheckpoint the global checkpoint to initialize the translog with
* @param primaryTerm the shard's primary term to initialize the translog with
* @param translogUUID the unique identifier to initialize the translog with
* @param factory a {@link ChannelFactory} used to open translog files
* @return the translog's unique identifier
* @throws IOException if something went wrong during translog creation
*/
public static String createEmptyTranslog(final Path location,
final ShardId shardId,
final long initialGlobalCheckpoint,
final long primaryTerm,
@Nullable final String translogUUID,
@Nullable final ChannelFactory factory) throws IOException {
IOUtils.rm(location);
Files.createDirectories(location);
final Checkpoint checkpoint =
Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1);

final long generation = 1L;
final long minTranslogGeneration = 1L;
final ChannelFactory channelFactory = factory != null ? factory : FileChannel::open;
final String uuid = Strings.hasLength(translogUUID) ? translogUUID : UUIDs.randomBase64UUID();
final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME);
final Path translogFile = location.resolve(getFilename(generation));
final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, minTranslogGeneration);

Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW);
IOUtils.fsync(checkpointFile, false);
final String translogUUID = UUIDs.randomBase64UUID();
TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1,
location.resolve(getFilename(1)), channelFactory,
new ByteSizeValue(10), 1, initialGlobalCheckpoint,
() -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm,
new TragicExceptionHolder(), seqNo -> { throw new UnsupportedOperationException(); });
final TranslogWriter writer = TranslogWriter.create(shardId, uuid, generation, translogFile, channelFactory,
new ByteSizeValue(10), minTranslogGeneration, initialGlobalCheckpoint,
() -> {
throw new UnsupportedOperationException();
}, () -> {
throw new UnsupportedOperationException();
},
primaryTerm,
new TragicExceptionHolder(),
seqNo -> {
throw new UnsupportedOperationException();
});
writer.close();
return translogUUID;
return uuid;
}
}

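For readers following the hunk above: the pre-existing four-argument overload now simply delegates, passing a null translog UUID and a null channel factory so that a fresh base64 UUID and FileChannel::open are chosen. A minimal caller sketch, assuming this internal Translog API; shardDataPath, shardId and primaryTerm are illustrative stand-ins, not part of this diff:

    // hypothetical caller of the convenience overload shown above
    final Path translogLocation = shardDataPath.resolve("translog"); // illustrative location
    final String translogUUID = Translog.createEmptyTranslog(
        translogLocation, SequenceNumbers.NO_OPS_PERFORMED, shardId, primaryTerm);
    // translogUUID identifies the freshly written translog-1.tlog / translog.ckp pair
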
@ -172,10 +172,12 @@ public class PeerRecoveryTargetService implements IndexEventListener {
timer = recoveryTarget.state().getTimer();
cancellableThreads = recoveryTarget.cancellableThreads();
try {
final IndexShard indexShard = recoveryTarget.indexShard();
indexShard.preRecovery();
assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node";
logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId());
recoveryTarget.indexShard().prepareForIndexRecovery();
final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint();
indexShard.prepareForIndexRecovery();
final long startingSeqNo = indexShard.recoverLocallyUpToGlobalCheckpoint();
assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG :
"unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]";
request = getStartRecoveryRequest(logger, clusterService.localNode(), recoveryTarget, startingSeqNo);

@ -616,8 +616,12 @@ public class Node implements Closeable {
);
injector = modules.createInjector();

// TODO hack around circular dependencies problems in AllocationService
clusterModule.getAllocationService().setGatewayAllocator(injector.getInstance(GatewayAllocator.class));
// We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there.
// The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it
// completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation
// service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a
// reroute, which needs to call into the allocation service. We close the loop here:
clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class));

List<LifecycleComponent> pluginLifecycleComponents = pluginComponents.stream()
.filter(p -> p instanceof LifecycleComponent)

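The replacement comment describes a construct-then-wire pattern: the AllocationService is built first and the allocators that call back into it are attached through a setter rather than the constructor. A stripped-down sketch of the shape; all names below are illustrative, not the actual Elasticsearch types:

    interface Allocator { void allocate(); }

    class Service {
        private Allocator allocator; // wired after construction to break the cycle
        void setAllocator(Allocator allocator) { this.allocator = allocator; }
        void reroute() { allocator.allocate(); }
    }

    class CallbackAllocator implements Allocator {
        private final Runnable rerouteTrigger; // calls back into Service.reroute()
        CallbackAllocator(Runnable rerouteTrigger) { this.rerouteTrigger = rerouteTrigger; }
        public void allocate() { /* fetch shard copies asynchronously, then rerouteTrigger.run() */ }
    }

    // wiring: the service exists first, so the allocator can capture a callback to it
    // Service service = new Service();
    // service.setAllocator(new CallbackAllocator(service::reroute));
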
@ -19,16 +19,17 @@

package org.elasticsearch.plugins;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;

import java.util.Collection;
import java.util.Collections;
import java.util.Map;
import java.util.function.Supplier;

/**
* An extension point for {@link Plugin} implementations to customize behavior of cluster management.
*/
@ -59,6 +60,15 @@ public interface ClusterPlugin {
return Collections.emptyMap();
}

/**
* Return {@link ExistingShardsAllocator} implementations added by this plugin; the index setting
* {@link ExistingShardsAllocator#EXISTING_SHARDS_ALLOCATOR_SETTING} sets the key of the allocator to use to allocate its shards. The
* default allocator is {@link org.elasticsearch.gateway.GatewayAllocator}.
*/
default Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
return Collections.emptyMap();
}

/**
* Called when the node is started
*/

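A plugin contributes named allocators through this hook and indices select one by name; the ClusterModuleTests hunk further down uses exactly this shape. A minimal sketch, where MyAllocatorPlugin and MyExistingShardsAllocator are illustrative names:

    public class MyAllocatorPlugin extends Plugin implements ClusterPlugin {
        @Override
        public Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
            // the map key is the value an index sets for
            // ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING
            return Collections.singletonMap("my_allocator", new MyExistingShardsAllocator());
        }
    }
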
@ -25,6 +25,7 @@ import java.util.Map;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.xcontent.NamedXContentRegistry;
import org.elasticsearch.env.Environment;
import org.elasticsearch.repositories.RepositoriesModule;
import org.elasticsearch.repositories.Repository;

/**
@ -58,4 +59,13 @@ public interface RepositoryPlugin {
ClusterService clusterService) {
return Collections.emptyMap();
}

/**
* Passes down the current {@link RepositoriesModule} to repository plugins.
*
* @param module the current {@link RepositoriesModule}
*/
default void onRepositoriesModule(RepositoriesModule module) {
// NORELEASE
}
}

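Repository plugins can use the new callback to capture the module during node construction; a sketch under the assumption that the plugin only wants the RepositoriesService accessor shown in the next hunk (the plugin class and field are illustrative):

    public class MyRepositoryPlugin extends Plugin implements RepositoryPlugin {
        private volatile RepositoriesService repositoriesService; // illustrative field

        @Override
        public void onRepositoriesModule(RepositoriesModule module) {
            this.repositoriesService = module.getRepositoryService();
        }
    }
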
@ -73,6 +73,8 @@ public final class RepositoriesModule {
Map<String, Repository.Factory> internalRepositoryTypes = Collections.unmodifiableMap(internalFactories);
repositoriesService = new RepositoriesService(settings, clusterService, transportService, repositoryTypes,
internalRepositoryTypes, threadPool);

repoPlugins.forEach(rp -> rp.onRepositoriesModule(this));
}

public RepositoriesService getRepositoryService() {

@ -1013,7 +1013,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
return shardContainer(indexId, shardId.getId());
}

private BlobContainer shardContainer(IndexId indexId, int shardId) {
public BlobContainer shardContainer(IndexId indexId, int shardId) {
return blobStore().blobContainer(indicesPath().add(indexId.getId()).add(Integer.toString(shardId)));
}

@ -1045,8 +1045,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}

protected void assertSnapshotOrGenericThread() {
assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT)
|| Thread.currentThread().getName().contains(ThreadPool.Names.GENERIC) :
assert Thread.currentThread().getName().contains('[' + ThreadPool.Names.SNAPSHOT + ']')
|| Thread.currentThread().getName().contains('[' + ThreadPool.Names.GENERIC + ']') :
"Expected current thread [" + Thread.currentThread() + "] to be the snapshot or generic thread.";
}

@ -1984,7 +1984,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
/**
* Loads information about shard snapshot
*/
private BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) {
public BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) {
try {
return indexShardSnapshotFormat.read(shardContainer, snapshotId.getUUID());
} catch (NoSuchFileException ex) {

@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
@ -57,7 +58,7 @@ public class ClusterAllocationExplainActionTests extends ESTestCase {
RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()),
clusterState.getRoutingNodes(), clusterState, null, System.nanoTime());
ClusterAllocationExplanation cae = TransportClusterAllocationExplainAction.explainShard(shard, allocation, null, randomBoolean(),
new TestGatewayAllocator(), new ShardsAllocator() {
new AllocationService(null, new TestGatewayAllocator(), new ShardsAllocator() {
@Override
public void allocate(RoutingAllocation allocation) {
// no-op
@ -71,7 +72,7 @@ public class ClusterAllocationExplainActionTests extends ESTestCase {
throw new UnsupportedOperationException("cannot explain");
}
}
});
}, null));

assertEquals(shard.currentNodeId(), cae.getCurrentNode().getId());
assertFalse(cae.getShardAllocationDecision().isDecisionTaken());

@ -22,6 +22,7 @@ package org.elasticsearch.cluster;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.metadata.RepositoriesMetadata;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
@ -50,7 +51,9 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.test.gateway.TestGatewayAllocator;

import java.util.Arrays;
import java.util.Collection;
@ -225,4 +228,26 @@ public class ClusterModuleTests extends ModuleTestCase {
assertNotNull(fixedClusterState.metadata().custom(whiteListedMetadataCustom));
assertNull(fixedClusterState.metadata().custom("other"));
}

public void testRejectsReservedExistingShardsAllocatorName() {
final ClusterModule clusterModule = new ClusterModule(Settings.EMPTY, clusterService,
Collections.singletonList(existingShardsAllocatorPlugin(GatewayAllocator.ALLOCATOR_NAME)), clusterInfoService);
expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator()));
}

public void testRejectsDuplicateExistingShardsAllocatorName() {
final ClusterModule clusterModule = new ClusterModule(Settings.EMPTY, clusterService,
Arrays.asList(existingShardsAllocatorPlugin("duplicate"), existingShardsAllocatorPlugin("duplicate")), clusterInfoService);
expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator()));
}

private static ClusterPlugin existingShardsAllocatorPlugin(final String allocatorName) {
return new ClusterPlugin() {
@Override
public Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
return Collections.singletonMap(allocatorName, new TestGatewayAllocator());
}
};
}

}

@ -40,6 +40,7 @@ import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.RoutingTableGenerator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@ -141,7 +142,8 @@ public class ClusterStateHealthTests extends ESTestCase {
listenerCalled.await();

TransportClusterHealthAction action = new TransportClusterHealthAction(transportService,
clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator());
clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver,
new AllocationService(null, new TestGatewayAllocator(), null, null));
PlainActionFuture<ClusterHealthResponse> listener = new PlainActionFuture<>();
action.execute(new ClusterHealthRequest().waitForGreenStatus(), listener);

@ -172,7 +172,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
logger.info("--> check that old primary shard does not get promoted to primary again");
// kick reroute and wait for all shard states to be fetched
client(master).admin().cluster().prepareReroute().get();
assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(),
assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetches(),
equalTo(0)));
// kick reroute a second time and check that all shards are unassigned
assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(),

@ -18,14 +18,49 @@
*/
package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.gateway.TestGatewayAllocator;

import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

import static org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_NO;
import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING;
import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.empty;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;

public class AllocationServiceTests extends ESTestCase {
@ -74,4 +109,242 @@ public class AllocationServiceTests extends ESTestCase {
assertThat(abbreviated, containsString("formatted"));
assertThat(abbreviated, not(containsString("original")));
}

public void testAssignsPrimariesInPriorityOrderThenReplicas() {
// throttle (incoming) recoveries in order to observe the order of operations, but do not throttle outgoing recoveries since
// the effects of that depend on the earlier (random) allocations
final Settings settings = Settings.builder()
.put(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1)
.put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 1)
.put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE)
.build();
final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
final AllocationService allocationService = new AllocationService(
new AllocationDeciders(Arrays.asList(
new SameShardAllocationDecider(settings, clusterSettings),
new ThrottlingAllocationDecider(settings, clusterSettings))),
new ShardsAllocator() {
@Override
public void allocate(RoutingAllocation allocation) {
// all primaries are handled by existing shards allocators in these tests; even the invalid allocator prevents shards
// from falling through to here
assertThat(allocation.routingNodes().unassigned().getNumPrimaries(), equalTo(0));
}

@Override
public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
return ShardAllocationDecision.NOT_TAKEN;
}
}, new EmptyClusterInfoService());

final String unrealisticAllocatorName = "unrealistic";
final Map<String, ExistingShardsAllocator> allocatorMap = new HashMap<>();
final TestGatewayAllocator testGatewayAllocator = new TestGatewayAllocator();
allocatorMap.put(GatewayAllocator.ALLOCATOR_NAME, testGatewayAllocator);
allocatorMap.put(unrealisticAllocatorName, new UnrealisticAllocator());
allocationService.setExistingShardsAllocators(allocatorMap);

final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
nodesBuilder.add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT));
nodesBuilder.add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT));
nodesBuilder.add(new DiscoveryNode("node3", buildNewFakeTransportAddress(), Version.CURRENT));

final Metadata.Builder metaData = Metadata.builder()
// create 3 indices with different priorities. The high and low priority indices use the default allocator which (in this test)
// does not allocate any replicas, whereas the medium priority one uses the unrealistic allocator which does allocate replicas
.put(indexMetadata("highPriority", Settings.builder()
.put(IndexMetadata.SETTING_PRIORITY, 10)))
.put(indexMetadata("mediumPriority", Settings.builder()
.put(IndexMetadata.SETTING_PRIORITY, 5)
.put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), unrealisticAllocatorName)))
.put(indexMetadata("lowPriority", Settings.builder()
.put(IndexMetadata.SETTING_PRIORITY, 3)))

// also create a 4th index with arbitrary priority and an invalid allocator that we expect to ignore
.put(indexMetadata("invalid", Settings.builder()
.put(IndexMetadata.SETTING_PRIORITY, between(0, 15))
.put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "unknown")));

final RoutingTable.Builder routingTableBuilder = RoutingTable.builder()
.addAsRecovery(metaData.get("highPriority"))
.addAsRecovery(metaData.get("mediumPriority"))
.addAsRecovery(metaData.get("lowPriority"))
.addAsRecovery(metaData.get("invalid"));

final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.nodes(nodesBuilder)
.metadata(metaData)
.routingTable(routingTableBuilder.build())
.build();

// permit the testGatewayAllocator to allocate primaries to every node
for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
final ShardRouting primaryShard = indexShardRoutingTable.primaryShard();
for (DiscoveryNode node : clusterState.nodes()) {
testGatewayAllocator.addKnownAllocation(primaryShard.initialize(node.getId(), FAKE_IN_SYNC_ALLOCATION_ID, 0L));
}
}
}

final ClusterState reroutedState1 = rerouteAndStartShards(allocationService, clusterState);
final RoutingTable routingTable1 = reroutedState1.routingTable();
// the test harness only permits one recovery per node, so we must have allocated all the high-priority primaries and one of the
// medium-priority ones
assertThat(routingTable1.shardsWithState(ShardRoutingState.INITIALIZING), empty());
assertThat(routingTable1.shardsWithState(ShardRoutingState.RELOCATING), empty());
assertTrue(routingTable1.shardsWithState(ShardRoutingState.STARTED).stream().allMatch(ShardRouting::primary));
assertThat(routingTable1.index("highPriority").primaryShardsActive(), equalTo(2));
assertThat(routingTable1.index("mediumPriority").primaryShardsActive(), equalTo(1));
assertThat(routingTable1.index("lowPriority").shardsWithState(ShardRoutingState.STARTED), empty());
assertThat(routingTable1.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty());

final ClusterState reroutedState2 = rerouteAndStartShards(allocationService, reroutedState1);
final RoutingTable routingTable2 = reroutedState2.routingTable();
// this reroute starts the one remaining medium-priority primary and both of the low-priority ones, but no replicas
assertThat(routingTable2.shardsWithState(ShardRoutingState.INITIALIZING), empty());
assertThat(routingTable2.shardsWithState(ShardRoutingState.RELOCATING), empty());
assertTrue(routingTable2.shardsWithState(ShardRoutingState.STARTED).stream().allMatch(ShardRouting::primary));
assertTrue(routingTable2.index("highPriority").allPrimaryShardsActive());
assertTrue(routingTable2.index("mediumPriority").allPrimaryShardsActive());
assertTrue(routingTable2.index("lowPriority").allPrimaryShardsActive());
assertThat(routingTable2.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty());

final ClusterState reroutedState3 = rerouteAndStartShards(allocationService, reroutedState2);
final RoutingTable routingTable3 = reroutedState3.routingTable();
// this reroute starts the two medium-priority replicas since their allocator permits this
assertThat(routingTable3.shardsWithState(ShardRoutingState.INITIALIZING), empty());
assertThat(routingTable3.shardsWithState(ShardRoutingState.RELOCATING), empty());
assertTrue(routingTable3.index("highPriority").allPrimaryShardsActive());
assertThat(routingTable3.index("mediumPriority").shardsWithState(ShardRoutingState.UNASSIGNED), empty());
assertTrue(routingTable3.index("lowPriority").allPrimaryShardsActive());
assertThat(routingTable3.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty());
}

public void testExplainsNonAllocationOfShardWithUnknownAllocator() {
final AllocationService allocationService = new AllocationService(null, null, null);
allocationService.setExistingShardsAllocators(
Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, new TestGatewayAllocator()));

final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
nodesBuilder.add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT));
nodesBuilder.add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT));

final Metadata.Builder metadata = Metadata.builder().put(indexMetadata("index", Settings.builder()
.put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "unknown")));

final RoutingTable.Builder routingTableBuilder = RoutingTable.builder().addAsRecovery(metadata.get("index"));

final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.nodes(nodesBuilder)
.metadata(metadata)
.routingTable(routingTableBuilder.build())
.build();

final RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()),
clusterState.getRoutingNodes(), clusterState, ClusterInfo.EMPTY, 0L);
allocation.setDebugMode(randomBoolean() ? RoutingAllocation.DebugMode.ON : RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS);

final ShardAllocationDecision shardAllocationDecision
= allocationService.explainShardAllocation(clusterState.routingTable().index("index").shard(0).primaryShard(), allocation);

assertTrue(shardAllocationDecision.isDecisionTaken());
assertThat(shardAllocationDecision.getAllocateDecision().getAllocationStatus(),
equalTo(UnassignedInfo.AllocationStatus.NO_VALID_SHARD_COPY));
assertThat(shardAllocationDecision.getAllocateDecision().getAllocationDecision(),
equalTo(AllocationDecision.NO_VALID_SHARD_COPY));
assertThat(shardAllocationDecision.getAllocateDecision().getExplanation(), equalTo("cannot allocate because a previous copy of " +
"the primary shard existed but can no longer be found on the nodes in the cluster"));

for (NodeAllocationResult nodeAllocationResult : shardAllocationDecision.getAllocateDecision().nodeDecisions) {
assertThat(nodeAllocationResult.getNodeDecision(), equalTo(AllocationDecision.NO));
assertThat(nodeAllocationResult.getCanAllocateDecision().type(), equalTo(Decision.Type.NO));
assertThat(nodeAllocationResult.getCanAllocateDecision().label(), equalTo("allocator_plugin"));
assertThat(nodeAllocationResult.getCanAllocateDecision().getExplanation(), equalTo("finding the previous copies of this " +
"shard requires an allocator called [unknown] but that allocator was not found; perhaps the corresponding plugin is " +
"not installed"));
}
}

private static final String FAKE_IN_SYNC_ALLOCATION_ID = "_in_sync_"; // so we can allocate primaries anywhere

private static IndexMetadata.Builder indexMetadata(String name, Settings.Builder settings) {
return IndexMetadata.builder(name)
.settings(settings(Version.CURRENT).put(settings.build()))
.numberOfShards(2).numberOfReplicas(1)
.putInSyncAllocationIds(0, Collections.singleton(FAKE_IN_SYNC_ALLOCATION_ID))
.putInSyncAllocationIds(1, Collections.singleton(FAKE_IN_SYNC_ALLOCATION_ID));
}

/**
* Allocates shards to nodes regardless of whether there's already a shard copy there.
*/
private static class UnrealisticAllocator implements ExistingShardsAllocator {

@Override
public void beforeAllocation(RoutingAllocation allocation) {
}

@Override
public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
}

@Override
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
final AllocateUnassignedDecision allocateUnassignedDecision = explainUnassignedShardAllocation(shardRouting, allocation);
if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) {
unassignedAllocationHandler.initialize(allocateUnassignedDecision.getTargetNode().getId(),
shardRouting.primary() ? FAKE_IN_SYNC_ALLOCATION_ID : null, 0L, allocation.changes());
} else {
unassignedAllocationHandler.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
}
}

@Override
public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting shardRouting, RoutingAllocation allocation) {
boolean throttled = false;

for (final RoutingNode routingNode : allocation.routingNodes()) {
final Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
if (decision.type() == Decision.Type.YES) {
return AllocateUnassignedDecision.yes(routingNode.node(), null, null, false);
} else {
if (shardRouting.index().getName().equals("mediumPriority") && shardRouting.primary() == false
&& decision.type() == Decision.Type.THROTTLE) {
allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
}
}

throttled = throttled || decision.type() == Decision.Type.THROTTLE;
}

return throttled ? AllocateUnassignedDecision.throttle(null)
: AllocateUnassignedDecision.no(DECIDERS_NO, null);
}

@Override
public void cleanCaches() {
}

@Override
public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
}

@Override
public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
}

@Override
public int getNumberOfInFlightFetches() {
return 0;
}
}

private static ClusterState rerouteAndStartShards(final AllocationService allocationService, final ClusterState clusterState) {
final ClusterState reroutedState = allocationService.reroute(clusterState, "test");
return allocationService.applyStartedShards(reroutedState,
reroutedState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING));
}

}

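The priority-order test above also shows the per-index opt-in: an index picks a non-default allocator by name via ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING. A minimal sketch of such index settings (the "my_allocator" key is illustrative and would have to be registered by a ClusterPlugin as sketched earlier):

    // illustrative only: selecting a plugin-provided existing-shards allocator for one index
    final Settings indexSettings = Settings.builder()
        .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "my_allocator")
        .build();
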
@ -566,19 +566,13 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {

AllocationService strategy = createAllocationService(Settings.EMPTY, new TestGatewayAllocator() {
@Override
public void allocateUnassigned(RoutingAllocation allocation) {
if (allocateTest1.get() == false) {
RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
RoutingNodes.UnassignedShards.UnassignedIterator iterator = unassigned.iterator();
while (iterator.hasNext()) {
ShardRouting next = iterator.next();
if ("test1".equals(next.index().getName())) {
iterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
}

}
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
if (allocateTest1.get() == false && "test1".equals(shardRouting.index().getName())) {
unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
} else {
super.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler);
}
super.allocateUnassigned(allocation);
}
});

@ -667,11 +661,10 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
.put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(),
ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new TestGatewayAllocator() {
@Override
public void allocateUnassigned(RoutingAllocation allocation) {
public void beforeAllocation(RoutingAllocation allocation) {
if (hasFetches.get()) {
allocation.setHasPendingAsyncFetch();
}
super.allocateUnassigned(allocation);
}
});

@ -18,14 +18,78 @@
*/
package org.elasticsearch.common.blobstore.fs;

import org.apache.lucene.mockfile.FilterFileSystemProvider;
import org.apache.lucene.mockfile.FilterSeekableByteChannel;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.io.PathUtilsForTesting;
import org.elasticsearch.common.io.Streams;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.test.ESTestCase;
import org.junit.After;
import org.junit.Before;

import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.ByteBuffer;
import java.nio.channels.SeekableByteChannel;
import java.nio.file.FileSystem;
import java.nio.file.Files;
import java.nio.file.OpenOption;
import java.nio.file.Path;
import java.nio.file.attribute.FileAttribute;
import java.nio.file.spi.FileSystemProvider;
import java.util.Locale;
import java.util.Set;
import java.util.concurrent.atomic.AtomicLong;
import java.util.function.Consumer;

import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.startsWith;

@LuceneTestCase.SuppressFileSystems("*") // we do our own mocking
public class FsBlobContainerTests extends ESTestCase {

final AtomicLong totalBytesRead = new AtomicLong(0);
FileSystem fileSystem = null;

@Before
public void setupMockFileSystems() {
FileSystemProvider fileSystemProvider = new MockFileSystemProvider(PathUtils.getDefaultFileSystem(), totalBytesRead::addAndGet);
fileSystem = fileSystemProvider.getFileSystem(null);
PathUtilsForTesting.installMock(fileSystem); // restored by restoreFileSystem in ESTestCase
}

@After
public void closeMockFileSystems() throws IOException {
IOUtils.close(fileSystem);
}

public void testReadBlobRangeCorrectlySkipBytes() throws IOException {
final String blobName = randomAlphaOfLengthBetween(1, 20).toLowerCase(Locale.ROOT);
final byte[] blobData = randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb

final Path path = PathUtils.get(createTempDir().toString());
Files.write(path.resolve(blobName), blobData);

final FsBlobContainer container = new FsBlobContainer(new FsBlobStore(Settings.EMPTY, path, false), BlobPath.cleanPath(), path);
assertThat(totalBytesRead.get(), equalTo(0L));

final long start = randomLongBetween(0L, Math.max(0L, blobData.length - 1));
final long length = randomLongBetween(1L, blobData.length - start);

try (InputStream stream = container.readBlob(blobName, start, length)) {
assertThat(totalBytesRead.get(), equalTo(0L));
assertThat(Streams.consumeFully(stream), equalTo(length));
assertThat(totalBytesRead.get(), equalTo(length));
}
}

public void testTempBlobName() {
final String blobName = randomAlphaOfLengthBetween(1, 20);
final String tempBlobName = FsBlobContainer.tempBlobName(blobName);
@ -37,4 +101,48 @@ public class FsBlobContainerTests extends ESTestCase {
final String tempBlobName = FsBlobContainer.tempBlobName(randomAlphaOfLengthBetween(1, 20));
assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true));
}

static class MockFileSystemProvider extends FilterFileSystemProvider {

final Consumer<Long> onRead;

MockFileSystemProvider(FileSystem inner, Consumer<Long> onRead) {
super("mockfs://", inner);
this.onRead = onRead;
}

private int onRead(int read) {
if (read != -1) {
onRead.accept((long) read);
}
return read;
}

@Override
public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> opts, FileAttribute<?>... attrs) throws IOException {
return new FilterSeekableByteChannel(super.newByteChannel(path, opts, attrs)) {
@Override
public int read(ByteBuffer dst) throws IOException {
return onRead(super.read(dst));
}
};
}

@Override
public InputStream newInputStream(Path path, OpenOption... opts) throws IOException {
// no super.newInputStream(path, opts) as it will use the delegating FileSystem to open a SeekableByteChannel
// and instead we want the mocked newByteChannel() method to be used
return new FilterInputStream(delegate.newInputStream(path, opts)) {
@Override
public int read() throws IOException {
return onRead(super.read());
}

@Override
public int read(byte[] b, int off, int len) throws IOException {
return onRead(super.read(b, off, len));
}
};
}
}
}

@ -79,4 +79,20 @@ public class StreamsTests extends ESTestCase {
assertEquals(-1, input.read());
input.close();
}

public void testFullyConsumeInputStream() throws IOException {
final String bytes = randomAlphaOfLengthBetween(0, 100);
final BytesArray stuffArray = new BytesArray(bytes);
assertEquals(bytes.length(), Streams.consumeFully(stuffArray.streamInput()));
}

public void testLimitInputStream() throws IOException {
final byte[] bytes = randomAlphaOfLengthBetween(1, 100).getBytes(StandardCharsets.UTF_8);
final int limit = randomIntBetween(0, bytes.length);
final BytesArray stuffArray = new BytesArray(bytes);
final ByteArrayOutputStream out = new ByteArrayOutputStream(bytes.length);
final long count = Streams.copy(Streams.limitStream(stuffArray.streamInput(), limit), out);
assertEquals(limit, count);
assertThat(Arrays.equals(out.toByteArray(), Arrays.copyOf(bytes, limit)), equalTo(true));
}
}

@ -19,15 +19,12 @@

package org.elasticsearch.common.lucene.store;

import org.apache.lucene.store.IndexInput;
import org.elasticsearch.test.ESTestCase;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

import static org.hamcrest.Matchers.containsString;

public class ByteArrayIndexInputTests extends ESTestCase {
public class ByteArrayIndexInputTests extends ESIndexInputTestCase {
public void testRandomReads() throws IOException {
for (int i = 0; i < 100; i++) {
byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(StandardCharsets.UTF_8);
@ -87,47 +84,5 @@ public class ByteArrayIndexInputTests extends ESTestCase {
}
}

private byte[] randomReadAndSlice(IndexInput indexInput, int length) throws IOException {
int readPos = (int) indexInput.getFilePointer();
byte[] output = new byte[length];
while (readPos < length) {
switch (randomIntBetween(0, 3)) {
case 0:
// Read by one byte at a time
output[readPos++] = indexInput.readByte();
break;
case 1:
// Read several bytes into target
int len = randomIntBetween(1, length - readPos);
indexInput.readBytes(output, readPos, len);
readPos += len;
break;
case 2:
// Read several bytes into 0-offset target
len = randomIntBetween(1, length - readPos);
byte[] temp = new byte[len];
indexInput.readBytes(temp, 0, len);
System.arraycopy(temp, 0, output, readPos, len);
readPos += len;
break;
case 3:
// Read using slice
len = randomIntBetween(1, length - readPos);
IndexInput slice = indexInput.slice("slice (" + readPos + ", " + len + ") of " + indexInput.toString(), readPos, len);
temp = randomReadAndSlice(slice, len);
// assert that position in the original input didn't change
assertEquals(readPos, indexInput.getFilePointer());
System.arraycopy(temp, 0, output, readPos, len);
readPos += len;
indexInput.seek(readPos);
assertEquals(readPos, indexInput.getFilePointer());
break;
default:
fail();
}
assertEquals(readPos, indexInput.getFilePointer());
}
return output;
}
}

@ -78,12 +78,19 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
this.testAllocator = new TestAllocator();
}

private void allocateAllUnassigned(final RoutingAllocation allocation) {
final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator();
while (iterator.hasNext()) {
testAllocator.allocateUnassigned(iterator.next(), allocation, iterator);
}
}

public void testNoProcessPrimaryNotAllocatedBefore() {
final RoutingAllocation allocation;
// with old version, we can't know if a shard was allocated before or not
allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(),
randomFrom(INDEX_CREATED, CLUSTER_RECOVERED, INDEX_REOPENED));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(false));
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId));
@ -96,7 +103,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testNoAsyncFetchData() {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,
"allocId");
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
@ -111,7 +118,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation =
routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "allocId");
testAllocator.addData(node1, null, randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
@ -125,7 +132,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testNoMatchingAllocationIdFound() {
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "id2");
testAllocator.addData(node1, "id1", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
@ -139,7 +146,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,
"allocId1");
testAllocator.addData(node1, "allocId1", randomBoolean(), new CorruptIndexException("test", "test"));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
@ -153,7 +160,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED,
"allocId1");
testAllocator.addData(node1, "allocId1", randomBoolean(), new ShardLockObtainFailedException(shardId, "test"));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -177,7 +184,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
testAllocator.addData(node1, allocId1, randomBoolean(),
new ShardLockObtainFailedException(shardId, "test"));
testAllocator.addData(node2, allocId2, randomBoolean(), null);
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -196,7 +203,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(),
randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED), "allocId1");
testAllocator.addData(node1, "allocId1", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -221,7 +228,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
new TestAllocateDecision(randomBoolean() ? Decision.YES : Decision.NO), getNoDeciderThatAllowsForceAllocate()
));
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertTrue(allocation.routingNodes().unassigned().ignored().isEmpty());
assertEquals(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), 1);
@ -244,7 +251,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
getNoDeciderThatThrottlesForceAllocate()
));
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
assertEquals(ignored.size(), 1);
@ -268,7 +275,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
new TestAllocateDecision(Decision.THROTTLE), getNoDeciderThatAllowsForceAllocate()
));
RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
assertEquals(ignored.size(), 1);
@ -287,7 +294,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
boolean node1HasPrimaryShard = randomBoolean();
testAllocator.addData(node1, node1HasPrimaryShard ? primaryAllocId : replicaAllocId, node1HasPrimaryShard);
testAllocator.addData(node2, node1HasPrimaryShard ? replicaAllocId : primaryAllocId, !node1HasPrimaryShard);
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -305,7 +312,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), CLUSTER_RECOVERED,
"allocId1");
testAllocator.addData(node1, "allocId1", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
@ -320,7 +327,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), CLUSTER_RECOVERED,
"allocId1");
testAllocator.addData(node1, "allocId1", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -336,7 +343,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testRestore() {
RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), "allocId");
testAllocator.addData(node1, "some allocId", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -350,7 +357,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testRestoreThrottle() {
RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders(), "allocId");
testAllocator.addData(node1, "some allocId", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));
assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW);
@ -363,7 +370,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testRestoreForcesAllocateIfShardAvailable() {
RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders(), "allocId");
testAllocator.addData(node1, "some allocId", randomBoolean());
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
@ -377,7 +384,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
public void testRestoreDoesNotAssignIfNoShardAvailable() {
RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), "allocId");
testAllocator.addData(node1, null, false);
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));

@ -87,13 +87,20 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
this.testAllocator = new TestAllocator();
}

private void allocateAllUnassigned(final RoutingAllocation allocation) {
final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator();
while (iterator.hasNext()) {
testAllocator.allocateUnassigned(iterator.next(), allocation, iterator);
}
}

/**
* Verifies that when we are still fetching data in an async manner, the replica shard moves to ignore unassigned.
*/
public void testNoAsyncFetchData() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
testAllocator.clean();
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
@ -106,7 +113,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY,
UnassignedInfo.Reason.INDEX_CREATED);
testAllocator.clean();
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(testAllocator.getFetchDataCalledAndClean(), equalTo(false));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
@ -121,7 +128,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED)));
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, reason);
testAllocator.clean();
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat("failed with reason " + reason, testAllocator.getFetchDataCalledAndClean(), equalTo(true));
}

@ -133,7 +140,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(nodeToMatch, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(nodeToMatch.getId()));
@ -147,7 +154,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(nodeToMatch, "MATCH", new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(nodeToMatch.getId()));
@ -161,7 +168,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3;
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(nodeToMatch, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(nodeToMatch.getId()));
@ -179,7 +186,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.addData(node2, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node3.getId()));
@ -253,7 +260,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.addData(node2, null); // has retention lease but store is empty
testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
equalTo(node3.getId()));
@ -268,7 +275,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
public void testNoPrimaryData() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
}
@ -280,7 +287,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
public void testNoDataForReplicaOnAnyNode() {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
}
@ -293,7 +300,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders());
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId));
}
@ -307,7 +314,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
onePrimaryOnNode1And1Replica(randomBoolean() ? noAllocationDeciders() : throttleAllocationDeciders());
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
@ -332,7 +339,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
})));
testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION))
.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
}
@ -346,7 +353,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
// we sometimes return an empty list of files, make sure we test this as well
testAllocator.addData(node2, null);
}
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(false));
assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));
assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId));
@ -355,7 +362,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase {
Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(),
TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT);
testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION));
testAllocator.allocateUnassigned(allocation);
allocateAllUnassigned(allocation);
assertThat(allocation.routingNodesChanged(), equalTo(true));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(),
@ -8,3 +8,12 @@ services:
      - ./testfixtures_shared/shared:/fixture/shared
    ports:
      - "8091"

  azure-fixture-other:
    build:
      context: .
      dockerfile: Dockerfile
    volumes:
      - ./testfixtures_shared/shared:/fixture/shared
    ports:
      - "8091"
test/fixtures/s3-fixture/docker-compose.yml
@ -15,6 +15,21 @@ services:
    ports:
      - "80"

  s3-fixture-other:
    build:
      context: .
      args:
        fixtureClass: fixture.s3.S3HttpFixture
        port: 80
        bucket: "bucket"
        basePath: "base_path"
        accessKey: "access_key"
      dockerfile: Dockerfile
    volumes:
      - ./testfixtures_shared/shared:/fixture/shared
    ports:
      - "80"

  s3-fixture-with-session-token:
    build:
      context: .
@ -216,13 +216,13 @@ public class S3HttpHandler implements HttpHandler {

final int start = Integer.parseInt(matcher.group(1));
final int end = Integer.parseInt(matcher.group(2));
final int length = end - start;

final BytesReference rangeBlob = blob.slice(start, end + 1 - start);
exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
exchange.getResponseHeaders().add("Content-Range",
String.format(Locale.ROOT, "bytes=%d-%d/%d", start, end, blob.length()));
exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length);
exchange.getResponseBody().write(BytesReference.toBytes(blob), start, length);
exchange.getResponseHeaders().add("Content-Range", String.format(Locale.ROOT, "bytes %d-%d/%d",
start, end, rangeBlob.length()));
exchange.sendResponseHeaders(RestStatus.OK.getStatus(), rangeBlob.length());
rangeBlob.writeTo(exchange.getResponseBody());
}
} else {
exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1);
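The replacement above fixes two bugs in the fixture's range handling: a `Content-Range` response header uses the form `bytes start-end/total` (a space, not the `=` of the request-side `Range` header), and the announced length must match the returned slice, which spans `end + 1 - start` bytes because the range is inclusive. A minimal illustration of the corrected formatting (not part of the commit):

[source,java]
--------------------------------------------------
// Format an RFC 7233 Content-Range response header for the inclusive byte
// range [start, end] of a blob of totalLength bytes.
static String contentRange(long start, long end, long totalLength) {
    // response syntax: "bytes 0-99/1234"; "bytes=0-99" is request-only syntax
    return String.format(java.util.Locale.ROOT, "bytes %d-%d/%d", start, end, totalLength);
}
--------------------------------------------------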
@ -23,7 +23,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodeRole;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
@ -258,26 +257,33 @@ public abstract class ESAllocationTestCase extends ESTestCase {
public DelayedShardsMockGatewayAllocator() {}

@Override
public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
// no-op
}

@Override
public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
// no-op
}

@Override
public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator();
while (unassignedIterator.hasNext()) {
ShardRouting shard = unassignedIterator.next();
if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
continue;
}
if (shard.unassignedInfo().isDelayed()) {
unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes());
}
public void beforeAllocation(RoutingAllocation allocation) {
// no-op
}

@Override
public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
// no-op
}

@Override
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
if (shardRouting.primary() || shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
return;
}
if (shardRouting.unassignedInfo().isDelayed()) {
unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes());
}
}
}
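The interface change above replaces the single bulk `allocateUnassigned(RoutingAllocation)` entry point with explicit lifecycle phases and a per-shard callback. Roughly, the allocation service now drives implementations like this (an illustrative sketch, not the actual caller):

[source,java]
--------------------------------------------------
// Illustrative driver loop under the new GatewayAllocator API.
allocator.beforeAllocation(allocation);
// primaries first, one shard at a time, each with its own handler:
//   allocator.allocateUnassigned(primaryShard, allocation, handler);
allocator.afterPrimariesBeforeReplicas(allocation);
// then replicas, again per shard:
//   allocator.allocateUnassigned(replicaShard, allocation, handler);
--------------------------------------------------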
@ -0,0 +1,192 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.common.lucene.store;

import org.apache.lucene.store.IndexInput;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.test.ESTestCase;
import org.junit.AfterClass;
import org.junit.BeforeClass;

import java.io.IOException;
import java.util.concurrent.CountDownLatch;

/**
* Test harness for verifying {@link IndexInput} implementations.
*/
public class ESIndexInputTestCase extends ESTestCase {

private static EsThreadPoolExecutor executor;

@BeforeClass
public static void createExecutor() {
final String name = "TEST-" + getTestClass().getSimpleName() + "#randomReadAndSlice";
executor = EsExecutors.newFixed(name, 10, 0, EsExecutors.daemonThreadFactory(name), new ThreadContext(Settings.EMPTY));
}

@AfterClass
public static void destroyExecutor() {
executor.shutdown();
}

/**
* Reads the contents of an {@link IndexInput} from {@code indexInput.getFilePointer()} to {@code length} using a wide variety of
* different access methods. Returns an array of length {@code length} containing the bytes that were read starting at index
* {@code indexInput.getFilePointer()}. The bytes of the returned array with indices below the initial value of
* {@code indexInput.getFilePointer()} may contain anything. The final value of {@code indexInput.getFilePointer()} is {@code length}.
*/
protected byte[] randomReadAndSlice(IndexInput indexInput, int length) throws IOException {
int readPos = (int) indexInput.getFilePointer();
byte[] output = new byte[length];
while (readPos < length) {
switch (randomIntBetween(0, 5)) {
case 0:
// Read by one byte at a time
output[readPos++] = indexInput.readByte();
break;
case 1:
// Read several bytes into target
int len = randomIntBetween(1, length - readPos);
indexInput.readBytes(output, readPos, len);
readPos += len;
break;
case 2:
// Read several bytes into 0-offset target
len = randomIntBetween(1, length - readPos);
byte[] temp = new byte[len];
indexInput.readBytes(temp, 0, len);
System.arraycopy(temp, 0, output, readPos, len);
readPos += len;
break;
case 3:
// Read using slice
len = randomIntBetween(1, length - readPos);
IndexInput slice = indexInput.slice("slice (" + readPos + ", " + len + ") of " + indexInput, readPos, len);
temp = randomReadAndSlice(slice, len);
// assert that position in the original input didn't change
assertEquals(readPos, indexInput.getFilePointer());
System.arraycopy(temp, 0, output, readPos, len);
readPos += len;
indexInput.seek(readPos);
assertEquals(readPos, indexInput.getFilePointer());
break;
case 4:
// Seek at a random position and read a single byte,
// then seek back to original position
final int lastReadPos = readPos;
readPos = randomIntBetween(0, length - 1);
indexInput.seek(readPos);
assertEquals(readPos, indexInput.getFilePointer());
final int bytesToRead = 1;
temp = randomReadAndSlice(indexInput, readPos + bytesToRead);
System.arraycopy(temp, readPos, output, readPos, bytesToRead);
readPos = lastReadPos;
indexInput.seek(readPos);
assertEquals(readPos, indexInput.getFilePointer());
break;
case 5:
// Read clone or slice concurrently
final int cloneCount = between(1, 3);
final CountDownLatch startLatch = new CountDownLatch(1 + cloneCount);
final CountDownLatch finishLatch = new CountDownLatch(cloneCount);

final PlainActionFuture<byte[]> mainThreadResultFuture = new PlainActionFuture<>();
final int mainThreadReadStart = readPos;
final int mainThreadReadEnd = randomIntBetween(readPos + 1, length);

for (int i = 0; i < cloneCount; i++) {
executor.execute(new AbstractRunnable() {
@Override
public void onFailure(Exception e) {
throw new AssertionError(e);
}

@Override
protected void doRun() throws Exception {
final IndexInput clone;
final int readStart = between(0, length);
final int readEnd = between(readStart, length);
if (randomBoolean()) {
clone = indexInput.clone();
} else {
final int sliceEnd = between(readEnd, length);
clone = indexInput.slice("concurrent slice (0, " + sliceEnd + ") of " + indexInput, 0L, sliceEnd);
}
startLatch.countDown();
startLatch.await();
clone.seek(readStart);
final byte[] cloneResult = randomReadAndSlice(clone, readEnd);
if (randomBoolean()) {
clone.close();
}

// the read from the clone should agree with the read from the main input on their overlap
final int maxStart = Math.max(mainThreadReadStart, readStart);
final int minEnd = Math.min(mainThreadReadEnd, readEnd);
if (maxStart < minEnd) {
final byte[] mainThreadResult = mainThreadResultFuture.actionGet();
final int overlapLen = minEnd - maxStart;
final byte[] fromMainThread = new byte[overlapLen];
final byte[] fromClone = new byte[overlapLen];
System.arraycopy(mainThreadResult, maxStart, fromMainThread, 0, overlapLen);
System.arraycopy(cloneResult, maxStart, fromClone, 0, overlapLen);
assertArrayEquals(fromMainThread, fromClone);
}
}

@Override
public void onAfter() {
finishLatch.countDown();
}

@Override
public void onRejection(Exception e) {
// all threads are busy, and queueing can lead this test to deadlock, so we take no action
startLatch.countDown();
}
});
}

try {
startLatch.countDown();
startLatch.await();
ActionListener.completeWith(mainThreadResultFuture, () -> randomReadAndSlice(indexInput, mainThreadReadEnd));
System.arraycopy(mainThreadResultFuture.actionGet(), readPos, output, readPos, mainThreadReadEnd - readPos);
readPos = mainThreadReadEnd;
finishLatch.await();
} catch (InterruptedException e) {
throw new AssertionError(e);
}
break;
default:
fail();
}
assertEquals(readPos, indexInput.getFilePointer());
}
assertEquals(length, indexInput.getFilePointer());
return output;
}

}
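A hypothetical subclass showing how this harness might be exercised against a plain Lucene directory; the class and file names below are illustrative only:

[source,java]
--------------------------------------------------
import org.apache.lucene.store.ByteBuffersDirectory;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;

import java.io.IOException;

public class SimpleIndexInputTests extends ESIndexInputTestCase {
    public void testRandomReadAndSlice() throws IOException {
        byte[] expected = randomByteArrayOfLength(randomIntBetween(1, 1024));
        try (Directory directory = new ByteBuffersDirectory()) {
            try (IndexOutput output = directory.createOutput("test", IOContext.DEFAULT)) {
                output.writeBytes(expected, expected.length);
            }
            try (IndexInput input = directory.openInput("test", IOContext.DEFAULT)) {
                // exercises byte-at-a-time reads, bulk reads, slices, seeks and concurrent clones
                assertArrayEquals(expected, randomReadAndSlice(input, expected.length));
            }
        }
    }
}
--------------------------------------------------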
@ -45,6 +45,16 @@ public class BlobContainerWrapper implements BlobContainer {
return delegate.readBlob(name);
}

@Override
public InputStream readBlob(String blobName, long position, long length) throws IOException {
return delegate.readBlob(blobName, position, length);
}

@Override
public long readBlobPreferredLength() {
return delegate.readBlobPreferredLength();
}

@Override
public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
delegate.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
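The first new override adds ranged reads, which searchable snapshots use to fetch only the regions of a Lucene file they need; a hypothetical caller (the container instance and offsets are assumed):

[source,java]
--------------------------------------------------
// Read 1024 bytes starting at offset 4096 of "blob" instead of the whole blob.
try (InputStream in = blobContainer.readBlob("blob", 4096, 1024)) {
    byte[] buffer = new byte[1024];
    int read = in.read(buffer); // may return fewer bytes; real code loops
}
--------------------------------------------------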
@ -95,13 +95,13 @@ public class TestGatewayAllocator extends GatewayAllocator {
};

@Override
public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
currentNodes = allocation.nodes();
allocation.routingNodes().shards(ShardRouting::active).forEach(this::addKnownAllocation);
}

@Override
public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
currentNodes = allocation.nodes();
for (FailedShard failedShard : failedShards) {
final ShardRouting failedRouting = failedShard.getRoutingEntry();
@ -116,9 +116,18 @@ public class TestGatewayAllocator extends GatewayAllocator {
}

@Override
public void allocateUnassigned(RoutingAllocation allocation) {
public void beforeAllocation(RoutingAllocation allocation) {
}

@Override
public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
}

@Override
public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
UnassignedAllocationHandler unassignedAllocationHandler) {
currentNodes = allocation.nodes();
innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator);
innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator, shardRouting, unassignedAllocationHandler);
}

/**
@ -63,6 +63,7 @@ import org.elasticsearch.xpack.core.ilm.LifecycleAction;
import org.elasticsearch.xpack.core.ilm.LifecycleType;
import org.elasticsearch.xpack.core.ilm.ReadOnlyAction;
import org.elasticsearch.xpack.core.ilm.RolloverAction;
import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction;
import org.elasticsearch.xpack.core.ilm.SetPriorityAction;
import org.elasticsearch.xpack.core.ilm.ShrinkAction;
import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType;
@ -606,6 +607,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl
new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, WaitForSnapshotAction.NAME, WaitForSnapshotAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new),
// Transforms
new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new),
new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new),
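These registry entries matter for serialization: a sketch of how a registered `LifecycleAction` is resolved by name when a policy arrives over the transport layer (`entries` and `rawInput` are assumed here):

[source,java]
--------------------------------------------------
// Without the SearchableSnapshotAction entry above, reading a policy containing
// that action would fail with an "unknown NamedWriteable" error on 7.x nodes.
NamedWriteableRegistry registry = new NamedWriteableRegistry(entries);
try (StreamInput in = new NamedWriteableAwareStreamInput(rawInput, registry)) {
    LifecycleAction action = in.readNamedWriteable(LifecycleAction.class);
}
--------------------------------------------------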
@ -0,0 +1,105 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/

package org.elasticsearch.xpack.core.ilm;

import org.apache.lucene.util.SetOnce;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.metadata.IndexMetadata;

import java.util.Objects;

/**
* This step wraps an {@link AsyncActionStep} in order to be able to manipulate what the next step will be, depending on the result of the
* wrapped {@link AsyncActionStep}.
* <p>
* If the action response is complete, the {@link AsyncActionBranchingStep}'s nextStepKey will be the nextStepKey of the wrapped action. If
* the response is incomplete the nextStepKey will be the provided {@link AsyncActionBranchingStep#nextKeyOnIncompleteResponse}.
* Failures encountered whilst executing the wrapped action will be propagated directly.
*/
public class AsyncActionBranchingStep extends AsyncActionStep {
private final AsyncActionStep stepToExecute;

private StepKey nextKeyOnIncompleteResponse;
private SetOnce<Boolean> onResponseResult;

public AsyncActionBranchingStep(AsyncActionStep stepToExecute, StepKey nextKeyOnIncompleteResponse, Client client) {
// super.nextStepKey is set to null since it is not used by this step
super(stepToExecute.getKey(), null, client);
this.stepToExecute = stepToExecute;
this.nextKeyOnIncompleteResponse = nextKeyOnIncompleteResponse;
this.onResponseResult = new SetOnce<>();
}

@Override
public boolean isRetryable() {
return true;
}

@Override
public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer,
Listener listener) {
stepToExecute.performAction(indexMetadata, currentClusterState, observer, new Listener() {
@Override
public void onResponse(boolean complete) {
onResponseResult.set(complete);
listener.onResponse(complete);
}

@Override
public void onFailure(Exception e) {
listener.onFailure(e);
}
});
}

@Override
public final StepKey getNextStepKey() {
if (onResponseResult.get() == null) {
throw new IllegalStateException("cannot call getNextStepKey before performAction");
}
return onResponseResult.get() ? stepToExecute.getNextStepKey() : nextKeyOnIncompleteResponse;
}

/**
* Represents the {@link AsyncActionStep} that's wrapped by this branching step.
*/
AsyncActionStep getStepToExecute() {
return stepToExecute;
}

/**
* The step key to be reported as the {@link AsyncActionBranchingStep#getNextStepKey()} if the response of the wrapped
* {@link AsyncActionBranchingStep#getStepToExecute()} is incomplete.
*/
StepKey getNextKeyOnIncompleteResponse() {
return nextKeyOnIncompleteResponse;
}

@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
AsyncActionBranchingStep that = (AsyncActionBranchingStep) o;
return super.equals(o)
&& Objects.equals(stepToExecute, that.stepToExecute)
&& Objects.equals(nextKeyOnIncompleteResponse, that.nextKeyOnIncompleteResponse);
}

@Override
public int hashCode() {
return Objects.hash(super.hashCode(), stepToExecute, nextKeyOnIncompleteResponse);
}
}
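A sketch of how this wrapper can be wired; the step keys and `client` below are assumed. When the wrapped step reports an incomplete response, ILM proceeds to the alternate key rather than the wrapped step's own next step:

[source,java]
--------------------------------------------------
AsyncActionStep createSnapshot = new CreateSnapshotStep(createSnapshotKey, nextKey, client);
AsyncActionBranchingStep branching = new AsyncActionBranchingStep(createSnapshot, cleanupKey, client);
// after performAction:
//   onResponse(true)  -> branching.getNextStepKey() == nextKey
//   onResponse(false) -> branching.getNextStepKey() == cleanupKey
--------------------------------------------------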
@ -33,8 +33,8 @@ public abstract class AsyncRetryDuringSnapshotActionStep extends AsyncActionStep
}

@Override
public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState,
ClusterStateObserver observer, Listener listener) {
public final void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState,
ClusterStateObserver observer, Listener listener) {
// Wrap the original listener to handle exceptions caused by ongoing snapshots
SnapshotExceptionListener snapshotExceptionListener = new SnapshotExceptionListener(indexMetadata.getIndex(), listener, observer);
performDuringNoSnapshot(indexMetadata, currentClusterState, snapshotExceptionListener);
@ -0,0 +1,82 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.snapshots.SnapshotMissingException;

import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata;

/**
* Deletes the snapshot designated by the repository and snapshot name present in the lifecycle execution state.
*/
public class CleanupSnapshotStep extends AsyncRetryDuringSnapshotActionStep {
public static final String NAME = "cleanup-snapshot";

public CleanupSnapshotStep(StepKey key, StepKey nextStepKey, Client client) {
super(key, nextStepKey, client);
}

@Override
public boolean isRetryable() {
return true;
}

@Override
void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, Listener listener) {
final String indexName = indexMetadata.getIndex().getName();

LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetadata);
final String repositoryName = lifecycleState.getSnapshotRepository();
// if the snapshot information is missing from the ILM execution state there is nothing to delete so we move on
if (Strings.hasText(repositoryName) == false) {
listener.onResponse(true);
return;
}
final String snapshotName = lifecycleState.getSnapshotName();
if (Strings.hasText(snapshotName) == false) {
listener.onResponse(true);
return;
}
DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(repositoryName, snapshotName);
getClient().admin().cluster().deleteSnapshot(deleteSnapshotRequest, new ActionListener<AcknowledgedResponse>() {

@Override
public void onResponse(AcknowledgedResponse acknowledgedResponse) {
if (acknowledgedResponse.isAcknowledged() == false) {
String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
throw new ElasticsearchException("cleanup snapshot step request for repository [" + repositoryName + "] and snapshot " +
"[" + snapshotName + "] policy [" + policyName + "] and index [" + indexName + "] failed to be acknowledged");
}
listener.onResponse(true);
}

@Override
public void onFailure(Exception e) {
if (e instanceof SnapshotMissingException) {
// in the happy flow a snapshot name is generated but the snapshot itself may never have been created,
// so a missing snapshot simply means there is nothing to clean up
listener.onResponse(true);
} else {
if (e instanceof RepositoryMissingException) {
String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
listener.onFailure(new IllegalStateException("repository [" + repositoryName + "] is missing. [" + policyName +
"] policy for index [" + indexName + "] cannot continue until the repository is created", e));
} else {
listener.onFailure(e);
}
}
}
});
}
}
@ -19,23 +19,32 @@ import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTO

/**
* Copies the execution state data from one index to another, typically after a
* new index has been created. Useful for actions such as shrink.
* new index has been created. As part of the execution state copy it will set the target index
* "current step" to the provided step name (part of the same phase and action as the current step, unless
* the "complete" step is configured, in which case the action will be changed to "complete" as well).
*
* Useful for actions such as shrink.
*/
public class CopyExecutionStateStep extends ClusterStateActionStep {
public static final String NAME = "copy-execution-state";

private static final Logger logger = LogManager.getLogger(CopyExecutionStateStep.class);

private String shrunkIndexPrefix;
private final String targetIndexPrefix;
private final String targetNextStepName;


public CopyExecutionStateStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) {
public CopyExecutionStateStep(StepKey key, StepKey nextStepKey, String targetIndexPrefix, String targetNextStepName) {
super(key, nextStepKey);
this.shrunkIndexPrefix = shrunkIndexPrefix;
this.targetIndexPrefix = targetIndexPrefix;
this.targetNextStepName = targetNextStepName;
}

String getShrunkIndexPrefix() {
return shrunkIndexPrefix;
String getTargetIndexPrefix() {
return targetIndexPrefix;
}

String getTargetNextStepName() {
return targetNextStepName;
}

@Override
@ -48,8 +57,8 @@ public class CopyExecutionStateStep extends ClusterStateActionStep {
}
// get source index
String indexName = indexMetadata.getIndex().getName();
// get target shrink index
String targetIndexName = shrunkIndexPrefix + indexName;
// get target index
String targetIndexName = targetIndexPrefix + indexName;
IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName);

if (targetIndexMetadata == null) {
@ -67,8 +76,14 @@ public class CopyExecutionStateStep extends ClusterStateActionStep {
LifecycleExecutionState.Builder relevantTargetCustomData = LifecycleExecutionState.builder();
relevantTargetCustomData.setIndexCreationDate(lifecycleDate);
relevantTargetCustomData.setPhase(phase);
relevantTargetCustomData.setAction(action);
relevantTargetCustomData.setStep(ShrunkenIndexCheckStep.NAME);
relevantTargetCustomData.setStep(targetNextStepName);
if (targetNextStepName.equals(PhaseCompleteStep.NAME)) {
relevantTargetCustomData.setAction(PhaseCompleteStep.NAME);
} else {
relevantTargetCustomData.setAction(action);
}
relevantTargetCustomData.setSnapshotRepository(lifecycleState.getSnapshotRepository());
relevantTargetCustomData.setSnapshotName(lifecycleState.getSnapshotName());

Metadata.Builder newMetadata = Metadata.builder(clusterState.getMetadata())
.put(IndexMetadata.builder(targetIndexMetadata)
@ -79,15 +94,22 @@ public class CopyExecutionStateStep extends ClusterStateActionStep {

@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
if (!super.equals(o)) return false;
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
CopyExecutionStateStep that = (CopyExecutionStateStep) o;
return Objects.equals(shrunkIndexPrefix, that.shrunkIndexPrefix);
return Objects.equals(targetIndexPrefix, that.targetIndexPrefix) &&
Objects.equals(targetNextStepName, that.targetNextStepName);
}

@Override
public int hashCode() {
return Objects.hash(super.hashCode(), shrunkIndexPrefix);
return Objects.hash(super.hashCode(), targetIndexPrefix, targetNextStepName);
}
}
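For example, a caller might copy the execution state onto a restored index and mark the phase as complete; the step keys and the "restored-" prefix below are illustrative:

[source,java]
--------------------------------------------------
// The target index "restored-<source>" resumes ILM at the phase-complete step.
CopyExecutionStateStep copyState =
    new CopyExecutionStateStep(copyKey, nextKey, "restored-", PhaseCompleteStep.NAME);
--------------------------------------------------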
@ -0,0 +1,112 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.ilm;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;

import java.util.Arrays;
import java.util.Locale;
import java.util.Objects;

/**
* Copy the provided settings from the source to the target index.
* <p>
* The target index is derived from the source index using the provided prefix.
* This is useful for actions like shrink or searchable snapshot that create a new index and migrate the ILM execution from the source
* to the target index.
*/
public class CopySettingsStep extends ClusterStateActionStep {
public static final String NAME = "copy-settings";

private static final Logger logger = LogManager.getLogger(CopySettingsStep.class);

private final String[] settingsKeys;
private final String indexPrefix;

public CopySettingsStep(StepKey key, StepKey nextStepKey, String indexPrefix, String... settingsKeys) {
super(key, nextStepKey);
Objects.requireNonNull(indexPrefix);
Objects.requireNonNull(settingsKeys);
this.indexPrefix = indexPrefix;
this.settingsKeys = settingsKeys;
}

@Override
public boolean isRetryable() {
return true;
}

public String[] getSettingsKeys() {
return settingsKeys;
}

public String getIndexPrefix() {
return indexPrefix;
}

@Override
public ClusterState performAction(Index index, ClusterState clusterState) {
String sourceIndexName = index.getName();
IndexMetadata sourceIndexMetadata = clusterState.metadata().index(sourceIndexName);
String targetIndexName = indexPrefix + sourceIndexName;
IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName);

if (sourceIndexMetadata == null) {
// Index must have been since deleted, ignore it
logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), sourceIndexName);
return clusterState;
}

if (settingsKeys == null || settingsKeys.length == 0) {
return clusterState;
}

if (targetIndexMetadata == null) {
String errorMessage = String.format(Locale.ROOT, "index [%s] is being referenced by ILM action [%s] on step [%s] but " +
"it doesn't exist", targetIndexName, getKey().getAction(), getKey().getName());
logger.debug(errorMessage);
throw new IllegalStateException(errorMessage);
}

Settings.Builder settings = Settings.builder().put(targetIndexMetadata.getSettings());
for (String key : settingsKeys) {
String value = sourceIndexMetadata.getSettings().get(key);
settings.put(key, value);
}

Metadata.Builder newMetaData = Metadata.builder(clusterState.getMetadata())
.put(IndexMetadata.builder(targetIndexMetadata)
.settingsVersion(targetIndexMetadata.getSettingsVersion() + 1)
.settings(settings));
return ClusterState.builder(clusterState).metadata(newMetaData).build();
}

@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
if (!super.equals(o)) {
return false;
}
CopySettingsStep that = (CopySettingsStep) o;
return Arrays.equals(settingsKeys, that.settingsKeys) &&
Objects.equals(indexPrefix, that.indexPrefix);
}

@Override
public int hashCode() {
return Objects.hash(super.hashCode(), Arrays.hashCode(settingsKeys), indexPrefix);
}
}
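A hypothetical wiring of the step, copying the ILM policy setting onto a "restored-"-prefixed target index so the new index stays managed by the same policy (the keys and prefix are assumptions):

[source,java]
--------------------------------------------------
CopySettingsStep copySettings =
    new CopySettingsStep(copyKey, nextKey, "restored-", LifecycleSettings.LIFECYCLE_NAME);
--------------------------------------------------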
@ -0,0 +1,83 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.ilm;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.snapshots.SnapshotInfo;

import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata;

/**
* Creates a snapshot of the managed index into the configured repository and snapshot name. The repository and snapshot names are expected
* to be present in the lifecycle execution state (usually generated and stored by a different ILM step)
*/
public class CreateSnapshotStep extends AsyncRetryDuringSnapshotActionStep {
public static final String NAME = "create-snapshot";

private static final Logger logger = LogManager.getLogger(CreateSnapshotStep.class);

public CreateSnapshotStep(StepKey key, StepKey nextStepKey, Client client) {
super(key, nextStepKey, client);
}

@Override
public boolean isRetryable() {
return true;
}

@Override
void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, Listener listener) {
final String indexName = indexMetadata.getIndex().getName();

final LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetadata);

final String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
final String snapshotRepository = lifecycleState.getSnapshotRepository();
if (Strings.hasText(snapshotRepository) == false) {
listener.onFailure(new IllegalStateException("snapshot repository is not present for policy [" + policyName + "] and index [" +
indexName + "]"));
return;
}

final String snapshotName = lifecycleState.getSnapshotName();
if (Strings.hasText(snapshotName) == false) {
listener.onFailure(
new IllegalStateException("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]"));
return;
}
CreateSnapshotRequest request = new CreateSnapshotRequest(snapshotRepository, snapshotName);
request.indices(indexName);
// this is safe as the snapshot creation will still be async, it's just that the listener will be notified when the snapshot is
// complete
request.waitForCompletion(true);
request.includeGlobalState(false);
request.masterNodeTimeout(getMasterTimeout(currentClusterState));
getClient().admin().cluster().createSnapshot(request,
ActionListener.wrap(response -> {
logger.debug("create snapshot response for policy [{}] and index [{}] is: {}", policyName, indexName,
Strings.toString(response));
final SnapshotInfo snapInfo = response.getSnapshotInfo();

// Check that there are no failed shards, since the request may not entirely
// fail, but may still have failures (such as in the case of an aborted snapshot)
if (snapInfo.failedShards() == 0) {
listener.onResponse(true);
} else {
int failures = snapInfo.failedShards();
int total = snapInfo.totalShards();
logger.warn("failed to create snapshot successfully, {} failures out of {} total shards failed", failures, total);
listener.onResponse(false);
}
}, listener::onFailure));
}
}
@ -5,17 +5,20 @@
*/
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
* A {@link LifecycleAction} which deletes the index.
@ -23,20 +26,42 @@ import java.util.List;
public class DeleteAction implements LifecycleAction {
public static final String NAME = "delete";

private static final ObjectParser<DeleteAction, Void> PARSER = new ObjectParser<>(NAME, DeleteAction::new);
public static final ParseField DELETE_SEARCHABLE_SNAPSHOT_FIELD = new ParseField("delete_searchable_snapshot");

private static final ConstructingObjectParser<DeleteAction, Void> PARSER = new ConstructingObjectParser<>(NAME,
a -> new DeleteAction(a[0] == null ? true : (boolean) a[0]));

static {
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), DELETE_SEARCHABLE_SNAPSHOT_FIELD);
}

public static DeleteAction parse(XContentParser parser) {
return PARSER.apply(parser, null);
}

private final boolean deleteSearchableSnapshot;

public DeleteAction() {
this(true);
}

public DeleteAction(boolean deleteSearchableSnapshot) {
this.deleteSearchableSnapshot = deleteSearchableSnapshot;
}

public DeleteAction(StreamInput in) throws IOException {
if (in.getVersion().onOrAfter(Version.V_7_8_0)) {
this.deleteSearchableSnapshot = in.readBoolean();
} else {
this.deleteSearchableSnapshot = true;
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
if (out.getVersion().onOrAfter(Version.V_7_8_0)) {
out.writeBoolean(deleteSearchableSnapshot);
}
}

@Override
@ -47,6 +72,7 @@ public class DeleteAction implements LifecycleAction {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(DELETE_SEARCHABLE_SNAPSHOT_FIELD.getPreferredName(), deleteSearchableSnapshot);
builder.endObject();
return builder;
}
@ -60,15 +86,23 @@ public class DeleteAction implements LifecycleAction {
public List<Step> toSteps(Client client, String phase, Step.StepKey nextStepKey) {
Step.StepKey waitForNoFollowerStepKey = new Step.StepKey(phase, NAME, WaitForNoFollowersStep.NAME);
Step.StepKey deleteStepKey = new Step.StepKey(phase, NAME, DeleteStep.NAME);
Step.StepKey cleanSnapshotKey = new Step.StepKey(phase, NAME, CleanupSnapshotStep.NAME);

WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, deleteStepKey, client);
DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client);
return Arrays.asList(waitForNoFollowersStep, deleteStep);
if (deleteSearchableSnapshot) {
WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, cleanSnapshotKey, client);
CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, deleteStepKey, client);
DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client);
return Arrays.asList(waitForNoFollowersStep, cleanupSnapshotStep, deleteStep);
} else {
WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, deleteStepKey, client);
DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client);
return Arrays.asList(waitForNoFollowersStep, deleteStep);
}
}

@Override
public int hashCode() {
return 1;
return Objects.hash(deleteSearchableSnapshot);
}

@Override
@ -79,7 +113,8 @@ public class DeleteAction implements LifecycleAction {
if (obj.getClass() != getClass()) {
return false;
}
return true;
DeleteAction that = (DeleteAction) obj;
return deleteSearchableSnapshot == that.deleteSearchableSnapshot;
}

@Override
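Usage note: the flag defaults to `true`, so existing policies keep deleting the searchable snapshot together with the index; constructing the action with `false` preserves it (a minimal sketch):

[source,java]
--------------------------------------------------
DeleteAction deleteKeepingSnapshot = new DeleteAction(false); // delete_searchable_snapshot: false
DeleteAction deleteEverything = new DeleteAction();           // defaults to true
--------------------------------------------------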
@ -0,0 +1,176 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.core.ilm;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.index.Index;

import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Objects;

import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY;
import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata;

/**
* Generates a snapshot name for the given index and records it in the index metadata along with the provided snapshot repository.
* <p>
* The generated snapshot name will be in the format {day-indexName-policyName-randomUUID}
* eg.: 2020.03.30-myindex-mypolicy-cmuce-qfvmn_dstqw-ivmjc1etsa
*/
public class GenerateSnapshotNameStep extends ClusterStateActionStep {

public static final String NAME = "generate-snapshot-name";

private static final Logger logger = LogManager.getLogger(GenerateSnapshotNameStep.class);

private static final IndexNameExpressionResolver.DateMathExpressionResolver DATE_MATH_RESOLVER =
new IndexNameExpressionResolver.DateMathExpressionResolver();

private final String snapshotRepository;

public GenerateSnapshotNameStep(StepKey key, StepKey nextStepKey, String snapshotRepository) {
super(key, nextStepKey);
this.snapshotRepository = snapshotRepository;
}

public String getSnapshotRepository() {
return snapshotRepository;
}

@Override
public ClusterState performAction(Index index, ClusterState clusterState) {
IndexMetadata indexMetaData = clusterState.metadata().index(index);
if (indexMetaData == null) {
// Index must have been since deleted, ignore it
logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName());
return clusterState;
}

ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState);

LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetaData);
assert lifecycleState.getSnapshotName() == null : "index " + index.getName() + " should not have a snapshot generated by " +
"the ilm policy but has " + lifecycleState.getSnapshotName();
LifecycleExecutionState.Builder newCustomData = LifecycleExecutionState.builder(lifecycleState);
String policy = indexMetaData.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
String snapshotNamePrefix = ("<{now/d}-" + index.getName() + "-" + policy + ">").toLowerCase(Locale.ROOT);
String snapshotName = generateSnapshotName(snapshotNamePrefix);
ActionRequestValidationException validationException = validateGeneratedSnapshotName(snapshotNamePrefix, snapshotName);
if (validationException != null) {
logger.warn("unable to generate a snapshot name as part of policy [{}] for index [{}] due to [{}]",
policy, index.getName(), validationException.getMessage());
throw validationException;
}
newCustomData.setSnapshotName(snapshotName);
newCustomData.setSnapshotRepository(snapshotRepository);

IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetaData);
indexMetadataBuilder.putCustom(ILM_CUSTOM_METADATA_KEY, newCustomData.build().asMap());
newClusterStateBuilder.metadata(Metadata.builder(clusterState.getMetadata()).put(indexMetadataBuilder));
return newClusterStateBuilder.build();
}

@Override
public int hashCode() {
return Objects.hash(super.hashCode(), snapshotRepository);
}

@Override
public boolean equals(Object obj) {
if (obj == null) {
return false;
}
if (getClass() != obj.getClass()) {
return false;
}
GenerateSnapshotNameStep other = (GenerateSnapshotNameStep) obj;
return super.equals(obj) &&
Objects.equals(snapshotRepository, other.snapshotRepository);
}

/**
* Since snapshots need to be uniquely named, this method will resolve any date math used in
* the provided name, as well as appending a unique identifier so expressions that may overlap
* still result in unique snapshot names.
*/
public static String generateSnapshotName(String name) {
return generateSnapshotName(name, new ResolverContext());
}

public static String generateSnapshotName(String name, IndexNameExpressionResolver.Context context) {
List<String> candidates = DATE_MATH_RESOLVER.resolve(context, Collections.singletonList(name));
if (candidates.size() != 1) {
throw new IllegalStateException("resolving snapshot name " + name + " generated more than one candidate: " + candidates);
}
// TODO: we are breaking the rules of UUIDs by lowercasing this here, find an alternative (snapshot names must be lowercase)
return candidates.get(0) + "-" + UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT);
}

/**
* This is a context for the DateMathExpressionResolver, which does not require
* {@code IndicesOptions} or {@code ClusterState} since it only uses the start
* time to resolve expressions
*/
public static final class ResolverContext extends IndexNameExpressionResolver.Context {
public ResolverContext() {
this(System.currentTimeMillis());
}

public ResolverContext(long startTime) {
super(null, null, startTime, false, false);
}

@Override
public ClusterState getState() {
throw new UnsupportedOperationException("should never be called");
}

@Override
public IndicesOptions getOptions() {
throw new UnsupportedOperationException("should never be called");
}
}

@Nullable
public static ActionRequestValidationException validateGeneratedSnapshotName(String snapshotPrefix, String snapshotName) {
ActionRequestValidationException err = new ActionRequestValidationException();
if (Strings.hasText(snapshotPrefix) == false) {
err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: cannot be empty");
}
if (snapshotName.contains("#")) {
err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must not contain '#'");
}
if (snapshotName.charAt(0) == '_') {
err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must not start with '_'");
}
if (snapshotName.toLowerCase(Locale.ROOT).equals(snapshotName) == false) {
err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must be lowercase");
}
if (Strings.validFileName(snapshotName) == false) {
err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must not contain the following characters " +
Strings.INVALID_FILENAME_CHARS);
}

if (err.validationErrors().size() > 0) {
return err;
} else {
return null;
}
}
}
|
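
Note: a minimal usage sketch of the name generation above (the index and policy names are hypothetical; the sample output is the format documented in the class javadoc):

    // date math in the prefix resolves against the resolver context's start time, and a
    // lowercased random UUID suffix keeps overlapping expressions from colliding
    String snapshotName = GenerateSnapshotNameStep.generateSnapshotName("<{now/d}-myindex-mypolicy>");
    // e.g. 2020.03.30-myindex-mypolicy-cmuce-qfvmn_dstqw-ivmjc1etsa
    assert GenerateSnapshotNameStep.validateGeneratedSnapshotName("<{now/d}-myindex-mypolicy>", snapshotName) == null;
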
@ -46,6 +46,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
    private static final ParseField STEP_INFO_FIELD = new ParseField("step_info");
    private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution");
    private static final ParseField AGE_FIELD = new ParseField("age");
    private static final ParseField REPOSITORY_NAME = new ParseField("repository_name");
    private static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name");

    public static final ConstructingObjectParser<IndexLifecycleExplainResponse, Void> PARSER = new ConstructingObjectParser<>(
        "index_lifecycle_explain_response",
@ -63,6 +65,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
            (Long) (a[8]),
            (Long) (a[9]),
            (Long) (a[10]),
            (String) a[16],
            (String) a[17],
            (BytesReference) a[11],
            (PhaseExecutionInfo) a[12]
            // a[13] == "age"
@ -89,6 +93,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), AGE_FIELD);
        PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), IS_AUTO_RETRYABLE_ERROR_FIELD);
        PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_RETRY_COUNT_FIELD);
        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REPOSITORY_NAME);
        PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), SNAPSHOT_NAME);
    }

    private final String index;
@ -106,23 +112,28 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
    private final PhaseExecutionInfo phaseExecutionInfo;
    private final Boolean isAutoRetryableError;
    private final Integer failedStepRetryCount;
    private final String repositoryName;
    private final String snapshotName;

    public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, Long lifecycleDate,
            String phase, String action, String step, String failedStep, Boolean isAutoRetryableError, Integer failedStepRetryCount,
            Long phaseTime, Long actionTime, Long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) {
            Long phaseTime, Long actionTime, Long stepTime, String repositoryName, String snapshotName, BytesReference stepInfo,
            PhaseExecutionInfo phaseExecutionInfo) {
        return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep,
            isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo);
            isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, repositoryName, snapshotName, stepInfo,
            phaseExecutionInfo);
    }

    public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) {
        return new IndexLifecycleExplainResponse(index, false, null, null, null, null, null, null, null, null, null, null, null, null,
            null);
            null, null, null);
    }

    private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, Long lifecycleDate,
            String phase, String action, String step, String failedStep, Boolean isAutoRetryableError,
            Integer failedStepRetryCount, Long phaseTime, Long actionTime, Long stepTime,
            BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) {
            String repositoryName, String snapshotName, BytesReference stepInfo,
            PhaseExecutionInfo phaseExecutionInfo) {
        if (managedByILM) {
            if (policyName == null) {
                throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index");
@ -157,6 +168,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
        this.failedStepRetryCount = failedStepRetryCount;
        this.stepInfo = stepInfo;
        this.phaseExecutionInfo = phaseExecutionInfo;
        this.repositoryName = repositoryName;
        this.snapshotName = snapshotName;
    }

    public IndexLifecycleExplainResponse(StreamInput in) throws IOException {
@ -181,6 +194,13 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
                isAutoRetryableError = null;
                failedStepRetryCount = null;
            }
            if (in.getVersion().onOrAfter(Version.V_7_8_0)) {
                repositoryName = in.readOptionalString();
                snapshotName = in.readOptionalString();
            } else {
                repositoryName = null;
                snapshotName = null;
            }
        } else {
            policyName = null;
            lifecycleDate = null;
@ -195,6 +215,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
            stepTime = null;
            stepInfo = null;
            phaseExecutionInfo = null;
            repositoryName = null;
            snapshotName = null;
        }
    }

@ -218,6 +240,10 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
                out.writeOptionalBoolean(isAutoRetryableError);
                out.writeOptionalVInt(failedStepRetryCount);
            }
            if (out.getVersion().onOrAfter(Version.V_7_8_0)) {
                out.writeOptionalString(repositoryName);
                out.writeOptionalString(snapshotName);
            }
        }
    }

@ -289,6 +315,14 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
        }
    }

    public String getRepositoryName() {
        return repositoryName;
    }

    public String getSnapshotName() {
        return snapshotName;
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
@ -327,6 +361,12 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
        if (failedStepRetryCount != null) {
            builder.field(FAILED_STEP_RETRY_COUNT_FIELD.getPreferredName(), failedStepRetryCount);
        }
        if (repositoryName != null) {
            builder.field(REPOSITORY_NAME.getPreferredName(), repositoryName);
        }
        if (snapshotName != null) {
            builder.field(SNAPSHOT_NAME.getPreferredName(), snapshotName);
        }
        if (stepInfo != null && stepInfo.length() > 0) {
            builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON);
        }
@ -341,7 +381,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
    @Override
    public int hashCode() {
        return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, isAutoRetryableError,
            failedStepRetryCount, phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo);
            failedStepRetryCount, phaseTime, actionTime, stepTime, repositoryName, snapshotName, stepInfo, phaseExecutionInfo);
    }

    @Override
@ -366,6 +406,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl
            Objects.equals(phaseTime, other.phaseTime) &&
            Objects.equals(actionTime, other.actionTime) &&
            Objects.equals(stepTime, other.stepTime) &&
            Objects.equals(repositoryName, other.repositoryName) &&
            Objects.equals(snapshotName, other.snapshotName) &&
            Objects.equals(stepInfo, other.stepInfo) &&
            Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo);
    }
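
Note: the repository and snapshot fields are BWC-gated on Version.V_7_8_0 in both the read and write paths above; a hedged sketch of how a caller might consume them (the logger and the getIndex() accessor are assumed to exist in the calling class):

    static void logBackingSnapshot(IndexLifecycleExplainResponse explain) {
        // repository_name and snapshot_name stay null until the searchable_snapshot
        // action has generated and recorded a snapshot name for the index
        if (explain.getSnapshotName() != null) {
            logger.info("index [{}] is backed by snapshot [{}] in repository [{}]",
                explain.getIndex(), explain.getSnapshotName(), explain.getRepositoryName());
        }
    }
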
@ -36,6 +36,8 @@ public class LifecycleExecutionState {
    private static final String FAILED_STEP_RETRY_COUNT = "failed_step_retry_count";
    private static final String STEP_INFO = "step_info";
    private static final String PHASE_DEFINITION = "phase_definition";
    private static final String SNAPSHOT_NAME = "snapshot_name";
    private static final String SNAPSHOT_REPOSITORY = "snapshot_repository";

    private final String phase;
    private final String action;
@ -49,10 +51,12 @@ public class LifecycleExecutionState {
    private final Long phaseTime;
    private final Long actionTime;
    private final Long stepTime;
    private final String snapshotName;
    private final String snapshotRepository;

    private LifecycleExecutionState(String phase, String action, String step, String failedStep, Boolean isAutoRetryableError,
            Integer failedStepRetryCount, String stepInfo, String phaseDefinition, Long lifecycleDate,
            Long phaseTime, Long actionTime, Long stepTime) {
            Long phaseTime, Long actionTime, Long stepTime, String snapshotRepository, String snapshotName) {
        this.phase = phase;
        this.action = action;
        this.step = step;
@ -65,6 +69,8 @@ public class LifecycleExecutionState {
        this.phaseTime = phaseTime;
        this.actionTime = actionTime;
        this.stepTime = stepTime;
        this.snapshotRepository = snapshotRepository;
        this.snapshotName = snapshotName;
    }

    /**
@ -122,6 +128,8 @@ public class LifecycleExecutionState {
            .setIndexCreationDate(state.lifecycleDate)
            .setPhaseTime(state.phaseTime)
            .setActionTime(state.actionTime)
            .setSnapshotRepository(state.snapshotRepository)
            .setSnapshotName(state.snapshotName)
            .setStepTime(state.stepTime);
    }

@ -151,6 +159,12 @@ public class LifecycleExecutionState {
        if (customData.containsKey(PHASE_DEFINITION)) {
            builder.setPhaseDefinition(customData.get(PHASE_DEFINITION));
        }
        if (customData.containsKey(SNAPSHOT_REPOSITORY)) {
            builder.setSnapshotRepository(customData.get(SNAPSHOT_REPOSITORY));
        }
        if (customData.containsKey(SNAPSHOT_NAME)) {
            builder.setSnapshotName(customData.get(SNAPSHOT_NAME));
        }
        if (customData.containsKey(INDEX_CREATION_DATE)) {
            try {
                builder.setIndexCreationDate(Long.parseLong(customData.get(INDEX_CREATION_DATE)));
@ -229,6 +243,12 @@ public class LifecycleExecutionState {
        if (phaseDefinition != null) {
            result.put(PHASE_DEFINITION, String.valueOf(phaseDefinition));
        }
        if (snapshotRepository != null) {
            result.put(SNAPSHOT_REPOSITORY, snapshotRepository);
        }
        if (snapshotName != null) {
            result.put(SNAPSHOT_NAME, snapshotName);
        }
        return Collections.unmodifiableMap(result);
    }

@ -280,6 +300,14 @@ public class LifecycleExecutionState {
        return stepTime;
    }

    public String getSnapshotName() {
        return snapshotName;
    }

    public String getSnapshotRepository() {
        return snapshotRepository;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
@ -296,13 +324,16 @@ public class LifecycleExecutionState {
            Objects.equals(isAutoRetryableError(), that.isAutoRetryableError()) &&
            Objects.equals(getFailedStepRetryCount(), that.getFailedStepRetryCount()) &&
            Objects.equals(getStepInfo(), that.getStepInfo()) &&
            Objects.equals(getSnapshotRepository(), that.getSnapshotRepository()) &&
            Objects.equals(getSnapshotName(), that.getSnapshotName()) &&
            Objects.equals(getPhaseDefinition(), that.getPhaseDefinition());
    }

    @Override
    public int hashCode() {
        return Objects.hash(getPhase(), getAction(), getStep(), getFailedStep(), isAutoRetryableError(), getFailedStepRetryCount(),
            getStepInfo(), getPhaseDefinition(), getLifecycleDate(), getPhaseTime(), getActionTime(), getStepTime());
            getStepInfo(), getPhaseDefinition(), getLifecycleDate(), getPhaseTime(), getActionTime(), getStepTime(),
            getSnapshotRepository(), getSnapshotName());
    }

    @Override
@ -323,6 +354,8 @@ public class LifecycleExecutionState {
        private Long stepTime;
        private Boolean isAutoRetryableError;
        private Integer failedStepRetryCount;
        private String snapshotName;
        private String snapshotRepository;

        public Builder setPhase(String phase) {
            this.phase = phase;
@ -384,9 +417,19 @@ public class LifecycleExecutionState {
            return this;
        }

        public Builder setSnapshotRepository(String snapshotRepository) {
            this.snapshotRepository = snapshotRepository;
            return this;
        }

        public Builder setSnapshotName(String snapshotName) {
            this.snapshotName = snapshotName;
            return this;
        }

        public LifecycleExecutionState build() {
            return new LifecycleExecutionState(phase, action, step, failedStep, isAutoRetryableError, failedStepRetryCount, stepInfo,
                phaseDefinition, indexCreationDate, phaseTime, actionTime, stepTime);
                phaseDefinition, indexCreationDate, phaseTime, actionTime, stepTime, snapshotRepository, snapshotName);
        }
    }
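
Note: a minimal sketch of how the new fields round-trip through the index's ILM custom metadata (assuming a no-argument builder() overload exists; the repository and snapshot names are hypothetical):

    LifecycleExecutionState state = LifecycleExecutionState.builder()
        .setSnapshotRepository("my-repo")
        .setSnapshotName("2020.03.30-myindex-mypolicy-abc123")
        .build();
    Map<String, String> custom = state.asMap();
    // custom now carries the "snapshot_repository" and "snapshot_name" keys, which the
    // containsKey branches above read back when the state is parsed from index metadata
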
@ -0,0 +1,119 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction;
import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest;

import java.util.Objects;

import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata;

/**
 * Restores the snapshot created for the designated index via the ILM policy to an index named using the provided prefix prepended to the
 * designated index name.
 */
public class MountSnapshotStep extends AsyncRetryDuringSnapshotActionStep {
    public static final String NAME = "mount-snapshot";

    private static final Logger logger = LogManager.getLogger(MountSnapshotStep.class);

    private final String restoredIndexPrefix;

    public MountSnapshotStep(StepKey key, StepKey nextStepKey, Client client, String restoredIndexPrefix) {
        super(key, nextStepKey, client);
        this.restoredIndexPrefix = restoredIndexPrefix;
    }

    @Override
    public boolean isRetryable() {
        return true;
    }

    public String getRestoredIndexPrefix() {
        return restoredIndexPrefix;
    }

    @Override
    void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, Listener listener) {
        final String indexName = indexMetadata.getIndex().getName();

        LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetadata);

        String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
        final String snapshotRepository = lifecycleState.getSnapshotRepository();
        if (Strings.hasText(snapshotRepository) == false) {
            listener.onFailure(new IllegalStateException("snapshot repository is not present for policy [" + policyName + "] and index [" +
                indexName + "]"));
            return;
        }

        final String snapshotName = lifecycleState.getSnapshotName();
        if (Strings.hasText(snapshotName) == false) {
            listener.onFailure(
                new IllegalStateException("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]"));
            return;
        }

        String mountedIndexName = restoredIndexPrefix + indexName;
        if (currentClusterState.metadata().index(mountedIndexName) != null) {
            logger.debug("mounted index [{}] for policy [{}] and index [{}] already exists. will not attempt to mount the index again",
                mountedIndexName, policyName, indexName);
            listener.onResponse(true);
            return;
        }

        final MountSearchableSnapshotRequest mountSearchableSnapshotRequest = new MountSearchableSnapshotRequest(mountedIndexName,
            snapshotRepository, snapshotName, indexName, Settings.builder()
            .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), Boolean.FALSE.toString())
            .build(),
            // we captured the index metadata when we took the snapshot. the index likely had the ILM execution state in the metadata.
            // if we were to restore the lifecycle.name setting, the restored index would be captured by the ILM runner and,
            // depending on what ILM execution state was captured at snapshot time, make its way forward from _that_ step in
            // the ILM policy.
            // we'll re-set this setting on the restored index at a later step once we restored a deterministic execution state
            new String[]{LifecycleSettings.LIFECYCLE_NAME},
            // we'll not wait for the restore to complete in this step as the async steps are executed from threads that shouldn't
            // perform expensive operations (ie. clusterStateProcessed)
            false);
        getClient().execute(MountSearchableSnapshotAction.INSTANCE, mountSearchableSnapshotRequest,
            ActionListener.wrap(response -> {
                if (response.status() != RestStatus.OK && response.status() != RestStatus.ACCEPTED) {
                    logger.debug("mount snapshot response failed to complete");
                    throw new ElasticsearchException("mount snapshot response failed to complete, got response " + response.status());
                }
                listener.onResponse(true);
            }, listener::onFailure));
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), restoredIndexPrefix);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        MountSnapshotStep other = (MountSnapshotStep) obj;
        return super.equals(obj) && Objects.equals(restoredIndexPrefix, other.restoredIndexPrefix);
    }
}
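
Note: the request built by performDuringNoSnapshot above is equivalent to the following sketch (the index, repository and snapshot names are hypothetical):

    MountSearchableSnapshotRequest request = new MountSearchableSnapshotRequest(
        "restored-myindex",                    // restoredIndexPrefix + source index name
        "my-repo",                             // snapshot repository recorded in the ILM execution state
        "2020.03.30-myindex-mypolicy-abc123",  // snapshot name recorded in the ILM execution state
        "myindex",                             // the index as named inside the snapshot
        Settings.builder().put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), "false").build(),
        new String[]{LifecycleSettings.LIFECYCLE_NAME},  // strip lifecycle.name until a later step restores it
        false);                                // don't block the ILM thread on the restore
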
@ -0,0 +1,137 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;

import java.io.IOException;
import java.util.Arrays;
import java.util.List;
import java.util.Objects;

/**
 * A {@link LifecycleAction} that will convert the index into a searchable snapshot, by taking a snapshot of the index, creating a
 * searchable snapshot and the corresponding "searchable snapshot index", deleting the original index and swapping its aliases to the
 * newly created searchable snapshot backed index.
 */
public class SearchableSnapshotAction implements LifecycleAction {
    public static final String NAME = "searchable_snapshot";

    public static final ParseField SNAPSHOT_REPOSITORY = new ParseField("snapshot_repository");

    public static final String RESTORED_INDEX_PREFIX = "restored-";

    private static final ConstructingObjectParser<SearchableSnapshotAction, Void> PARSER = new ConstructingObjectParser<>(NAME,
        a -> new SearchableSnapshotAction((String) a[0]));

    static {
        PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_REPOSITORY);
    }

    public static SearchableSnapshotAction parse(XContentParser parser) {
        return PARSER.apply(parser, null);
    }

    private final String snapshotRepository;

    public SearchableSnapshotAction(String snapshotRepository) {
        if (Strings.hasText(snapshotRepository) == false) {
            throw new IllegalArgumentException("the snapshot repository must be specified");
        }
        this.snapshotRepository = snapshotRepository;
    }

    public SearchableSnapshotAction(StreamInput in) throws IOException {
        this(in.readString());
    }

    @Override
    public List<Step> toSteps(Client client, String phase, StepKey nextStepKey) {
        StepKey waitForNoFollowerStepKey = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME);
        StepKey generateSnapshotNameKey = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME);
        StepKey cleanSnapshotKey = new StepKey(phase, NAME, CleanupSnapshotStep.NAME);
        StepKey createSnapshotKey = new StepKey(phase, NAME, CreateSnapshotStep.NAME);
        StepKey mountSnapshotKey = new StepKey(phase, NAME, MountSnapshotStep.NAME);
        StepKey waitForGreenRestoredIndexKey = new StepKey(phase, NAME, WaitForIndexColorStep.NAME);
        StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME);
        StepKey copyLifecyclePolicySettingKey = new StepKey(phase, NAME, CopySettingsStep.NAME);
        StepKey swapAliasesKey = new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME);

        WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, generateSnapshotNameKey,
            client);
        GenerateSnapshotNameStep generateSnapshotNameStep = new GenerateSnapshotNameStep(generateSnapshotNameKey, cleanSnapshotKey,
            snapshotRepository);
        CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, createSnapshotKey, client);
        AsyncActionBranchingStep createSnapshotBranchingStep = new AsyncActionBranchingStep(
            new CreateSnapshotStep(createSnapshotKey, mountSnapshotKey, client), cleanSnapshotKey, client);
        MountSnapshotStep mountSnapshotStep = new MountSnapshotStep(mountSnapshotKey, waitForGreenRestoredIndexKey,
            client, RESTORED_INDEX_PREFIX);
        WaitForIndexColorStep waitForGreenIndexHealthStep = new WaitForIndexColorStep(waitForGreenRestoredIndexKey,
            copyMetadataKey, ClusterHealthStatus.GREEN, RESTORED_INDEX_PREFIX);
        // a policy with only the cold phase will have a null "nextStepKey", hence the "null" nextStepKey passed in below when that's the
        // case
        CopyExecutionStateStep copyMetadataStep = new CopyExecutionStateStep(copyMetadataKey, copyLifecyclePolicySettingKey,
            RESTORED_INDEX_PREFIX, nextStepKey != null ? nextStepKey.getName() : "null");
        CopySettingsStep copySettingsStep = new CopySettingsStep(copyLifecyclePolicySettingKey, swapAliasesKey, RESTORED_INDEX_PREFIX,
            LifecycleSettings.LIFECYCLE_NAME);
        // this step's nextStepKey is null because the restored index (which after this step essentially takes the place of the source
        // index) was already sent to the next key when we restored the lifecycle execution state
        SwapAliasesAndDeleteSourceIndexStep swapAliasesAndDeleteSourceIndexStep = new SwapAliasesAndDeleteSourceIndexStep(swapAliasesKey,
            null, client, RESTORED_INDEX_PREFIX);

        return Arrays.asList(waitForNoFollowersStep, generateSnapshotNameStep, cleanupSnapshotStep, createSnapshotBranchingStep,
            mountSnapshotStep, waitForGreenIndexHealthStep, copyMetadataStep, copySettingsStep, swapAliasesAndDeleteSourceIndexStep);
    }

    @Override
    public boolean isSafeAction() {
        return true;
    }

    @Override
    public String getWriteableName() {
        return NAME;
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(snapshotRepository);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject();
        builder.field(SNAPSHOT_REPOSITORY.getPreferredName(), snapshotRepository);
        builder.endObject();
        return builder;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        SearchableSnapshotAction that = (SearchableSnapshotAction) o;
        return Objects.equals(snapshotRepository, that.snapshotRepository);
    }

    @Override
    public int hashCode() {
        return Objects.hash(snapshotRepository);
    }
}
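
Note: a hedged construction sketch (the repository name is hypothetical); on the wire the action serializes as {"searchable_snapshot": {"snapshot_repository": "my-repo"}}:

    // the constructor rejects a blank repository, mirroring the parser's required snapshot_repository field
    SearchableSnapshotAction action = new SearchableSnapshotAction("my-repo");
    // toSteps() expands the action into, in order: WaitForNoFollowersStep, GenerateSnapshotNameStep,
    // CleanupSnapshotStep, CreateSnapshotStep (branching back to cleanup on failure), MountSnapshotStep,
    // WaitForIndexColorStep (GREEN), CopyExecutionStateStep, CopySettingsStep,
    // SwapAliasesAndDeleteSourceIndexStep
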
@ -104,7 +104,8 @@ public class ShrinkAction implements LifecycleAction {
        CheckShrinkReadyStep checkShrinkReadyStep = new CheckShrinkReadyStep(allocationRoutedKey, shrinkKey);
        ShrinkStep shrink = new ShrinkStep(shrinkKey, enoughShardsKey, client, numberOfShards, SHRUNKEN_INDEX_PREFIX);
        ShrunkShardsAllocatedStep allocated = new ShrunkShardsAllocatedStep(enoughShardsKey, copyMetadataKey, SHRUNKEN_INDEX_PREFIX);
        CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, aliasKey, SHRUNKEN_INDEX_PREFIX);
        CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, aliasKey, SHRUNKEN_INDEX_PREFIX,
            ShrunkenIndexCheckStep.NAME);
        ShrinkSetAliasStep aliasSwapAndDelete = new ShrinkSetAliasStep(aliasKey, isShrunkIndexKey, client, SHRUNKEN_INDEX_PREFIX);
        ShrunkenIndexCheckStep waitOnShrinkTakeover = new ShrunkenIndexCheckStep(isShrunkIndexKey, nextStepKey, SHRUNKEN_INDEX_PREFIX);
        return Arrays.asList(conditionalSkipShrinkStep, waitForNoFollowersStep, readOnlyStep, setSingleNodeStep, checkShrinkReadyStep,
@ -5,15 +5,14 @@
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.IndexMetadata;

import java.util.Objects;

import static org.elasticsearch.xpack.core.ilm.SwapAliasesAndDeleteSourceIndexStep.deleteSourceIndexAndTransferAliases;

/**
 * Following shrinking an index and deleting the original index, this step creates an alias with the same name as the original index which
 * points to the new shrunken index to allow clients to continue to use the original index name without being aware that it has shrunk.
@ -37,23 +36,7 @@ public class ShrinkSetAliasStep extends AsyncRetryDuringSnapshotActionStep {
        String index = indexMetadata.getIndex().getName();
        // get target shrink index
        String targetIndexName = shrunkIndexPrefix + index;
        IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest()
            .masterNodeTimeout(getMasterTimeout(currentState))
            .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(index))
            .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(targetIndexName).alias(index));
        // copy over other aliases from original index
        indexMetadata.getAliases().values().spliterator().forEachRemaining(aliasMetadataObjectCursor -> {
            AliasMetadata aliasMetadataToAdd = aliasMetadataObjectCursor.value;
            // inherit all alias properties except `is_write_index`
            aliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
                .index(targetIndexName).alias(aliasMetadataToAdd.alias())
                .indexRouting(aliasMetadataToAdd.indexRouting())
                .searchRouting(aliasMetadataToAdd.searchRouting())
                .filter(aliasMetadataToAdd.filter() == null ? null : aliasMetadataToAdd.filter().string())
                .writeIndex(null));
        });
        getClient().admin().indices().aliases(aliasesRequest, ActionListener.wrap(response ->
            listener.onResponse(true), listener::onFailure));
        deleteSourceIndexAndTransferAliases(getClient(), indexMetadata, getMasterTimeout(currentState), targetIndexName, listener);
    }

    @Override
@ -0,0 +1,121 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Locale;
import java.util.Objects;

/**
 * This step swaps all the aliases from the source index to the restored index and deletes the source index. This is useful in scenarios
 * following a restore from snapshot operation where the restored index will take the place of the source index in the ILM lifecycle.
 */
public class SwapAliasesAndDeleteSourceIndexStep extends AsyncActionStep {
    public static final String NAME = "swap-aliases";
    private static final Logger logger = LogManager.getLogger(SwapAliasesAndDeleteSourceIndexStep.class);

    private final String targetIndexPrefix;

    public SwapAliasesAndDeleteSourceIndexStep(StepKey key, StepKey nextStepKey, Client client, String targetIndexPrefix) {
        super(key, nextStepKey, client);
        this.targetIndexPrefix = targetIndexPrefix;
    }

    @Override
    public boolean isRetryable() {
        return true;
    }

    public String getTargetIndexPrefix() {
        return targetIndexPrefix;
    }

    @Override
    public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer,
                              Listener listener) {
        String originalIndex = indexMetadata.getIndex().getName();
        final String targetIndexName = targetIndexPrefix + originalIndex;
        IndexMetadata targetIndexMetadata = currentClusterState.metadata().index(targetIndexName);

        if (targetIndexMetadata == null) {
            String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
            String errorMessage = String.format(Locale.ROOT, "target index [%s] doesn't exist. stopping execution of lifecycle [%s] for" +
                " index [%s]", targetIndexName, policyName, originalIndex);
            logger.debug(errorMessage);
            listener.onFailure(new IllegalStateException(errorMessage));
            return;
        }

        deleteSourceIndexAndTransferAliases(getClient(), indexMetadata, getMasterTimeout(currentClusterState), targetIndexName, listener);
    }

    /**
     * Executes an {@link IndicesAliasesRequest} to copy over all the aliases from the source to the target index, and remove the source
     * index.
     * <p>
     * The is_write_index will *not* be set on the target index as this operation is currently executed on read-only indices.
     */
    static void deleteSourceIndexAndTransferAliases(Client client, IndexMetadata sourceIndex, TimeValue masterTimeoutValue,
                                                    String targetIndex, Listener listener) {
        String sourceIndexName = sourceIndex.getIndex().getName();
        IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest()
            .masterNodeTimeout(masterTimeoutValue)
            .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(sourceIndexName))
            .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(targetIndex).alias(sourceIndexName));
        // copy over other aliases from source index
        sourceIndex.getAliases().values().spliterator().forEachRemaining(aliasMetaDataObjectCursor -> {
            AliasMetadata aliasMetaDataToAdd = aliasMetaDataObjectCursor.value;
            // inherit all alias properties except `is_write_index`
            aliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add()
                .index(targetIndex).alias(aliasMetaDataToAdd.alias())
                .indexRouting(aliasMetaDataToAdd.indexRouting())
                .searchRouting(aliasMetaDataToAdd.searchRouting())
                .filter(aliasMetaDataToAdd.filter() == null ? null : aliasMetaDataToAdd.filter().string())
                .writeIndex(null));
        });

        client.admin().indices().aliases(aliasesRequest,
            ActionListener.wrap(response -> {
                if (response.isAcknowledged() == false) {
                    logger.warn("aliases swap from [{}] to [{}] response was not acknowledged", sourceIndexName, targetIndex);
                }
                listener.onResponse(true);
            }, listener::onFailure));
    }

    @Override
    public boolean indexSurvives() {
        return false;
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), targetIndexPrefix);
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
            return false;
        }
        if (getClass() != obj.getClass()) {
            return false;
        }
        SwapAliasesAndDeleteSourceIndexStep other = (SwapAliasesAndDeleteSourceIndexStep) obj;
        return super.equals(obj) &&
            Objects.equals(targetIndexPrefix, other.targetIndexPrefix);
    }
}
@ -41,7 +41,7 @@ public class TimeseriesLifecycleType implements LifecycleType {
    static final List<String> ORDERED_VALID_WARM_ACTIONS = Arrays.asList(SetPriorityAction.NAME, UnfollowAction.NAME, ReadOnlyAction.NAME,
        AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME);
    static final List<String> ORDERED_VALID_COLD_ACTIONS = Arrays.asList(SetPriorityAction.NAME, UnfollowAction.NAME, AllocateAction.NAME,
        FreezeAction.NAME);
        FreezeAction.NAME, SearchableSnapshotAction.NAME);
    static final List<String> ORDERED_VALID_DELETE_ACTIONS = Arrays.asList(WaitForSnapshotAction.NAME, DeleteAction.NAME);
    static final Set<String> VALID_HOT_ACTIONS = Sets.newHashSet(ORDERED_VALID_HOT_ACTIONS);
    static final Set<String> VALID_WARM_ACTIONS = Sets.newHashSet(ORDERED_VALID_WARM_ACTIONS);
@ -74,8 +74,9 @@ public class TimeseriesLifecycleType implements LifecycleType {
            Phase phase = phases.get(phaseName);
            if (phase != null) {
                Map<String, LifecycleAction> actions = phase.getActions();
                if (actions.containsKey(UnfollowAction.NAME) == false
                    && (actions.containsKey(RolloverAction.NAME) || actions.containsKey(ShrinkAction.NAME))) {
                if (actions.containsKey(UnfollowAction.NAME) == false &&
                    (actions.containsKey(RolloverAction.NAME) || actions.containsKey(ShrinkAction.NAME) ||
                        actions.containsKey(SearchableSnapshotAction.NAME))) {
                    Map<String, LifecycleAction> actionMap = new HashMap<>(phase.getActions());
                    actionMap.put(UnfollowAction.NAME, new UnfollowAction());
                    phase = new Phase(phase.getName(), phase.getMinimumAge(), actionMap);
@ -7,42 +7,58 @@
package org.elasticsearch.xpack.core.ilm;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.xcontent.ToXContentObject;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

import java.io.IOException;
import java.util.Locale;
import java.util.Objects;

/**
 * Wait Step for index based on color
 * Wait Step for index based on color. Optionally derives the index name using the provided prefix (if any).
 */

class WaitForIndexColorStep extends ClusterStateWaitStep {

    static final String NAME = "wait-for-index-color";

    private static final Logger logger = LogManager.getLogger(WaitForIndexColorStep.class);

    private final ClusterHealthStatus color;
    @Nullable
    private final String indexNamePrefix;

    WaitForIndexColorStep(StepKey key, StepKey nextStepKey, ClusterHealthStatus color) {
        this(key, nextStepKey, color, null);
    }

    WaitForIndexColorStep(StepKey key, StepKey nextStepKey, ClusterHealthStatus color, @Nullable String indexNamePrefix) {
        super(key, nextStepKey);
        this.color = color;
        this.indexNamePrefix = indexNamePrefix;
    }

    public ClusterHealthStatus getColor() {
        return this.color;
    }

    public String getIndexNamePrefix() {
        return indexNamePrefix;
    }

    @Override
    public int hashCode() {
        return Objects.hash(super.hashCode(), this.color);
        return Objects.hash(super.hashCode(), this.color, this.indexNamePrefix);
    }

    @Override
@ -54,13 +70,23 @@ class WaitForIndexColorStep extends ClusterStateWaitStep {
            return false;
        }
        WaitForIndexColorStep other = (WaitForIndexColorStep) obj;
        return super.equals(obj) && Objects.equals(this.color, other.color);
        return super.equals(obj) && Objects.equals(this.color, other.color) && Objects.equals(this.indexNamePrefix, other.indexNamePrefix);
    }

    @Override
    public Result isConditionMet(Index index, ClusterState clusterState) {
        RoutingTable routingTable = clusterState.routingTable();
        IndexRoutingTable indexRoutingTable = routingTable.index(index);
        String indexName = indexNamePrefix != null ? indexNamePrefix + index.getName() : index.getName();
        IndexMetadata indexMetadata = clusterState.metadata().index(indexName);

        if (indexMetadata == null) {
            String errorMessage = String.format(Locale.ROOT, "[%s] lifecycle action for index [%s] executed but index no longer exists",
                getKey().getAction(), indexName);
            // Index must have since been deleted
            logger.debug(errorMessage);
            return new Result(false, new Info(errorMessage));
        }

        IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(indexMetadata.getIndex());
        Result result;
        switch (this.color) {
            case GREEN:
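
Note: a brief sketch of the prefix handling added above (the step keys are assumed to exist in the calling context): for a managed index "myindex" and prefix "restored-", the GREEN health check runs against "restored-myindex" rather than the managed index itself:

    WaitForIndexColorStep step = new WaitForIndexColorStep(key, nextKey, ClusterHealthStatus.GREEN, "restored-");
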
@ -0,0 +1,20 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.searchablesnapshots;

import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;

public class MountSearchableSnapshotAction extends ActionType<RestoreSnapshotResponse> {

    public static final MountSearchableSnapshotAction INSTANCE = new MountSearchableSnapshotAction();
    public static final String NAME = "cluster:admin/snapshot/mount";

    private MountSearchableSnapshotAction() {
        super(NAME, RestoreSnapshotResponse::new);
    }
}
@ -0,0 +1,189 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.searchablesnapshots;

import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.support.master.MasterNodeRequest;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.ConstructingObjectParser;
import org.elasticsearch.common.xcontent.ObjectParser;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.rest.RestRequest;

import java.io.IOException;
import java.util.Arrays;
import java.util.Objects;
import java.util.stream.Collectors;

import static org.elasticsearch.common.settings.Settings.readSettingsFromStream;
import static org.elasticsearch.common.settings.Settings.writeSettingsToStream;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg;
import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg;

public class MountSearchableSnapshotRequest extends MasterNodeRequest<MountSearchableSnapshotRequest> {

    public static final ConstructingObjectParser<MountSearchableSnapshotRequest, RestRequest> PARSER = new ConstructingObjectParser<>(
        "mount_searchable_snapshot", true,
        (a, request) -> new MountSearchableSnapshotRequest(
            (String) ((a[1] != null) ? a[1] : Objects.requireNonNull(a[0])),
            request.param("repository"),
            request.param("snapshot"),
            (String) a[0],
            (Settings) ((a[2] != null) ? a[2] : Settings.EMPTY),
            (String[]) ((a[3] != null) ? a[3] : Strings.EMPTY_ARRAY),
            request.paramAsBoolean("wait_for_completion", false)));

    private static final ParseField INDEX_FIELD = new ParseField("index");
    private static final ParseField RENAMED_INDEX_FIELD = new ParseField("renamed_index");
    private static final ParseField INDEX_SETTINGS_FIELD = new ParseField("index_settings");
    private static final ParseField IGNORE_INDEX_SETTINGS_FIELD = new ParseField("ignore_index_settings");

    static {
        PARSER.declareField(constructorArg(), XContentParser::text, INDEX_FIELD, ObjectParser.ValueType.STRING);
        PARSER.declareField(optionalConstructorArg(), XContentParser::text, RENAMED_INDEX_FIELD, ObjectParser.ValueType.STRING);
        PARSER.declareField(optionalConstructorArg(), Settings::fromXContent, INDEX_SETTINGS_FIELD, ObjectParser.ValueType.OBJECT);
        PARSER.declareField(optionalConstructorArg(),
            p -> p.list().stream().map(s -> (String) s).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY),
            IGNORE_INDEX_SETTINGS_FIELD, ObjectParser.ValueType.STRING_ARRAY);
    }

    private final String mountedIndexName;
    private final String repositoryName;
    private final String snapshotName;
    private final String snapshotIndexName;
    private final Settings indexSettings;
    private final String[] ignoredIndexSettings;
    private final boolean waitForCompletion;

    /**
     * Constructs a new mount searchable snapshot request, restoring an index with the settings needed to make it a searchable snapshot.
     */
    public MountSearchableSnapshotRequest(String mountedIndexName, String repositoryName, String snapshotName, String snapshotIndexName,
                                          Settings indexSettings, String[] ignoredIndexSettings, boolean waitForCompletion) {
        this.mountedIndexName = Objects.requireNonNull(mountedIndexName);
        this.repositoryName = Objects.requireNonNull(repositoryName);
        this.snapshotName = Objects.requireNonNull(snapshotName);
        this.snapshotIndexName = Objects.requireNonNull(snapshotIndexName);
        this.indexSettings = Objects.requireNonNull(indexSettings);
        this.ignoredIndexSettings = Objects.requireNonNull(ignoredIndexSettings);
        this.waitForCompletion = waitForCompletion;
    }

    public MountSearchableSnapshotRequest(StreamInput in) throws IOException {
        super(in);
        this.mountedIndexName = in.readString();
        this.repositoryName = in.readString();
        this.snapshotName = in.readString();
        this.snapshotIndexName = in.readString();
        this.indexSettings = readSettingsFromStream(in);
        this.ignoredIndexSettings = in.readStringArray();
        this.waitForCompletion = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        out.writeString(mountedIndexName);
        out.writeString(repositoryName);
        out.writeString(snapshotName);
        out.writeString(snapshotIndexName);
        writeSettingsToStream(indexSettings, out);
        out.writeStringArray(ignoredIndexSettings);
        out.writeBoolean(waitForCompletion);
    }

    @Override
    public ActionRequestValidationException validate() {
        return null;
    }

    /**
     * @return the name of the index that will be created
     */
    public String mountedIndexName() {
        return mountedIndexName;
    }

    /**
     * @return the name of the repository
     */
    public String repositoryName() {
        return this.repositoryName;
    }

    /**
     * @return the name of the snapshot.
     */
    public String snapshotName() {
        return this.snapshotName;
    }

    /**
     * @return the name of the index contained in the snapshot
     */
    public String snapshotIndexName() {
        return snapshotIndexName;
    }

    /**
     * @return true if the operation will wait for completion
     */
    public boolean waitForCompletion() {
        return waitForCompletion;
    }

    /**
     * @return settings that should be added to the index when it is mounted
     */
    public Settings indexSettings() {
        return this.indexSettings;
    }

    /**
     * @return the names of settings that should be removed from the index when it is mounted
     */
    public String[] ignoreIndexSettings() {
        return ignoredIndexSettings;
    }

    @Override
    public String getDescription() {
        return "mount snapshot [" + repositoryName + ":" + snapshotName + ":" + snapshotIndexName + "] as [" + mountedIndexName + "]";
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;
        MountSearchableSnapshotRequest that = (MountSearchableSnapshotRequest) o;
        return waitForCompletion == that.waitForCompletion &&
            Objects.equals(mountedIndexName, that.mountedIndexName) &&
            Objects.equals(repositoryName, that.repositoryName) &&
            Objects.equals(snapshotName, that.snapshotName) &&
            Objects.equals(snapshotIndexName, that.snapshotIndexName) &&
            Objects.equals(indexSettings, that.indexSettings) &&
            Arrays.equals(ignoredIndexSettings, that.ignoredIndexSettings) &&
            Objects.equals(masterNodeTimeout, that.masterNodeTimeout);
    }

    @Override
    public int hashCode() {
        int result = Objects.hash(mountedIndexName, repositoryName, snapshotName, snapshotIndexName, indexSettings, waitForCompletion,
            masterNodeTimeout);
        result = 31 * result + Arrays.hashCode(ignoredIndexSettings);
        return result;
    }

    @Override
    public String toString() {
        return getDescription();
    }
}
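
Note: the PARSER above combines the request's "repository", "snapshot" and "wait_for_completion" parameters with a body carrying "index", an optional "renamed_index", "index_settings" and "ignore_index_settings". A transport-level construction sketch with hypothetical names:

    MountSearchableSnapshotRequest request = new MountSearchableSnapshotRequest(
        "my-mounted-index",   // renamed_index, defaulting to the snapshot's index name when absent
        "my-repo",            // repository parameter
        "my-snapshot",        // snapshot parameter
        "my-index",           // "index": the index as it exists inside the snapshot
        Settings.EMPTY,       // "index_settings" applied on mount
        Strings.EMPTY_ARRAY,  // "ignore_index_settings" stripped on mount
        false);               // wait_for_completion
    // validate() returns null: the constructor's requireNonNull checks are the only invariants
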
@ -0,0 +1,452 @@
|
||||
/*
|
||||
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
|
||||
* or more contributor license agreements. Licensed under the Elastic License;
|
||||
* you may not use this file except in compliance with the Elastic License.
|
||||
*/
|
||||
package org.elasticsearch.xpack.core.searchablesnapshots;
|
||||
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.io.stream.Writeable;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.xcontent.ToXContentObject;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.repositories.IndexId;
|
||||
import org.elasticsearch.snapshots.SnapshotId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
|
||||
import static java.util.Collections.unmodifiableList;
|
||||
import static java.util.stream.Collectors.toList;
|
||||
|
||||
public class SearchableSnapshotShardStats implements Writeable, ToXContentObject {
|
||||
|
||||
private final List<CacheIndexInputStats> inputStats;
|
||||
private final ShardRouting shardRouting;
|
||||
private final SnapshotId snapshotId;
|
||||
private final IndexId indexId;
|
||||
|
||||
public SearchableSnapshotShardStats(ShardRouting shardRouting, SnapshotId snapshotId, IndexId indexId,
|
||||
List<CacheIndexInputStats> stats) {
|
||||
this.shardRouting = Objects.requireNonNull(shardRouting);
|
||||
this.snapshotId = Objects.requireNonNull(snapshotId);
|
||||
this.indexId = Objects.requireNonNull(indexId);
|
||||
this.inputStats = unmodifiableList(Objects.requireNonNull(stats));
|
||||
}
|
||||
|
||||
public SearchableSnapshotShardStats(StreamInput in) throws IOException {
|
||||
this.shardRouting = new ShardRouting(in);
|
||||
this.snapshotId = new SnapshotId(in);
|
||||
this.indexId = new IndexId(in);
|
||||
this.inputStats = in.readList(CacheIndexInputStats::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
shardRouting.writeTo(out);
|
||||
snapshotId.writeTo(out);
|
||||
indexId.writeTo(out);
|
||||
out.writeList(inputStats);
|
||||
}
|
||||
|
||||
public ShardRouting getShardRouting() {
|
||||
return shardRouting;
|
||||
}
|
||||
|
||||
public SnapshotId getSnapshotId() {
|
||||
return snapshotId;
|
||||
}
|
||||
|
||||
public IndexId getIndexId() {
|
||||
return indexId;
|
||||
}
|
||||
|
||||
public List<CacheIndexInputStats> getStats() {
|
||||
return inputStats;
|
||||
}
|
||||
|
||||
@Override
|
||||
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
|
||||
builder.startObject();
|
||||
{
|
||||
builder.field("snapshot_uuid", getSnapshotId().getUUID());
|
||||
builder.field("index_uuid", getIndexId().getId());
|
||||
builder.startObject("shard");
|
||||
{
|
||||
builder.field("state", shardRouting.state());
|
||||
builder.field("primary", shardRouting.primary());
|
||||
builder.field("node", shardRouting.currentNodeId());
|
||||
if (shardRouting.relocatingNodeId() != null) {
|
||||
builder.field("relocating_node", shardRouting.relocatingNodeId());
|
||||
}
|
||||
}
|
||||
builder.endObject();
|
||||
builder.startArray("files");
|
||||
{
                List<CacheIndexInputStats> stats = inputStats.stream()
                    .sorted(Comparator.comparing(CacheIndexInputStats::getFileName)).collect(toList());
                for (CacheIndexInputStats stat : stats) {
                    stat.toXContent(builder, params);
                }
            }
            builder.endArray();
        }
        return builder.endObject();
    }

    @Override
    public boolean equals(Object other) {
        if (this == other) {
            return true;
        }
        if (other == null || getClass() != other.getClass()) {
            return false;
        }
        SearchableSnapshotShardStats that = (SearchableSnapshotShardStats) other;
        return Objects.equals(shardRouting, that.shardRouting)
            && Objects.equals(snapshotId, that.snapshotId)
            && Objects.equals(indexId, that.indexId)
            && Objects.equals(inputStats, that.inputStats);
    }

    @Override
    public int hashCode() {
        return Objects.hash(shardRouting, snapshotId, indexId, inputStats);
    }

    public static class CacheIndexInputStats implements Writeable, ToXContentObject {

        private final String fileName;
        private final long fileLength;

        private final long openCount;
        private final long closeCount;

        private final Counter forwardSmallSeeks;
        private final Counter backwardSmallSeeks;
        private final Counter forwardLargeSeeks;
        private final Counter backwardLargeSeeks;
        private final Counter contiguousReads;
        private final Counter nonContiguousReads;
        private final Counter cachedBytesRead;
        private final TimedCounter cachedBytesWritten;
        private final TimedCounter directBytesRead;
        private final TimedCounter optimizedBytesRead;

        public CacheIndexInputStats(String fileName, long fileLength, long openCount, long closeCount,
                                    Counter forwardSmallSeeks, Counter backwardSmallSeeks,
                                    Counter forwardLargeSeeks, Counter backwardLargeSeeks,
                                    Counter contiguousReads, Counter nonContiguousReads,
                                    Counter cachedBytesRead, TimedCounter cachedBytesWritten,
                                    TimedCounter directBytesRead, TimedCounter optimizedBytesRead) {
            this.fileName = fileName;
            this.fileLength = fileLength;
            this.openCount = openCount;
            this.closeCount = closeCount;
            this.forwardSmallSeeks = forwardSmallSeeks;
            this.backwardSmallSeeks = backwardSmallSeeks;
            this.forwardLargeSeeks = forwardLargeSeeks;
            this.backwardLargeSeeks = backwardLargeSeeks;
            this.contiguousReads = contiguousReads;
            this.nonContiguousReads = nonContiguousReads;
            this.cachedBytesRead = cachedBytesRead;
            this.cachedBytesWritten = cachedBytesWritten;
            this.directBytesRead = directBytesRead;
            this.optimizedBytesRead = optimizedBytesRead;
        }

        CacheIndexInputStats(final StreamInput in) throws IOException {
            this.fileName = in.readString();
            this.fileLength = in.readVLong();
            this.openCount = in.readVLong();
            this.closeCount = in.readVLong();
            this.forwardSmallSeeks = new Counter(in);
            this.backwardSmallSeeks = new Counter(in);
            this.forwardLargeSeeks = new Counter(in);
            this.backwardLargeSeeks = new Counter(in);
            this.contiguousReads = new Counter(in);
            this.nonContiguousReads = new Counter(in);
            this.cachedBytesRead = new Counter(in);
            this.cachedBytesWritten = new TimedCounter(in);
            this.directBytesRead = new TimedCounter(in);
            this.optimizedBytesRead = new TimedCounter(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(fileName);
            out.writeVLong(fileLength);
            out.writeVLong(openCount);
            out.writeVLong(closeCount);

            forwardSmallSeeks.writeTo(out);
            backwardSmallSeeks.writeTo(out);
            forwardLargeSeeks.writeTo(out);
            backwardLargeSeeks.writeTo(out);
            contiguousReads.writeTo(out);
            nonContiguousReads.writeTo(out);
            cachedBytesRead.writeTo(out);
            cachedBytesWritten.writeTo(out);
            directBytesRead.writeTo(out);
            optimizedBytesRead.writeTo(out);
        }

        public String getFileName() {
            return fileName;
        }

        public long getFileLength() {
            return fileLength;
        }

        public long getOpenCount() {
            return openCount;
        }

        public long getCloseCount() {
            return closeCount;
        }

        public Counter getForwardSmallSeeks() {
            return forwardSmallSeeks;
        }

        public Counter getBackwardSmallSeeks() {
            return backwardSmallSeeks;
        }

        public Counter getForwardLargeSeeks() {
            return forwardLargeSeeks;
        }

        public Counter getBackwardLargeSeeks() {
            return backwardLargeSeeks;
        }

        public Counter getContiguousReads() {
            return contiguousReads;
        }

        public Counter getNonContiguousReads() {
            return nonContiguousReads;
        }

        public Counter getCachedBytesRead() {
            return cachedBytesRead;
        }

        public TimedCounter getCachedBytesWritten() {
            return cachedBytesWritten;
        }

        public TimedCounter getDirectBytesRead() {
            return directBytesRead;
        }

        public TimedCounter getOptimizedBytesRead() {
            return optimizedBytesRead;
        }

        @Override
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            {
                builder.field("name", getFileName());
                builder.field("length", getFileLength());
                builder.field("open_count", getOpenCount());
                builder.field("close_count", getCloseCount());
                builder.field("contiguous_bytes_read", getContiguousReads());
                builder.field("non_contiguous_bytes_read", getNonContiguousReads());
                builder.field("cached_bytes_read", getCachedBytesRead());
                builder.field("cached_bytes_written", getCachedBytesWritten());
                builder.field("direct_bytes_read", getDirectBytesRead());
                builder.field("optimized_bytes_read", getOptimizedBytesRead());
                {
                    builder.startObject("forward_seeks");
                    builder.field("small", getForwardSmallSeeks());
                    builder.field("large", getForwardLargeSeeks());
                    builder.endObject();
                }
                {
                    builder.startObject("backward_seeks");
                    builder.field("small", getBackwardSmallSeeks());
                    builder.field("large", getBackwardLargeSeeks());
                    builder.endObject();
                }
            }
            return builder.endObject();
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other == null || getClass() != other.getClass()) {
                return false;
            }
            CacheIndexInputStats stats = (CacheIndexInputStats) other;
            return fileLength == stats.fileLength
                && openCount == stats.openCount
                && closeCount == stats.closeCount
                && Objects.equals(fileName, stats.fileName)
                && Objects.equals(forwardSmallSeeks, stats.forwardSmallSeeks)
                && Objects.equals(backwardSmallSeeks, stats.backwardSmallSeeks)
                && Objects.equals(forwardLargeSeeks, stats.forwardLargeSeeks)
                && Objects.equals(backwardLargeSeeks, stats.backwardLargeSeeks)
                && Objects.equals(contiguousReads, stats.contiguousReads)
                && Objects.equals(nonContiguousReads, stats.nonContiguousReads)
                && Objects.equals(cachedBytesRead, stats.cachedBytesRead)
                && Objects.equals(cachedBytesWritten, stats.cachedBytesWritten)
                && Objects.equals(directBytesRead, stats.directBytesRead)
                && Objects.equals(optimizedBytesRead, stats.optimizedBytesRead);
        }

        @Override
        public int hashCode() {
            return Objects.hash(fileName, fileLength, openCount, closeCount,
                forwardSmallSeeks, backwardSmallSeeks,
                forwardLargeSeeks, backwardLargeSeeks,
                contiguousReads, nonContiguousReads,
                cachedBytesRead, cachedBytesWritten,
                directBytesRead, optimizedBytesRead);
        }
    }

    public static class Counter implements Writeable, ToXContentObject {

        private final long count;
        private final long total;
        private final long min;
        private final long max;

        public Counter(final long count, final long total, final long min, final long max) {
            this.count = count;
            this.total = total;
            this.min = min;
            this.max = max;
        }

        Counter(final StreamInput in) throws IOException {
            this.count = in.readZLong();
            this.total = in.readZLong();
            this.min = in.readZLong();
            this.max = in.readZLong();
        }

        @Override
        public void writeTo(final StreamOutput out) throws IOException {
            out.writeZLong(count);
            out.writeZLong(total);
            out.writeZLong(min);
            out.writeZLong(max);
        }

        @Override
        public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            {
                builder.field("count", count);
                builder.field("sum", total);
                builder.field("min", min);
                builder.field("max", max);
                innerToXContent(builder, params);
            }
            builder.endObject();
            return builder;
        }

        void innerToXContent(XContentBuilder builder, Params params) throws IOException {
        }

        public long getCount() {
            return count;
        }

        public long getTotal() {
            return total;
        }

        public long getMin() {
            return min;
        }

        public long getMax() {
            return max;
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other == null || getClass() != other.getClass()) {
                return false;
            }
            Counter that = (Counter) other;
            return count == that.count
                && total == that.total
                && min == that.min
                && max == that.max;
        }

        @Override
        public int hashCode() {
            return Objects.hash(count, total, min, max);
        }
    }

    public static class TimedCounter extends Counter {

        private final long totalNanoseconds;

        public TimedCounter(long count, long total, long min, long max, long totalNanoseconds) {
            super(count, total, min, max);
            this.totalNanoseconds = totalNanoseconds;
        }

        TimedCounter(StreamInput in) throws IOException {
            super(in);
            totalNanoseconds = in.readZLong();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeZLong(totalNanoseconds);
        }
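
        // The human-readable "time" field is emitted only when the builder is in
        // human-readable mode; "time_in_nanos" is always written for machine consumption.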
        @Override
        void innerToXContent(XContentBuilder builder, Params params) throws IOException {
            if (builder.humanReadable()) {
                builder.field("time", TimeValue.timeValueNanos(totalNanoseconds).toString());
            }
            builder.field("time_in_nanos", totalNanoseconds);
        }

        @Override
        public boolean equals(Object other) {
            if (this == other) {
                return true;
            }
            if (other == null || getClass() != other.getClass()) {
                return false;
            }
            if (super.equals(other) == false) {
                return false;
            }
            TimedCounter that = (TimedCounter) other;
            return totalNanoseconds == that.totalNanoseconds;
        }

        @Override
        public int hashCode() {
            return Objects.hash(super.hashCode(), totalNanoseconds);
        }
    }

}
@ -10,16 +10,11 @@ import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.Diffable;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ParseField;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
@ -31,14 +26,13 @@ import org.elasticsearch.xpack.core.scheduler.Cron;

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Objects;

import static org.elasticsearch.cluster.metadata.MetadataCreateIndexService.MAX_INDEX_NAME_BYTES;
import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.generateSnapshotName;
import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.validateGeneratedSnapshotName;

/**
 * A {@code SnapshotLifecyclePolicy} is a policy for the cluster including a schedule of when a
@ -62,8 +56,6 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable<SnapshotLifecycleP
    private static final ParseField REPOSITORY = new ParseField("repository");
    private static final ParseField CONFIG = new ParseField("config");
    private static final ParseField RETENTION = new ParseField("retention");
    private static final IndexNameExpressionResolver.DateMathExpressionResolver DATE_MATH_RESOLVER =
        new IndexNameExpressionResolver.DateMathExpressionResolver();
    private static final String METADATA_FIELD_NAME = "metadata";

    @SuppressWarnings("unchecked")
@ -160,22 +152,10 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable<SnapshotLifecycleP

        // Snapshot name validation
        // We generate a snapshot name here to make sure it validates after applying date math
        final String snapshotName = generateSnapshotName(new ResolverContext());
        if (Strings.hasText(name) == false) {
            err.addValidationError("invalid snapshot name [" + name + "]: cannot be empty");
        }
        if (snapshotName.contains("#")) {
            err.addValidationError("invalid snapshot name [" + name + "]: must not contain '#'");
        }
        if (snapshotName.charAt(0) == '_') {
            err.addValidationError("invalid snapshot name [" + name + "]: must not start with '_'");
        }
        if (snapshotName.toLowerCase(Locale.ROOT).equals(snapshotName) == false) {
            err.addValidationError("invalid snapshot name [" + name + "]: must be lowercase");
        }
        if (Strings.validFileName(snapshotName) == false) {
            err.addValidationError("invalid snapshot name [" + name + "]: must not contain the following characters " +
                Strings.INVALID_FILENAME_CHARS);
        final String snapshotName = generateSnapshotName(this.name);
        ActionRequestValidationException nameValidationErrors = validateGeneratedSnapshotName(name, snapshotName);
        if (nameValidationErrors != null) {
            err.addValidationErrors(nameValidationErrors.validationErrors());
        }

        // Schedule validation
@ -234,26 +214,12 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable<SnapshotLifecycleP
        return newMetadata;
    }

    /**
     * Since snapshots need to be uniquely named, this method will resolve any date math used in
     * the provided name, as well as appending a unique identifier so expressions that may overlap
     * still result in unique snapshot names.
     */
    public String generateSnapshotName(Context context) {
        List<String> candidates = DATE_MATH_RESOLVER.resolve(context, Collections.singletonList(this.name));
        if (candidates.size() != 1) {
            throw new IllegalStateException("resolving snapshot name " + this.name + " generated more than one candidate: " + candidates);
        }
        // TODO: we are breaking the rules of UUIDs by lowercasing this here, find an alternative (snapshot names must be lowercase)
        return candidates.get(0) + "-" + UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT);
    }
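
    // Illustrative example (hypothetical values): a name of "<nightly-{now/d}>" would resolve
    // to something like "nightly-2019.03.15-7bdxdluwsqeemtxfyuqpig" - the date math is applied
    // first, then a lowercased random UUID is appended so overlapping expressions stay unique.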

    /**
     * Generate a new create snapshot request from this policy. The name of the snapshot is
     * generated at this time based on any date math expressions in the "name" field.
     */
    public CreateSnapshotRequest toRequest() {
        CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(new ResolverContext()));
        CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(this.name));
        Map<String, Object> mergedConfiguration = configuration == null ? new HashMap<>() : new HashMap<>(configuration);
        @SuppressWarnings("unchecked")
        Map<String, Object> metadata = (Map<String, Object>) mergedConfiguration.get("metadata");
@ -324,28 +290,4 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable<SnapshotLifecycleP
        return Strings.toString(this);
    }

    /**
     * This is a context for the DateMathExpressionResolver, which does not require
     * {@code IndicesOptions} or {@code ClusterState} since it only uses the start
     * time to resolve expressions
     */
    public static final class ResolverContext extends Context {
        public ResolverContext() {
            this(System.currentTimeMillis());
        }

        public ResolverContext(long startTime) {
            super(null, null, startTime, false, false);
        }

        @Override
        public ClusterState getState() {
            throw new UnsupportedOperationException("should never be called");
        }

        @Override
        public IndicesOptions getOptions() {
            throw new UnsupportedOperationException("should never be called");
        }
    }
}

@ -0,0 +1,145 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateObserver;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.common.settings.Settings;

import static org.hamcrest.Matchers.is;

public class AsyncActionBranchingStepTests extends AbstractStepMasterTimeoutTestCase<AsyncActionBranchingStep> {

    @Override
    protected AsyncActionBranchingStep createRandomInstance() {
        return new AsyncActionBranchingStep(new UpdateSettingsStep(randomStepKey(), randomStepKey(), client, Settings.EMPTY),
            randomStepKey(), client);
    }

    @Override
    protected AsyncActionBranchingStep mutateInstance(AsyncActionBranchingStep instance) {
        AsyncActionStep wrappedStep = instance.getStepToExecute();
        Step.StepKey nextKeyOnIncompleteResponse = instance.getNextKeyOnIncompleteResponse();

        switch (between(0, 1)) {
            case 0:
                wrappedStep = new UpdateSettingsStep(randomStepKey(), randomStepKey(), client, Settings.EMPTY);
                break;
            case 1:
                nextKeyOnIncompleteResponse = randomStepKey();
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new AsyncActionBranchingStep(wrappedStep, nextKeyOnIncompleteResponse, client);
    }

    @Override
    protected AsyncActionBranchingStep copyInstance(AsyncActionBranchingStep instance) {
        return new AsyncActionBranchingStep(instance.getStepToExecute(), instance.getNextKeyOnIncompleteResponse(), instance.getClient());
    }

    protected IndexMetadata getIndexMetadata() {
        return IndexMetadata.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT))
            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build();
    }

    public void testBranchStepKeyIsTheWrappedStepKey() {
        AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) {
            @Override
            public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer,
                                      Listener listener) {
            }
        };

        AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, randomStepKey(), client);
        assertThat(asyncActionBranchingStep.getKey(), is(stepToExecute.getKey()));
    }

    public void testBranchStepNextKeyOnCompleteResponse() {
        AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) {
            @Override
            public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer,
                                      Listener listener) {
                listener.onResponse(true);
            }
        };

        AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, randomStepKey(), client);

        asyncActionBranchingStep.performAction(getIndexMetadata(), emptyClusterState(), null, new AsyncActionStep.Listener() {

            @Override
            public void onResponse(boolean complete) {
                assertThat(complete, is(true));
            }

            @Override
            public void onFailure(Exception e) {
                fail("not expecting a failure as the wrapped step was successful");
            }
        });
        assertThat(asyncActionBranchingStep.getNextStepKey(), is(stepToExecute.getNextStepKey()));
    }

    public void testBranchStepNextKeyOnInCompleteResponse() {
        AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) {
            @Override
            public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer,
                                      Listener listener) {
                listener.onResponse(false);
            }
        };

        Step.StepKey nextKeyOnIncompleteResponse = randomStepKey();
        AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, nextKeyOnIncompleteResponse,
            client);

        asyncActionBranchingStep.performAction(getIndexMetadata(), emptyClusterState(), null, new AsyncActionStep.Listener() {

            @Override
            public void onResponse(boolean complete) {
                assertThat(complete, is(false));
            }

            @Override
            public void onFailure(Exception e) {
                fail("not expecting a failure as the wrapped step was successful");
            }
        });
        assertThat(asyncActionBranchingStep.getNextStepKey(), is(nextKeyOnIncompleteResponse));
    }

    public void testBranchStepPropagatesFailure() {
        NullPointerException failException = new NullPointerException("fail");
        AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) {
            @Override
            public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer,
                                      Listener listener) {
                listener.onFailure(failException);
            }
        };

        AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, randomStepKey(), client);

        asyncActionBranchingStep.performAction(getIndexMetadata(), emptyClusterState(), null, new AsyncActionStep.Listener() {

            @Override
            public void onResponse(boolean complete) {
                fail("expecting a failure as the wrapped step failed");
            }

            @Override
            public void onFailure(Exception e) {
                assertThat(e, is(failException));
            }
        });
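        // getNextStepKey() is only defined once the wrapped step has completed; after a
        // failure, asking for the next key throws, as asserted below.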
        expectThrows(IllegalStateException.class, () -> asyncActionBranchingStep.getNextStepKey());
    }
}
@ -0,0 +1,156 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction;
import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;

import java.util.Map;

import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class CleanupSnapshotStepTests extends AbstractStepTestCase<CleanupSnapshotStep> {

    @Override
    public CleanupSnapshotStep createRandomInstance() {
        StepKey stepKey = randomStepKey();
        StepKey nextStepKey = randomStepKey();
        return new CleanupSnapshotStep(stepKey, nextStepKey, client);
    }

    @Override
    protected CleanupSnapshotStep copyInstance(CleanupSnapshotStep instance) {
        return new CleanupSnapshotStep(instance.getKey(), instance.getNextStepKey(), instance.getClient());
    }

    @Override
    public CleanupSnapshotStep mutateInstance(CleanupSnapshotStep instance) {
        StepKey key = instance.getKey();
        StepKey nextKey = instance.getNextStepKey();
        switch (between(0, 1)) {
            case 0:
                key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 1:
                nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new CleanupSnapshotStep(key, nextKey, instance.getClient());
    }

    public void testPerformActionDoesntFailIfSnapshotInfoIsMissing() {
        String indexName = randomAlphaOfLength(10);
        String policyName = "test-ilm-policy";

        {
            IndexMetadata.Builder indexMetadataBuilder =
                IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                    .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));

            IndexMetadata indexMetaData = indexMetadataBuilder.build();

            ClusterState clusterState =
                ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

            CleanupSnapshotStep cleanupSnapshotStep = createRandomInstance();
            cleanupSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
                @Override
                public void onResponse(boolean complete) {
                    assertThat(complete, is(true));
                }

                @Override
                public void onFailure(Exception e) {
                    fail("expecting the step to report success if repository information is missing from the ILM execution state as there" +
                        " is no snapshot to delete");
                }
            });
        }

        {
            IndexMetadata.Builder indexMetadataBuilder =
                IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                    .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
            Map<String, String> ilmCustom = org.elasticsearch.common.collect.Map.of("snapshot_repository", "repository_name");
            indexMetadataBuilder.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom);

            IndexMetadata indexMetaData = indexMetadataBuilder.build();

            ClusterState clusterState =
                ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

            CleanupSnapshotStep cleanupSnapshotStep = createRandomInstance();
            cleanupSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
                @Override
                public void onResponse(boolean complete) {
                    assertThat(complete, is(true));
                }

                @Override
                public void onFailure(Exception e) {
                    fail("expecting the step to report success if the snapshot name is missing from the ILM execution state as there is " +
                        "no snapshot to delete");
                }
            });
        }
    }

    public void testPerformAction() {
        String indexName = randomAlphaOfLength(10);
        String policyName = "test-ilm-policy";
        String snapshotName = indexName + "-" + policyName;
        Map<String, String> ilmCustom = org.elasticsearch.common.collect.Map.of("snapshot_name", snapshotName);

        IndexMetadata.Builder indexMetadataBuilder =
            IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom)
                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
        IndexMetadata indexMetaData = indexMetadataBuilder.build();

        ClusterState clusterState =
            ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

        try (NoOpClient client = getDeleteSnapshotRequestAssertingClient(snapshotName)) {
            CleanupSnapshotStep step = new CleanupSnapshotStep(randomStepKey(), randomStepKey(), client);
            step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
                @Override
                public void onResponse(boolean complete) {
                }

                @Override
                public void onFailure(Exception e) {
                }
            });
        }
    }

    private NoOpClient getDeleteSnapshotRequestAssertingClient(String expectedSnapshotName) {
        return new NoOpClient(getTestName()) {
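            // Assert-only stub: it verifies the outgoing delete-snapshot request below but never
            // invokes the listener, which is why the test's listener callbacks above are empty.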
            @Override
            protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action,
                                                                                                       Request request,
                                                                                                       ActionListener<Response> listener) {
                assertThat(action.name(), is(DeleteSnapshotAction.NAME));
                assertTrue(request instanceof DeleteSnapshotRequest);
                assertThat(((DeleteSnapshotRequest) request).snapshot(), equalTo(expectedSnapshotName));
            }
        };
    }
}
@ -25,14 +25,16 @@ public class CopyExecutionStateStepTests extends AbstractStepTestCase<CopyExecut
        StepKey stepKey = randomStepKey();
        StepKey nextStepKey = randomStepKey();
        String shrunkIndexPrefix = randomAlphaOfLength(10);
        return new CopyExecutionStateStep(stepKey, nextStepKey, shrunkIndexPrefix);
        String nextStepName = randomStepKey().getName();
        return new CopyExecutionStateStep(stepKey, nextStepKey, shrunkIndexPrefix, nextStepName);
    }

    @Override
    protected CopyExecutionStateStep mutateInstance(CopyExecutionStateStep instance) {
        StepKey key = instance.getKey();
        StepKey nextKey = instance.getNextStepKey();
        String shrunkIndexPrefix = instance.getShrunkIndexPrefix();
        String shrunkIndexPrefix = instance.getTargetIndexPrefix();
        String nextStepName = instance.getTargetNextStepName();

        switch (between(0, 2)) {
        switch (between(0, 3)) {
            case 0:
@ -44,16 +46,20 @@ public class CopyExecutionStateStepTests extends AbstractStepTestCase<CopyExecut
            case 2:
                shrunkIndexPrefix += randomAlphaOfLength(5);
                break;
            case 3:
                nextStepName = randomAlphaOfLengthBetween(1, 10);
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }

        return new CopyExecutionStateStep(key, nextKey, shrunkIndexPrefix);
        return new CopyExecutionStateStep(key, nextKey, shrunkIndexPrefix, nextStepName);
    }

    @Override
    protected CopyExecutionStateStep copyInstance(CopyExecutionStateStep instance) {
        return new CopyExecutionStateStep(instance.getKey(), instance.getNextStepKey(), instance.getShrunkIndexPrefix());
        return new CopyExecutionStateStep(instance.getKey(), instance.getNextStepKey(), instance.getTargetIndexPrefix(),
            instance.getTargetNextStepName());
    }

    public void testPerformAction() {
@ -66,7 +72,7 @@ public class CopyExecutionStateStepTests extends AbstractStepTestCase<CopyExecut
            .numberOfReplicas(randomIntBetween(1,5))
            .putCustom(ILM_CUSTOM_METADATA_KEY, customMetadata)
            .build();
        IndexMetadata shrunkIndexMetadata = IndexMetadata.builder(step.getShrunkIndexPrefix() + indexName)
        IndexMetadata shrunkIndexMetadata = IndexMetadata.builder(step.getTargetIndexPrefix() + indexName)
            .settings(settings(Version.CURRENT)).numberOfShards(randomIntBetween(1,5))
            .numberOfReplicas(randomIntBetween(1,5))
            .build();
@ -80,12 +86,14 @@ public class CopyExecutionStateStepTests extends AbstractStepTestCase<CopyExecut

        LifecycleExecutionState oldIndexData = LifecycleExecutionState.fromIndexMetadata(originalIndexMetadata);
        LifecycleExecutionState newIndexData = LifecycleExecutionState
            .fromIndexMetadata(newClusterState.metadata().index(step.getShrunkIndexPrefix() + indexName));
            .fromIndexMetadata(newClusterState.metadata().index(step.getTargetIndexPrefix() + indexName));

        assertEquals(oldIndexData.getLifecycleDate(), newIndexData.getLifecycleDate());
        assertEquals(oldIndexData.getPhase(), newIndexData.getPhase());
        assertEquals(oldIndexData.getAction(), newIndexData.getAction());
        assertEquals(ShrunkenIndexCheckStep.NAME, newIndexData.getStep());
        assertEquals(newIndexData.getLifecycleDate(), oldIndexData.getLifecycleDate());
        assertEquals(newIndexData.getPhase(), oldIndexData.getPhase());
        assertEquals(newIndexData.getAction(), oldIndexData.getAction());
        assertEquals(newIndexData.getStep(), step.getTargetNextStepName());
        assertEquals(newIndexData.getSnapshotRepository(), oldIndexData.getSnapshotRepository());
        assertEquals(newIndexData.getSnapshotName(), oldIndexData.getSnapshotName());
    }

    public void testPerformActionWithNoTarget() {
        CopyExecutionStateStep step = createRandomInstance();
@ -106,6 +114,6 @@ public class CopyExecutionStateStepTests extends AbstractStepTestCase<CopyExecut
            () -> step.performAction(originalIndexMetadata.getIndex(), originalClusterState));

        assertThat(e.getMessage(), equalTo("unable to copy execution state from [" +
            indexName + "] to [" + step.getShrunkIndexPrefix() + indexName + "] as target index does not exist"));
            indexName + "] to [" + step.getTargetIndexPrefix() + indexName + "] as target index does not exist"));
    }
}

@ -0,0 +1,80 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;

import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState;
import static org.hamcrest.Matchers.is;

public class CopySettingsStepTests extends AbstractStepTestCase<CopySettingsStep> {

    @Override
    protected CopySettingsStep createRandomInstance() {
        return new CopySettingsStep(randomStepKey(), randomStepKey(), randomAlphaOfLengthBetween(1, 10),
            IndexMetadata.SETTING_NUMBER_OF_SHARDS);
    }

    @Override
    protected CopySettingsStep mutateInstance(CopySettingsStep instance) {
        Step.StepKey key = instance.getKey();
        Step.StepKey nextKey = instance.getNextStepKey();
        String indexPrefix = instance.getIndexPrefix();
        String[] settingsKeys = instance.getSettingsKeys();

        switch (between(0, 3)) {
            case 0:
                key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 1:
                nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 2:
                indexPrefix = randomValueOtherThan(indexPrefix, () -> randomAlphaOfLengthBetween(1, 10));
                break;
            case 3:
                settingsKeys = new String[]{randomAlphaOfLengthBetween(1, 10)};
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new CopySettingsStep(key, nextKey, indexPrefix, settingsKeys);
    }

    @Override
    protected CopySettingsStep copyInstance(CopySettingsStep instance) {
        return new CopySettingsStep(instance.getKey(), instance.getNextStepKey(), instance.getIndexPrefix(), instance.getSettingsKeys());
    }

    public void testPerformAction() {
        String indexName = randomAlphaOfLength(10);
        String policyName = "test-ilm-policy";
        IndexMetadata.Builder sourceIndexMetadataBuilder =
            IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));

        String indexPrefix = "test-prefix-";
        String targetIndex = indexPrefix + indexName;

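        // The step resolves its target index as <indexPrefix + sourceIndexName>, so the
        // copied settings are expected to land on "test-prefix-" + indexName below.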
        IndexMetadata.Builder targetIndexMetadataBuilder = IndexMetadata.builder(targetIndex).settings(settings(Version.CURRENT))
            .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));

        IndexMetadata sourceIndexMetadata = sourceIndexMetadataBuilder.build();
        ClusterState clusterState = ClusterState.builder(emptyClusterState()).metadata(
            Metadata.builder().put(sourceIndexMetadata, true).put(targetIndexMetadataBuilder).build()
        ).build();

        CopySettingsStep copySettingsStep = new CopySettingsStep(randomStepKey(), randomStepKey(), indexPrefix,
            LifecycleSettings.LIFECYCLE_NAME);

        ClusterState newClusterState = copySettingsStep.performAction(sourceIndexMetadata.getIndex(), clusterState);
        IndexMetadata newTargetIndexMetadata = newClusterState.metadata().index(targetIndex);
        assertThat(newTargetIndexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME), is(policyName));
    }
}
@ -0,0 +1,171 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;

import java.util.HashMap;
import java.util.Map;

import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;

public class CreateSnapshotStepTests extends AbstractStepTestCase<CreateSnapshotStep> {

    @Override
    public CreateSnapshotStep createRandomInstance() {
        StepKey stepKey = randomStepKey();
        StepKey nextStepKey = randomStepKey();
        return new CreateSnapshotStep(stepKey, nextStepKey, client);
    }

    @Override
    protected CreateSnapshotStep copyInstance(CreateSnapshotStep instance) {
        return new CreateSnapshotStep(instance.getKey(), instance.getNextStepKey(), instance.getClient());
    }

    @Override
    public CreateSnapshotStep mutateInstance(CreateSnapshotStep instance) {
        StepKey key = instance.getKey();
        StepKey nextKey = instance.getNextStepKey();
        switch (between(0, 1)) {
            case 0:
                key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 1:
                nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new CreateSnapshotStep(key, nextKey, instance.getClient());
    }

    public void testPerformActionFailure() {
        String indexName = randomAlphaOfLength(10);
        String policyName = "test-ilm-policy";

        {
            IndexMetadata.Builder indexMetadataBuilder =
                IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                    .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
            Map<String, String> ilmCustom = new HashMap<>();
            String repository = "repository";
            ilmCustom.put("snapshot_repository", repository);
            indexMetadataBuilder.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom);

            IndexMetadata indexMetaData = indexMetadataBuilder.build();

            ClusterState clusterState =
                ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

            CreateSnapshotStep createSnapshotStep = createRandomInstance();
            createSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
                @Override
                public void onResponse(boolean complete) {
                    fail("expecting a failure as the index doesn't have any snapshot name in its ILM execution state");
                }

                @Override
                public void onFailure(Exception e) {
                    assertThat(e, instanceOf(IllegalStateException.class));
                    assertThat(e.getMessage(),
                        is("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]"));
                }
            });
        }

        {
            IndexMetadata.Builder indexMetadataBuilder =
                IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                    .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
            IndexMetadata indexMetaData = indexMetadataBuilder.build();

            ClusterState clusterState =
                ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

            CreateSnapshotStep createSnapshotStep = createRandomInstance();
            createSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
                @Override
                public void onResponse(boolean complete) {
                    fail("expecting a failure as the index doesn't have a snapshot repository in its ILM execution state");
                }

                @Override
                public void onFailure(Exception e) {
                    assertThat(e, instanceOf(IllegalStateException.class));
                    assertThat(e.getMessage(),
                        is("snapshot repository is not present for policy [" + policyName + "] and index [" + indexName + "]"));
                }
            });
        }
    }

    public void testPerformAction() {
        String indexName = randomAlphaOfLength(10);
        String policyName = "test-ilm-policy";
        Map<String, String> ilmCustom = new HashMap<>();
        String snapshotName = indexName + "-" + policyName;
        ilmCustom.put("snapshot_name", snapshotName);
        String repository = "repository";
        ilmCustom.put("snapshot_repository", repository);
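
        // "snapshot_name" and "snapshot_repository" mirror the keys that the
        // generate-snapshot-name step records in the index's ILM execution state.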
        IndexMetadata.Builder indexMetadataBuilder =
            IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom)
                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
        IndexMetadata indexMetaData = indexMetadataBuilder.build();

        ClusterState clusterState =
            ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

        try (NoOpClient client = getCreateSnapshotRequestAssertingClient(repository, snapshotName, indexName)) {
            CreateSnapshotStep step = new CreateSnapshotStep(randomStepKey(), randomStepKey(), client);
            step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
                @Override
                public void onResponse(boolean complete) {
                }

                @Override
                public void onFailure(Exception e) {
                }
            });
        }
    }

    private NoOpClient getCreateSnapshotRequestAssertingClient(String expectedRepoName, String expectedSnapshotName, String indexName) {
        return new NoOpClient(getTestName()) {
            @Override
            protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action,
                                                                                                       Request request,
                                                                                                       ActionListener<Response> listener) {
                assertThat(action.name(), is(CreateSnapshotAction.NAME));
                assertTrue(request instanceof CreateSnapshotRequest);
                CreateSnapshotRequest createSnapshotRequest = (CreateSnapshotRequest) request;
                assertThat(createSnapshotRequest.indices().length, is(1));
                assertThat(createSnapshotRequest.indices()[0], is(indexName));
                assertThat(createSnapshotRequest.repository(), is(expectedRepoName));
                assertThat(createSnapshotRequest.snapshot(), is(expectedSnapshotName));
                assertThat(CreateSnapshotStep.NAME + " waits for the create snapshot request to complete",
                    createSnapshotRequest.waitForCompletion(), is(true));
                assertThat("ILM generated snapshots should not include global state", createSnapshotRequest.includeGlobalState(),
                    is(false));
            }
        };
    }
}
@ -30,20 +30,39 @@ public class DeleteActionTests extends AbstractActionTestCase<DeleteAction> {
    }

    public void testToSteps() {
        DeleteAction action = createTestInstance();
        String phase = randomAlphaOfLengthBetween(1, 10);
        StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10),
            randomAlphaOfLengthBetween(1, 10));
        List<Step> steps = action.toSteps(null, phase, nextStepKey);
        assertNotNull(steps);
        assertEquals(2, steps.size());
        StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, WaitForNoFollowersStep.NAME);
        StepKey expectedSecondStepKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME);
        WaitForNoFollowersStep firstStep = (WaitForNoFollowersStep) steps.get(0);
        DeleteStep secondStep = (DeleteStep) steps.get(1);
        assertEquals(expectedFirstStepKey, firstStep.getKey());
        assertEquals(expectedSecondStepKey, firstStep.getNextStepKey());
        assertEquals(expectedSecondStepKey, secondStep.getKey());
        assertEquals(nextStepKey, secondStep.getNextStepKey());
            randomAlphaOfLengthBetween(1, 10));
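        // Two scenarios below: DeleteAction(true), the delete_searchable_snapshot case, inserts
        // the cleanup-snapshot step before the delete; DeleteAction(false) keeps the snapshot.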
        {
            DeleteAction action = new DeleteAction(true);
            List<Step> steps = action.toSteps(null, phase, nextStepKey);
            assertNotNull(steps);
            assertEquals(3, steps.size());
            StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, WaitForNoFollowersStep.NAME);
            StepKey expectedSecondStepKey = new StepKey(phase, DeleteAction.NAME, CleanupSnapshotStep.NAME);
            StepKey expectedThirdKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME);
            WaitForNoFollowersStep firstStep = (WaitForNoFollowersStep) steps.get(0);
            CleanupSnapshotStep secondStep = (CleanupSnapshotStep) steps.get(1);
            DeleteStep thirdStep = (DeleteStep) steps.get(2);
            assertEquals(expectedFirstStepKey, firstStep.getKey());
            assertEquals(expectedSecondStepKey, firstStep.getNextStepKey());
            assertEquals(expectedSecondStepKey, secondStep.getKey());
            assertEquals(expectedThirdKey, secondStep.getNextStepKey());
            assertEquals(expectedThirdKey, thirdStep.getKey());
            assertEquals(nextStepKey, thirdStep.getNextStepKey());
        }

        {
            DeleteAction actionKeepsSnapshot = new DeleteAction(false);
            List<Step> steps = actionKeepsSnapshot.toSteps(null, phase, nextStepKey);
            assertNotNull(steps);
            assertEquals(2, steps.size());
            StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, WaitForNoFollowersStep.NAME);
            StepKey expectedSecondStepKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME);
            WaitForNoFollowersStep firstStep = (WaitForNoFollowersStep) steps.get(0);
            DeleteStep secondStep = (DeleteStep) steps.get(1);
            assertEquals(expectedFirstStepKey, firstStep.getKey());
            assertEquals(expectedSecondStepKey, firstStep.getNextStepKey());
            assertEquals(expectedSecondStepKey, secondStep.getKey());
            assertEquals(nextStepKey, secondStep.getNextStepKey());
        }
    }
}

@ -0,0 +1,128 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.Strings;

import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState;
import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.generateSnapshotName;
import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.validateGeneratedSnapshotName;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.hamcrest.Matchers.containsStringIgnoringCase;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;

public class GenerateSnapshotNameStepTests extends AbstractStepTestCase<GenerateSnapshotNameStep> {

    @Override
    protected GenerateSnapshotNameStep createRandomInstance() {
        return new GenerateSnapshotNameStep(randomStepKey(), randomStepKey(), randomAlphaOfLengthBetween(5, 10));
    }

    @Override
    protected GenerateSnapshotNameStep mutateInstance(GenerateSnapshotNameStep instance) {
        Step.StepKey key = instance.getKey();
        Step.StepKey nextKey = instance.getNextStepKey();
        String snapshotRepository = instance.getSnapshotRepository();

        switch (between(0, 2)) {
            case 0:
                key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 1:
                nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
                break;
            case 2:
                snapshotRepository = randomValueOtherThan(snapshotRepository, () -> randomAlphaOfLengthBetween(5, 10));
                break;
            default:
                throw new AssertionError("Illegal randomisation branch");
        }
        return new GenerateSnapshotNameStep(key, nextKey, snapshotRepository);
    }

    @Override
    protected GenerateSnapshotNameStep copyInstance(GenerateSnapshotNameStep instance) {
        return new GenerateSnapshotNameStep(instance.getKey(), instance.getNextStepKey(), instance.getSnapshotRepository());
    }

    public void testPerformAction() {
        String indexName = randomAlphaOfLength(10);
        String policyName = "test-ilm-policy";
        IndexMetadata.Builder indexMetadataBuilder =
            IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
                .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));

        IndexMetadata indexMetadata = indexMetadataBuilder.build();
        ClusterState clusterState =
            ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetadata, true).build()).build();

        GenerateSnapshotNameStep generateSnapshotNameStep = createRandomInstance();
        ClusterState newClusterState = generateSnapshotNameStep.performAction(indexMetadata.getIndex(), clusterState);

        LifecycleExecutionState executionState = LifecycleExecutionState.fromIndexMetadata(newClusterState.metadata().index(indexName));
        assertThat("the " + GenerateSnapshotNameStep.NAME + " step must generate a snapshot name", executionState.getSnapshotName(),
            notNullValue());
        assertThat(executionState.getSnapshotRepository(), is(generateSnapshotNameStep.getSnapshotRepository()));
        assertThat(executionState.getSnapshotName(), containsStringIgnoringCase(indexName));
        assertThat(executionState.getSnapshotName(), containsStringIgnoringCase(policyName));
    }

    public void testNameGeneration() {
        long time = 1552684146542L; // Fri Mar 15 2019 21:09:06 UTC
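        // Date math below resolves against this fixed timestamp: {now} formats the day,
        // {now/M} rounds down to the start of the month, and {now/m} to the start of the minute.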
assertThat(generateSnapshotName("name"), startsWith("name-"));
|
||||
assertThat(generateSnapshotName("name").length(), greaterThan("name-".length()));
|
||||
|
||||
GenerateSnapshotNameStep.ResolverContext resolverContext = new GenerateSnapshotNameStep.ResolverContext(time);
|
||||
assertThat(generateSnapshotName("<name-{now}>", resolverContext), startsWith("name-2019.03.15-"));
|
||||
assertThat(generateSnapshotName("<name-{now}>", resolverContext).length(), greaterThan("name-2019.03.15-".length()));
|
||||
|
||||
assertThat(generateSnapshotName("<name-{now/M}>", resolverContext), startsWith("name-2019.03.01-"));
|
||||
|
||||
assertThat(generateSnapshotName("<name-{now/m{yyyy-MM-dd.HH:mm:ss}}>", resolverContext), startsWith("name-2019-03-15.21:09:00-"));
|
||||
}
|
||||
|
||||
public void testNameValidation() {
|
||||
assertThat(validateGeneratedSnapshotName("name-", generateSnapshotName("name-")), nullValue());
|
||||
assertThat(validateGeneratedSnapshotName("<name-{now}>", generateSnapshotName("<name-{now}>")), nullValue());
|
||||
|
||||
{
|
||||
ActionRequestValidationException validationException = validateGeneratedSnapshotName("", generateSnapshotName(""));
|
||||
assertThat(validationException, notNullValue());
|
||||
assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name []: cannot be empty"));
|
||||
}
|
||||
{
|
||||
ActionRequestValidationException validationException = validateGeneratedSnapshotName("#start", generateSnapshotName("#start"));
|
||||
assertThat(validationException, notNullValue());
|
||||
assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [#start]: must not contain '#'"));
|
||||
}
|
||||
{
|
||||
ActionRequestValidationException validationException = validateGeneratedSnapshotName("_start", generateSnapshotName("_start"));
|
||||
assertThat(validationException, notNullValue());
|
||||
assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [_start]: must not start with " +
|
||||
"'_'"));
|
||||
}
|
||||
{
|
||||
ActionRequestValidationException validationException = validateGeneratedSnapshotName("aBcD", generateSnapshotName("aBcD"));
|
||||
assertThat(validationException, notNullValue());
|
||||
assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [aBcD]: must be lowercase"));
|
||||
}
|
||||
{
|
||||
ActionRequestValidationException validationException = validateGeneratedSnapshotName("na>me", generateSnapshotName("na>me"));
|
||||
assertThat(validationException, notNullValue());
|
||||
assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [na>me]: must not contain " +
|
||||
"contain the following characters " + Strings.INVALID_FILENAME_CHARS));
|
||||
}
|
||||
}
|
||||
}
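For reference, the date-math expressions asserted in testNameGeneration above resolve as follows. This is a minimal standalone sketch using plain java.time, not the Elasticsearch resolver; the class and method names are illustrative only:

import java.time.Instant;
import java.time.ZoneOffset;
import java.time.ZonedDateTime;
import java.time.format.DateTimeFormatter;

public class DateMathResolutionSketch {
    public static void main(String[] args) {
        long time = 1552684146542L; // Fri Mar 15 2019 21:09:06 UTC, the same instant as the test
        ZonedDateTime now = Instant.ofEpochMilli(time).atZone(ZoneOffset.UTC);
        DateTimeFormatter defaultFormat = DateTimeFormatter.ofPattern("yyyy.MM.dd");

        // <name-{now}> uses the default format: prints name-2019.03.15
        System.out.println("name-" + defaultFormat.format(now));

        // <name-{now/M}> rounds down to the start of the month: prints name-2019.03.01
        ZonedDateTime startOfMonth = now.toLocalDate().withDayOfMonth(1).atStartOfDay(ZoneOffset.UTC);
        System.out.println("name-" + defaultFormat.format(startOfMonth));

        // <name-{now/m{yyyy-MM-dd.HH:mm:ss}}> rounds down to the minute and applies
        // the custom format: prints name-2019-03-15.21:09:00
        DateTimeFormatter custom = DateTimeFormatter.ofPattern("yyyy-MM-dd.HH:mm:ss");
        System.out.println("name-" + custom.format(now.withSecond(0).withNano(0)));
    }
}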
@ -57,6 +57,8 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC
stepNull ? null : randomNonNegativeLong(),
stepNull ? null : randomNonNegativeLong(),
stepNull ? null : randomNonNegativeLong(),
stepNull ? null : randomAlphaOfLength(10),
stepNull ? null : randomAlphaOfLength(10),
randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()),
randomBoolean() ? null : PhaseExecutionInfoTests.randomPhaseExecutionInfo(""));
}

@ -76,6 +78,8 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC
randomBoolean() ? null : randomNonNegativeLong(),
randomBoolean() ? null : randomNonNegativeLong(),
randomBoolean() ? null : randomNonNegativeLong(),
randomBoolean() ? null : randomAlphaOfLength(10),
randomBoolean() ? null : randomAlphaOfLength(10),
randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()),
randomBoolean() ? null : PhaseExecutionInfoTests.randomPhaseExecutionInfo("")));
assertThat(exception.getMessage(), startsWith("managed index response must have complete step details"));

@ -116,11 +120,13 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC
Long phaseTime = instance.getPhaseTime();
Long actionTime = instance.getActionTime();
Long stepTime = instance.getStepTime();
String repositoryName = instance.getRepositoryName();
String snapshotName = instance.getSnapshotName();
boolean managed = instance.managedByILM();
BytesReference stepInfo = instance.getStepInfo();
PhaseExecutionInfo phaseExecutionInfo = instance.getPhaseExecutionInfo();
if (managed) {
switch (between(0, 11)) {
switch (between(0, 13)) {
case 0:
index = index + randomAlphaOfLengthBetween(1, 5);
break;

@ -172,11 +178,18 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC
isAutoRetryableError = true;
failedStepRetryCount = randomValueOtherThan(failedStepRetryCount, () -> randomInt(10));
break;
case 12:
repositoryName = randomValueOtherThan(repositoryName, () -> randomAlphaOfLengthBetween(5, 10));
break;
case 13:
snapshotName = randomValueOtherThan(snapshotName, () -> randomAlphaOfLengthBetween(5, 10));
break;
default:
throw new AssertionError("Illegal randomisation branch");
}
return IndexLifecycleExplainResponse.newManagedIndexResponse(index, policy, policyTime, phase, action, step, failedStep,
isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo);
isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, repositoryName, snapshotName, stepInfo,
phaseExecutionInfo);
} else {
switch (between(0, 1)) {
case 0:

@ -186,12 +186,14 @@ public class LifecycleExecutionStateTests extends ESTestCase {
}

static Map<String, String> createCustomMetadata() {
String phase = randomAlphaOfLengthBetween(5,20);
String action = randomAlphaOfLengthBetween(5,20);
String step = randomAlphaOfLengthBetween(5,20);
String failedStep = randomAlphaOfLengthBetween(5,20);
String stepInfo = randomAlphaOfLengthBetween(15,50);
String phaseDefinition = randomAlphaOfLengthBetween(15,50);
String phase = randomAlphaOfLengthBetween(5, 20);
String action = randomAlphaOfLengthBetween(5, 20);
String step = randomAlphaOfLengthBetween(5, 20);
String failedStep = randomAlphaOfLengthBetween(5, 20);
String stepInfo = randomAlphaOfLengthBetween(15, 50);
String phaseDefinition = randomAlphaOfLengthBetween(15, 50);
String repositoryName = randomAlphaOfLengthBetween(10, 20);
String snapshotName = randomAlphaOfLengthBetween(10, 20);
long indexCreationDate = randomLong();
long phaseTime = randomLong();
long actionTime = randomLong();

@ -208,6 +210,8 @@ public class LifecycleExecutionStateTests extends ESTestCase {
customMetadata.put("phase_time", String.valueOf(phaseTime));
customMetadata.put("action_time", String.valueOf(actionTime));
customMetadata.put("step_time", String.valueOf(stepTime));
customMetadata.put("snapshot_repository", repositoryName);
customMetadata.put("snapshot_name", snapshotName);
return customMetadata;
}
}

@ -40,6 +40,7 @@ public class LifecyclePolicyMetadataTests extends AbstractSerializingTestCase<Li
(in) -> TimeseriesLifecycleType.INSTANCE),
new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, WaitForSnapshotAction.NAME, WaitForSnapshotAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new),

@ -60,6 +61,8 @@ public class LifecyclePolicyMetadataTests extends AbstractSerializingTestCase<Li
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(AllocateAction.NAME), AllocateAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class,
new ParseField(WaitForSnapshotAction.NAME), WaitForSnapshotAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SearchableSnapshotAction.NAME),
SearchableSnapshotAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(DeleteAction.NAME), DeleteAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ForceMergeAction.NAME), ForceMergeAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ReadOnlyAction.NAME), ReadOnlyAction::parse),

@ -56,7 +56,8 @@ public class LifecyclePolicyTests extends AbstractSerializingTestCase<LifecycleP
new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, FreezeAction.NAME, FreezeAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new)
new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new)
));
}

@ -76,7 +77,9 @@ public class LifecyclePolicyTests extends AbstractSerializingTestCase<LifecycleP
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(FreezeAction.NAME), FreezeAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SetPriorityAction.NAME), SetPriorityAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse)
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SearchableSnapshotAction.NAME),
SearchableSnapshotAction::parse)
));
return new NamedXContentRegistry(entries);
}

@ -129,6 +132,8 @@ public class LifecyclePolicyTests extends AbstractSerializingTestCase<LifecycleP
return SetPriorityActionTests.randomInstance();
case UnfollowAction.NAME:
return new UnfollowAction();
case SearchableSnapshotAction.NAME:
return new SearchableSnapshotAction(randomAlphaOfLengthBetween(1, 10));
default:
throw new IllegalArgumentException("invalid action [" + action + "]");
}};

@ -183,6 +188,8 @@ public class LifecyclePolicyTests extends AbstractSerializingTestCase<LifecycleP
return SetPriorityActionTests.randomInstance();
case UnfollowAction.NAME:
return new UnfollowAction();
case SearchableSnapshotAction.NAME:
return new SearchableSnapshotAction(randomAlphaOfLengthBetween(1, 10));
default:
throw new IllegalArgumentException("invalid action [" + action + "]");
}};
@ -0,0 +1,253 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.common.collect.List;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;
import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction;
import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest;

import java.util.HashMap;
import java.util.Map;

import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState;
import static org.hamcrest.Matchers.instanceOf;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;

public class MountSnapshotStepTests extends AbstractStepTestCase<MountSnapshotStep> {

private static final String RESTORED_INDEX_PREFIX = "restored-";

@Override
public MountSnapshotStep createRandomInstance() {
StepKey stepKey = randomStepKey();
StepKey nextStepKey = randomStepKey();
String restoredIndexPrefix = randomAlphaOfLength(10);
return new MountSnapshotStep(stepKey, nextStepKey, client, restoredIndexPrefix);
}

@Override
protected MountSnapshotStep copyInstance(MountSnapshotStep instance) {
return new MountSnapshotStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getRestoredIndexPrefix());
}

@Override
public MountSnapshotStep mutateInstance(MountSnapshotStep instance) {
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
String restoredIndexPrefix = instance.getRestoredIndexPrefix();
switch (between(0, 2)) {
case 0:
key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
break;
case 1:
nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
break;
case 2:
restoredIndexPrefix = randomValueOtherThan(restoredIndexPrefix, () -> randomAlphaOfLengthBetween(1, 10));
break;
default:
throw new AssertionError("Illegal randomisation branch");
}
return new MountSnapshotStep(key, nextKey, instance.getClient(), restoredIndexPrefix);
}

public void testPerformActionFailure() {
String indexName = randomAlphaOfLength(10);
String policyName = "test-ilm-policy";

{
IndexMetadata.Builder indexMetadataBuilder =
IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
IndexMetadata indexMetaData = indexMetadataBuilder.build();

ClusterState clusterState =
ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

MountSnapshotStep mountSnapshotStep = createRandomInstance();
mountSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
@Override
public void onResponse(boolean complete) {
fail("expecting a failure as the index doesn't have any repository name in its ILM execution state");
}

@Override
public void onFailure(Exception e) {
assertThat(e, instanceOf(IllegalStateException.class));
assertThat(e.getMessage(),
is("snapshot repository is not present for policy [" + policyName + "] and index [" + indexName + "]"));
}
});
}

{
IndexMetadata.Builder indexMetadataBuilder =
IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
Map<String, String> ilmCustom = new HashMap<>();
String repository = "repository";
ilmCustom.put("snapshot_repository", repository);
indexMetadataBuilder.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom);
IndexMetadata indexMetaData = indexMetadataBuilder.build();

ClusterState clusterState =
ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

MountSnapshotStep mountSnapshotStep = createRandomInstance();
mountSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
@Override
public void onResponse(boolean complete) {
fail("expecting a failure as the index doesn't have any snapshot name in its ILM execution state");
}

@Override
public void onFailure(Exception e) {
assertThat(e, instanceOf(IllegalStateException.class));
assertThat(e.getMessage(),
is("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]"));
}
});
}
}

public void testPerformAction() {
String indexName = randomAlphaOfLength(10);
String policyName = "test-ilm-policy";
Map<String, String> ilmCustom = new HashMap<>();
String snapshotName = indexName + "-" + policyName;
ilmCustom.put("snapshot_name", snapshotName);
String repository = "repository";
ilmCustom.put("snapshot_repository", repository);

IndexMetadata.Builder indexMetadataBuilder =
IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom)
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
IndexMetadata indexMetaData = indexMetadataBuilder.build();

ClusterState clusterState =
ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

try (NoOpClient client = getRestoreSnapshotRequestAssertingClient(repository, snapshotName, indexName, RESTORED_INDEX_PREFIX)) {
MountSnapshotStep step = new MountSnapshotStep(randomStepKey(), randomStepKey(), client, RESTORED_INDEX_PREFIX);
step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
@Override
public void onResponse(boolean complete) {
assertThat(complete, is(true));
}

@Override
public void onFailure(Exception e) {
fail("expecting successful response but got: [" + e.getMessage() + "]");
}
});
}
}

public void testResponseStatusHandling() {
String indexName = randomAlphaOfLength(10);
String policyName = "test-ilm-policy";
Map<String, String> ilmCustom = new HashMap<>();
String snapshotName = indexName + "-" + policyName;
ilmCustom.put("snapshot_name", snapshotName);
String repository = "repository";
ilmCustom.put("snapshot_repository", repository);

IndexMetadata.Builder indexMetadataBuilder =
IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName))
.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom)
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
IndexMetadata indexMetaData = indexMetadataBuilder.build();

ClusterState clusterState =
ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build();

{
RestoreSnapshotResponse responseWithOKStatus = new RestoreSnapshotResponse(new RestoreInfo("test", List.of(), 1, 1));
try (NoOpClient clientPropagatingOKResponse = getClientTriggeringResponse(responseWithOKStatus)) {
MountSnapshotStep step = new MountSnapshotStep(randomStepKey(), randomStepKey(), clientPropagatingOKResponse,
RESTORED_INDEX_PREFIX);
step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
@Override
public void onResponse(boolean complete) {
assertThat(complete, is(true));
}

@Override
public void onFailure(Exception e) {
fail("expecting successful response but got: [" + e.getMessage() + "]");
}
});
}
}

{
RestoreSnapshotResponse responseWithACCEPTEDStatus = new RestoreSnapshotResponse((RestoreInfo) null);
try (NoOpClient clientPropagatingACCEPTEDResponse = getClientTriggeringResponse(responseWithACCEPTEDStatus)) {
MountSnapshotStep step = new MountSnapshotStep(randomStepKey(), randomStepKey(), clientPropagatingACCEPTEDResponse,
RESTORED_INDEX_PREFIX);
step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() {
@Override
public void onResponse(boolean complete) {
assertThat(complete, is(true));
}

@Override
public void onFailure(Exception e) {
fail("expecting successful response but got: [" + e.getMessage() + "]");
}
});
}
}
}

@SuppressWarnings("unchecked")
private NoOpClient getClientTriggeringResponse(RestoreSnapshotResponse response) {
return new NoOpClient(getTestName()) {
@Override
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action,
Request request,
ActionListener<Response> listener) {
listener.onResponse((Response) response);
}
};
}

@SuppressWarnings("unchecked")
private NoOpClient getRestoreSnapshotRequestAssertingClient(String expectedRepoName, String expectedSnapshotName, String indexName,
String restoredIndexPrefix) {
return new NoOpClient(getTestName()) {
@Override
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action,
Request request,
ActionListener<Response> listener) {
assertThat(action.name(), is(MountSearchableSnapshotAction.NAME));
assertTrue(request instanceof MountSearchableSnapshotRequest);
MountSearchableSnapshotRequest mountSearchableSnapshotRequest = (MountSearchableSnapshotRequest) request;
assertThat(mountSearchableSnapshotRequest.repositoryName(), is(expectedRepoName));
assertThat(mountSearchableSnapshotRequest.snapshotName(), is(expectedSnapshotName));
assertThat("another ILM step will wait for the restore to complete. the " + MountSnapshotStep.NAME + " step should not",
mountSearchableSnapshotRequest.waitForCompletion(), is(false));
assertThat(mountSearchableSnapshotRequest.ignoreIndexSettings(), is(notNullValue()));
assertThat(mountSearchableSnapshotRequest.ignoreIndexSettings()[0], is(LifecycleSettings.LIFECYCLE_NAME));
assertThat(mountSearchableSnapshotRequest.mountedIndexName(), is(restoredIndexPrefix + indexName));
}
};
}
}
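Taken together, the asserting client above pins down the mount request contract. A hedged restatement as a standalone snippet; the helper name below is hypothetical and not part of the Elasticsearch API:

// Hypothetical helper summarising what getRestoreSnapshotRequestAssertingClient
// verifies: the snapshot is mounted under the prefixed index name, the step does
// not wait for the restore to complete (a later ILM step waits for that), and
// the index.lifecycle.name setting is ignored when mounting.
static String expectedMountedIndexName(String restoredIndexPrefix, String sourceIndexName) {
    return restoredIndexPrefix + sourceIndexName; // e.g. "restored-" + "logs-000001" -> "restored-logs-000001"
}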
@ -0,0 +1,76 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;

import java.io.IOException;
import java.util.List;

import static org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction.NAME;
import static org.hamcrest.Matchers.is;

public class SearchableSnapshotActionTests extends AbstractActionTestCase<SearchableSnapshotAction> {

@Override
public void testToSteps() {
String phase = randomAlphaOfLengthBetween(1, 10);
StepKey expectedFirstStep = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME);
StepKey expectedSecondStep = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME);
StepKey expectedThirdStep = new StepKey(phase, NAME, CleanupSnapshotStep.NAME);
StepKey expectedFourthStep = new StepKey(phase, NAME, CreateSnapshotStep.NAME);
StepKey expectedFifthStep = new StepKey(phase, NAME, MountSnapshotStep.NAME);
StepKey expectedSixthStep = new StepKey(phase, NAME, WaitForIndexColorStep.NAME);
StepKey expectedSeventhStep = new StepKey(phase, NAME, CopyExecutionStateStep.NAME);
StepKey expectedEighthStep = new StepKey(phase, NAME, CopySettingsStep.NAME);
StepKey expectedNinthStep = new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME);

SearchableSnapshotAction action = createTestInstance();
StepKey nextStepKey = new StepKey(phase, randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(1, 5));

List<Step> steps = action.toSteps(null, phase, nextStepKey);
assertThat(steps.size(), is(9));

assertThat(steps.get(0).getKey(), is(expectedFirstStep));
assertThat(steps.get(1).getKey(), is(expectedSecondStep));
assertThat(steps.get(2).getKey(), is(expectedThirdStep));
assertThat(steps.get(3).getKey(), is(expectedFourthStep));
assertThat(steps.get(4).getKey(), is(expectedFifthStep));
assertThat(steps.get(5).getKey(), is(expectedSixthStep));
assertThat(steps.get(6).getKey(), is(expectedSeventhStep));
assertThat(steps.get(7).getKey(), is(expectedEighthStep));
assertThat(steps.get(8).getKey(), is(expectedNinthStep));

AsyncActionBranchingStep branchStep = (AsyncActionBranchingStep) steps.get(3);
assertThat(branchStep.getNextKeyOnIncompleteResponse(), is(expectedThirdStep));
}

@Override
protected SearchableSnapshotAction doParseInstance(XContentParser parser) throws IOException {
return SearchableSnapshotAction.parse(parser);
}

@Override
protected SearchableSnapshotAction createTestInstance() {
return randomInstance();
}

@Override
protected Writeable.Reader<SearchableSnapshotAction> instanceReader() {
return SearchableSnapshotAction::new;
}

@Override
protected SearchableSnapshotAction mutateInstance(SearchableSnapshotAction instance) throws IOException {
return randomInstance();
}

static SearchableSnapshotAction randomInstance() {
return new SearchableSnapshotAction(randomAlphaOfLengthBetween(5, 10));
}
}
@ -174,7 +174,7 @@ public class ShrinkActionTests extends AbstractActionTestCase<ShrinkAction> {
assertTrue(steps.get(7) instanceof CopyExecutionStateStep);
assertThat(steps.get(7).getKey(), equalTo(expectedEighthKey));
assertThat(steps.get(7).getNextStepKey(), equalTo(expectedNinthKey));
assertThat(((CopyExecutionStateStep) steps.get(7)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));
assertThat(((CopyExecutionStateStep) steps.get(7)).getTargetIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX));

assertTrue(steps.get(8) instanceof ShrinkSetAliasStep);
assertThat(steps.get(8).getKey(), equalTo(expectedNinthKey));
@ -0,0 +1,135 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */
package org.elasticsearch.xpack.core.ilm;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.ActionType;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.AliasMetadata;
import org.elasticsearch.cluster.metadata.IndexMetadata;
import org.elasticsearch.cluster.metadata.Metadata;
import org.elasticsearch.test.client.NoOpClient;
import org.elasticsearch.xpack.core.ilm.AsyncActionStep.Listener;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;

import java.util.Arrays;
import java.util.List;

import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;

public class SwapAliasesAndDeleteSourceIndexStepTests extends AbstractStepTestCase<SwapAliasesAndDeleteSourceIndexStep> {

@Override
public SwapAliasesAndDeleteSourceIndexStep createRandomInstance() {
StepKey stepKey = randomStepKey();
StepKey nextStepKey = randomStepKey();
String restoredIndexPrefix = randomAlphaOfLength(10);
return new SwapAliasesAndDeleteSourceIndexStep(stepKey, nextStepKey, client, restoredIndexPrefix);
}

@Override
protected SwapAliasesAndDeleteSourceIndexStep copyInstance(SwapAliasesAndDeleteSourceIndexStep instance) {
return new SwapAliasesAndDeleteSourceIndexStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(),
instance.getTargetIndexPrefix());
}

@Override
public SwapAliasesAndDeleteSourceIndexStep mutateInstance(SwapAliasesAndDeleteSourceIndexStep instance) {
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
String restoredIndexPrefix = instance.getTargetIndexPrefix();
switch (between(0, 2)) {
case 0:
key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
break;
case 1:
nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5));
break;
case 2:
restoredIndexPrefix += randomAlphaOfLength(5);
break;
default:
throw new AssertionError("Illegal randomisation branch");
}
return new SwapAliasesAndDeleteSourceIndexStep(key, nextKey, instance.getClient(), restoredIndexPrefix);
}

public void testPerformAction() {
String sourceIndexName = randomAlphaOfLength(10);
IndexMetadata.Builder sourceIndexMetadataBuilder = IndexMetadata.builder(sourceIndexName).settings(settings(Version.CURRENT))
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));
AliasMetadata.Builder aliasBuilder = AliasMetadata.builder(randomAlphaOfLengthBetween(3, 10));
if (randomBoolean()) {
aliasBuilder.routing(randomAlphaOfLengthBetween(1, 10));
}
if (randomBoolean()) {
aliasBuilder.searchRouting(randomAlphaOfLengthBetween(1, 10));
}
if (randomBoolean()) {
aliasBuilder.indexRouting(randomAlphaOfLengthBetween(1, 10));
}
aliasBuilder.writeIndex(randomBoolean());
AliasMetadata aliasMetaData = aliasBuilder.build();
IndexMetadata sourceIndexMetaData = sourceIndexMetadataBuilder.putAlias(aliasMetaData).build();

String targetIndexPrefix = "index_prefix";
String targetIndexName = targetIndexPrefix + sourceIndexName;

List<AliasActions> expectedAliasActions = Arrays.asList(
AliasActions.removeIndex().index(sourceIndexName),
AliasActions.add().index(targetIndexName).alias(sourceIndexName),
AliasActions.add().index(targetIndexName).alias(aliasMetaData.alias())
.searchRouting(aliasMetaData.searchRouting()).indexRouting(aliasMetaData.indexRouting())
.writeIndex(null));

try (NoOpClient client = getIndicesAliasAssertingClient(expectedAliasActions)) {
SwapAliasesAndDeleteSourceIndexStep step = new SwapAliasesAndDeleteSourceIndexStep(randomStepKey(), randomStepKey(),
client, targetIndexPrefix);

IndexMetadata.Builder targetIndexMetaDataBuilder = IndexMetadata.builder(targetIndexName).settings(settings(Version.CURRENT))
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5));

ClusterState clusterState = ClusterState.builder(emptyClusterState())
.metadata(
Metadata.builder()
.put(sourceIndexMetaData, true)
.put(targetIndexMetaDataBuilder)
.build()
).build();

step.performAction(sourceIndexMetaData, clusterState, null, new Listener() {
@Override
public void onResponse(boolean complete) {
}

@Override
public void onFailure(Exception e) {
}
});
}
}

private NoOpClient getIndicesAliasAssertingClient(List<AliasActions> expectedAliasActions) {
return new NoOpClient(getTestName()) {
@Override
protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action,
Request request,
ActionListener<Response> listener) {
assertThat(action.name(), is(IndicesAliasesAction.NAME));
assertTrue(request instanceof IndicesAliasesRequest);
assertThat(((IndicesAliasesRequest) request).getAliasActions(), equalTo(expectedAliasActions));
}
};
}
}
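The expected alias actions above describe a single atomic swap. A minimal sketch of building that request with the same AliasActions API the test imports; the index and alias names are assumed for illustration:

// One atomic IndicesAliasesRequest: removeIndex deletes the source index,
// while the add actions point both the source's own name and its
// pre-existing alias at the target index.
IndicesAliasesRequest swapRequest = new IndicesAliasesRequest()
    .addAliasAction(AliasActions.removeIndex().index("logs-000001"))                       // delete the source index
    .addAliasAction(AliasActions.add().index("restored-logs-000001").alias("logs-000001")) // source name now resolves to the target
    .addAliasAction(AliasActions.add().index("restored-logs-000001").alias("logs-alias")); // carry the existing alias over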
@ -45,6 +45,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase {
private static final FreezeAction TEST_FREEZE_ACTION = new FreezeAction();
private static final SetPriorityAction TEST_PRIORITY_ACTION = new SetPriorityAction(0);
private static final UnfollowAction TEST_UNFOLLOW_ACTION = new UnfollowAction();
private static final SearchableSnapshotAction TEST_SEARCHABLE_SNAPSHOT_ACTION = new SearchableSnapshotAction("repo");

public void testValidatePhases() {
boolean invalid = randomBoolean();

@ -595,6 +596,8 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase {
return TEST_PRIORITY_ACTION;
case UnfollowAction.NAME:
return TEST_UNFOLLOW_ACTION;
case SearchableSnapshotAction.NAME:
return TEST_SEARCHABLE_SNAPSHOT_ACTION;
default:
throw new IllegalArgumentException("unsupported timeseries phase action [" + actionName + "]");
}

@ -37,7 +37,8 @@ public class WaitForIndexColorStepTests extends AbstractStepTestCase<WaitForInde
StepKey stepKey = randomStepKey();
StepKey nextStepKey = randomStepKey();
ClusterHealthStatus color = randomColor();
return new WaitForIndexColorStep(stepKey, nextStepKey, color);
String indexPrefix = randomAlphaOfLengthBetween(1, 10);
return new WaitForIndexColorStep(stepKey, nextStepKey, color, indexPrefix);
}

@Override

@ -45,6 +46,8 @@ public class WaitForIndexColorStepTests extends AbstractStepTestCase<WaitForInde
StepKey key = instance.getKey();
StepKey nextKey = instance.getNextStepKey();
ClusterHealthStatus color = instance.getColor(), newColor = randomColor();
String indexPrefix = instance.getIndexNamePrefix();

while (color.equals(newColor)) {
newColor = randomColor();
}

@ -59,14 +62,17 @@ public class WaitForIndexColorStepTests extends AbstractStepTestCase<WaitForInde
case 2:
color = newColor;
break;
case 3:
indexPrefix = randomAlphaOfLengthBetween(1, 10);
break;
}

return new WaitForIndexColorStep(key, nextKey, color);
return new WaitForIndexColorStep(key, nextKey, color, indexPrefix);
}

@Override
protected WaitForIndexColorStep copyInstance(WaitForIndexColorStep instance) {
return new WaitForIndexColorStep(instance.getKey(), instance.getNextStepKey(), instance.getColor());
return new WaitForIndexColorStep(instance.getKey(), instance.getNextStepKey(), instance.getColor(), instance.getIndexNamePrefix());
}

public void testConditionMetForGreen() {

@ -22,6 +22,7 @@ import org.elasticsearch.xpack.core.ilm.LifecyclePolicyTests;
import org.elasticsearch.xpack.core.ilm.LifecycleType;
import org.elasticsearch.xpack.core.ilm.ReadOnlyAction;
import org.elasticsearch.xpack.core.ilm.RolloverAction;
import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction;
import org.elasticsearch.xpack.core.ilm.SetPriorityAction;
import org.elasticsearch.xpack.core.ilm.ShrinkAction;
import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType;

@ -73,7 +74,8 @@ public class PutLifecycleRequestTests extends AbstractSerializingTestCase<Reques
new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, FreezeAction.NAME, FreezeAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new)
new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new),
new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new)
));
}

@ -93,6 +95,8 @@ public class PutLifecycleRequestTests extends AbstractSerializingTestCase<Reques
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(FreezeAction.NAME), FreezeAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SetPriorityAction.NAME), SetPriorityAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SearchableSnapshotAction.NAME),
SearchableSnapshotAction::parse),
new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse)
));
return new NamedXContentRegistry(entries);