diff --git a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
index 9e86bef480b..6e994e3fbf7 100644
--- a/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
+++ b/benchmarks/src/main/java/org/elasticsearch/benchmark/routing/allocation/Allocators.java
@@ -47,17 +47,21 @@ public final class Allocators {
         public static final NoopGatewayAllocator INSTANCE = new NoopGatewayAllocator();
 
         @Override
-        public void applyStartedShards(RoutingAllocation allocation, List<ShardRouting> startedShards) {
+        public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
             // noop
         }
 
         @Override
-        public void applyFailedShards(RoutingAllocation allocation, List<FailedShard> failedShards) {
+        public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
             // noop
         }
 
         @Override
-        public void allocateUnassigned(RoutingAllocation allocation) {
+        public void allocateUnassigned(
+            ShardRouting shardRouting,
+            RoutingAllocation allocation,
+            UnassignedAllocationHandler unassignedAllocationHandler
+        ) {
             // noop
         }
     }
diff --git a/docs/build.gradle b/docs/build.gradle
index 8d4511e0cba..d4f685cb9ed 100644
--- a/docs/build.gradle
+++ b/docs/build.gradle
@@ -54,6 +54,7 @@ testClusters.integTest {
   if (BuildParams.isSnapshotBuild() == false) {
     systemProperty 'es.autoscaling_feature_flag_registered', 'true'
     systemProperty 'es.eql_feature_flag_registered', 'true'
+    systemProperty 'es.searchable_snapshots_feature_enabled', 'true'
   }
   setting 'xpack.autoscaling.enabled', 'true'
   setting 'xpack.eql.enabled', 'true'
diff --git a/docs/reference/ilm/apis/get-lifecycle.asciidoc b/docs/reference/ilm/apis/get-lifecycle.asciidoc
index 863de23cd33..b67fb69d363 100644
--- a/docs/reference/ilm/apis/get-lifecycle.asciidoc
+++ b/docs/reference/ilm/apis/get-lifecycle.asciidoc
@@ -100,7 +100,9 @@ If the request succeeds, the body of the response contains the policy definition
         "delete": {
           "min_age": "30d",
           "actions": {
-            "delete": {}
+            "delete": {
+              "delete_searchable_snapshot": true
+            }
           }
         }
       }
diff --git a/docs/reference/ilm/policy-definitions.asciidoc b/docs/reference/ilm/policy-definitions.asciidoc
index ca46607d785..98c2bd3a3d0 100644
--- a/docs/reference/ilm/policy-definitions.asciidoc
+++ b/docs/reference/ilm/policy-definitions.asciidoc
@@ -112,6 +112,7 @@ policy definition.
   - <>
   - <>
   - <>
+  - <<ilm-searchable-snapshot-action,Searchable Snapshot>>
 * Delete
   - <>
   - <>
@@ -266,7 +267,15 @@ Phases allowed: delete.
 
 The Delete Action does just that, it deletes the index.
 
-This action does not have any options associated with it.
+[[ilm-delete-action-options]]
+.Delete
+[options="header"]
+|======
+| Name                         | Required | Default | Description
+| `delete_searchable_snapshot` | no       | true    | Deletes the searchable snapshot created in the cold phase, if a
+                                                      snapshot was created (i.e. if the <<ilm-searchable-snapshot-action,searchable snapshot action>> was used in the cold phase)
+|======
 
 [source,console]
 --------------------------------------------------
@@ -580,6 +589,43 @@ The above example illustrates a policy which attempts to delete an
 index one day after the index has been rolled over. It does not delete
 the index one day after it has been created.
 
+[[ilm-searchable-snapshot-action]]
+==== Searchable Snapshot
+
+Phases allowed: cold.
+
+This action takes a snapshot of the managed index in the configured repository
+and mounts it as a searchable snapshot.
+
+[[ilm-searchable-snapshot-options]]
+.Searchable Snapshot Options
+[options="header"]
+|======
+| Name                  | Required | Default | Description
+| `snapshot_repository` | yes      | -       | Repository used to store the snapshot created by this action. By default,
+                                               the snapshot is deleted by the <<ilm-delete-action,delete action>> in the
+                                               delete phase, if configured, but the <<ilm-delete-action,delete action>>
+                                               can be configured to keep the snapshot using its
+                                               `delete_searchable_snapshot` option.
+
+|======
+
+[source,console]
+--------------------------------------------------
+PUT _ilm/policy/my_policy
+{
+  "policy": {
+    "phases": {
+      "cold": {
+        "actions": {
+          "searchable_snapshot" : {
+            "snapshot_repository" : "backing_repo"
+          }
+        }
+      }
+    }
+  }
+}
+--------------------------------------------------
 
 [[ilm-set-priority-action]]
 ==== Set Priority
diff --git a/docs/reference/ilm/update-lifecycle-policy.asciidoc b/docs/reference/ilm/update-lifecycle-policy.asciidoc
index e29967b60ec..3b19794e8cb 100644
--- a/docs/reference/ilm/update-lifecycle-policy.asciidoc
+++ b/docs/reference/ilm/update-lifecycle-policy.asciidoc
@@ -113,7 +113,9 @@ with its version bumped to 2.
       "delete": {
         "min_age": "10d",
         "actions": {
-          "delete": {}
+          "delete": {
+            "delete_searchable_snapshot": true
+          }
         }
       }
     }
diff --git a/docs/reference/rest-api/index.asciidoc b/docs/reference/rest-api/index.asciidoc
index 9c7d52f0ae0..56b5214d7a0 100644
--- a/docs/reference/rest-api/index.asciidoc
+++ b/docs/reference/rest-api/index.asciidoc
@@ -31,6 +31,7 @@ endif::[]
 * <>
 * <>
 * <>
+* <<searchable-snapshots-apis,Searchable snapshots APIs>>
 * <>
 * <>
 * <>
@@ -59,6 +60,7 @@ include::{es-repo-dir}/migration/migration.asciidoc[]
 include::{es-repo-dir}/indices/apis/reload-analyzers.asciidoc[]
 include::{es-repo-dir}/rollup/rollup-api.asciidoc[]
 include::{es-repo-dir}/search.asciidoc[]
+include::{es-repo-dir}/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc[]
 include::{xes-repo-dir}/rest-api/security.asciidoc[]
 include::{es-repo-dir}/slm/apis/slm-api.asciidoc[]
 include::{es-repo-dir}/transform/apis/index.asciidoc[]
diff --git a/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc
new file mode 100644
index 00000000000..9d5685fdc68
--- /dev/null
+++ b/docs/reference/searchable-snapshots/apis/clear-cache.asciidoc
@@ -0,0 +1,76 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-api-clear-cache]]
+=== Clear cache API
+++++
+Clear cache
+++++
+
+experimental[]
+
+Clear the cache of searchable snapshots.
+
+[[searchable-snapshots-api-clear-cache-request]]
+==== {api-request-title}
+
+`POST /_searchable_snapshots/cache/clear`
+
+`POST /<index>/_searchable_snapshots/cache/clear`
+
+[[searchable-snapshots-api-clear-cache-prereqs]]
+==== {api-prereq-title}
+
+If the {es} {security-features} are enabled, you must have the
+`manage` cluster privilege and the `manage` index privilege
+for any included indices to use this API.
+For more information, see <<security-privileges>>.
+
+[[searchable-snapshots-api-clear-cache-desc]]
+==== {api-description-title}
+
+
+[[searchable-snapshots-api-clear-cache-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Optional, string)
+A comma-separated list of index names for which the
+searchable snapshot cache must be cleared.
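
The endpoint documented above is also easy to drive from client code. Below is a minimal sketch using the low-level Java REST client; the host, port, and the `docs` index name are placeholder assumptions, not values from this PR:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class ClearSearchableSnapshotCache {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            // Clear the searchable snapshot cache for the "docs" index only;
            // omit the index prefix to clear the cache for all indices.
            Response response = client.performRequest(
                new Request("POST", "/docs/_searchable_snapshots/cache/clear"));
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------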
+
+
+[[searchable-snapshots-api-clear-cache-example]]
+==== {api-examples-title}
+////
+[source,console]
+-----------------------------------
+PUT /docs
+{
+  "settings" : {
+    "index.number_of_shards" : 1,
+    "index.number_of_replicas" : 0
+  }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+{
+  "include_global_state": false,
+  "indices": "docs"
+}
+
+DELETE /docs
+
+POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true
+{
+  "index": "docs"
+}
+-----------------------------------
+// TEST[setup:setup-repository]
+////
+
+Clears the cache of the index `docs`:
+
+[source,console]
+--------------------------------------------------
+POST /docs/_searchable_snapshots/cache/clear
+--------------------------------------------------
+// TEST[continued]
diff --git a/docs/reference/searchable-snapshots/apis/get-stats.asciidoc b/docs/reference/searchable-snapshots/apis/get-stats.asciidoc
new file mode 100644
index 00000000000..c54fe96d9f8
--- /dev/null
+++ b/docs/reference/searchable-snapshots/apis/get-stats.asciidoc
@@ -0,0 +1,76 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-api-stats]]
+=== Searchable snapshot statistics API
+++++
+Searchable snapshot statistics
+++++
+
+experimental[]
+
+Retrieve various statistics about searchable snapshots.
+
+[[searchable-snapshots-api-stats-request]]
+==== {api-request-title}
+
+`GET /_searchable_snapshots/stats`
+
+`GET /<index>/_searchable_snapshots/stats`
+
+[[searchable-snapshots-api-stats-prereqs]]
+==== {api-prereq-title}
+
+If the {es} {security-features} are enabled, you must have the
+`manage` cluster privilege and the `manage` index privilege
+for any included indices to use this API.
+For more information, see <<security-privileges>>.
+
+[[searchable-snapshots-api-stats-desc]]
+==== {api-description-title}
+
+
+[[searchable-snapshots-api-stats-path-params]]
+==== {api-path-parms-title}
+
+`<index>`::
+(Optional, string)
+A comma-separated list of index names for which the
+statistics must be retrieved.
+
+
+[[searchable-snapshots-api-stats-example]]
+==== {api-examples-title}
+////
+[source,console]
+-----------------------------------
+PUT /docs
+{
+  "settings" : {
+    "index.number_of_shards" : 1,
+    "index.number_of_replicas" : 0
+  }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+{
+  "include_global_state": false,
+  "indices": "docs"
+}
+
+DELETE /docs
+
+POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true
+{
+  "index": "docs"
+}
+-----------------------------------
+// TEST[setup:setup-repository]
+////
+
+Retrieves the statistics of the index `docs`:
+
+[source,console]
+--------------------------------------------------
+GET /docs/_searchable_snapshots/stats
+--------------------------------------------------
+// TEST[continued]
diff --git a/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
new file mode 100644
index 00000000000..7522e64944e
--- /dev/null
+++ b/docs/reference/searchable-snapshots/apis/mount-snapshot.asciidoc
@@ -0,0 +1,126 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-api-mount-snapshot]]
+=== Mount snapshot API
+++++
+Mount snapshot
+++++
+
+experimental[]
+
+Mount a snapshot as a searchable index.
+
+[[searchable-snapshots-api-mount-request]]
+==== {api-request-title}
+
+`POST /_snapshot/<repository>/<snapshot>/_mount`
+
+[[searchable-snapshots-api-mount-prereqs]]
+==== {api-prereq-title}
+
+If the {es} {security-features} are enabled, you must have the
+`manage` cluster privilege and the `manage` index privilege
+for any included indices to use this API.
+For more information, see <<security-privileges>>.
+
+[[searchable-snapshots-api-mount-desc]]
+==== {api-description-title}
+
+
+[[searchable-snapshots-api-mount-path-params]]
+==== {api-path-parms-title}
+
+`<repository>`::
+(Required, string)
+The name of the repository containing
+the snapshot of the index to mount.
+
+`<snapshot>`::
+(Required, string)
+The name of the snapshot of the index
+to mount.
+
+[[searchable-snapshots-api-mount-query-params]]
+==== {api-query-parms-title}
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=master-timeout]
+
+include::{docdir}/rest-api/common-parms.asciidoc[tag=wait_for_completion]
+
+[[searchable-snapshots-api-mount-request-body]]
+==== {api-request-body-title}
+
+`index`::
+(Required, string)
+Name of the index contained in the snapshot
+whose data is to be mounted.
+
+If no `renamed_index` is specified, this name
+will also be used to create the new index.
+
+`renamed_index`::
++
+--
+(Optional, string)
+Name of the index that will be created.
+--
+
+`index_settings`::
++
+--
+(Optional, object)
+Settings that should be added to the index when it is mounted.
+--
+
+`ignore_index_settings`::
++
+--
+(Optional, array of strings)
+Names of settings that should be removed from the index when it is mounted.
+--
+
+[[searchable-snapshots-api-mount-example]]
+==== {api-examples-title}
+////
+[source,console]
+-----------------------------------
+PUT /my_docs
+{
+  "settings" : {
+    "index.number_of_shards" : 1,
+    "index.number_of_replicas" : 0
+  }
+}
+
+PUT /_snapshot/my_repository/my_snapshot?wait_for_completion=true
+{
+  "include_global_state": false,
+  "indices": "my_docs"
+}
+
+DELETE /my_docs
+-----------------------------------
+// TEST[setup:setup-repository]
+////
+
+Mounts the index `my_docs` from an existing snapshot named `my_snapshot` stored
+in `my_repository` as a new index `docs`:
+
+[source,console]
+--------------------------------------------------
+POST /_snapshot/my_repository/my_snapshot/_mount?wait_for_completion=true
+{
+  "index": "my_docs", <1>
+  "renamed_index": "docs", <2>
+  "index_settings": { <3>
+    "index.number_of_replicas": 0
+  },
+  "ignore_index_settings": [ "index.refresh_interval" ] <4>
+}
+--------------------------------------------------
+// TEST[continued]
+
+<1> The name of the index in the snapshot to mount
+<2> The name of the index to create
+<3> Any index settings to add to the new index
+<4> List of index settings to ignore when mounting the snapshotted index
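
A mount request can likewise be issued programmatically. The sketch below uses the low-level Java REST client and mirrors the documented body fields; the host and the repository, snapshot, and index names are placeholders taken from the example above:

[source,java]
--------------------------------------------------
import org.apache.http.HttpHost;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;

public class MountSnapshotExample {
    public static void main(String[] args) throws Exception {
        try (RestClient client = RestClient.builder(new HttpHost("localhost", 9200, "http")).build()) {
            Request request = new Request("POST", "/_snapshot/my_repository/my_snapshot/_mount");
            request.addParameter("wait_for_completion", "true");
            // Body mirrors the documented fields: "index" is required, the rest are optional.
            request.setJsonEntity(
                "{\"index\":\"my_docs\",\"renamed_index\":\"docs\"," +
                "\"index_settings\":{\"index.number_of_replicas\":0}," +
                "\"ignore_index_settings\":[\"index.refresh_interval\"]}");
            Response response = client.performRequest(request);
            System.out.println(response.getStatusLine());
        }
    }
}
--------------------------------------------------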
diff --git a/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc b/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc
new file mode 100644
index 00000000000..5b56c644090
--- /dev/null
+++ b/docs/reference/searchable-snapshots/apis/searchable-snapshots-apis.asciidoc
@@ -0,0 +1,16 @@
+[role="xpack"]
+[testenv="basic"]
+[[searchable-snapshots-apis]]
+== Searchable snapshots APIs
+
+experimental[]
+
+You can use the following APIs to perform searchable snapshot operations.
+
+* <<searchable-snapshots-api-mount-snapshot,Mount snapshot>>
+* <<searchable-snapshots-api-clear-cache,Clear cache>>
+* <<searchable-snapshots-api-stats,Get searchable snapshot statistics>>
+
+include::mount-snapshot.asciidoc[]
+include::clear-cache.asciidoc[]
+include::get-stats.asciidoc[]
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
index bd1f85c7809..de803dd8ada 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobContainer.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.repositories.azure;
 
+import com.microsoft.azure.storage.Constants;
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.StorageException;
 import org.apache.logging.log4j.LogManager;
@@ -68,10 +69,8 @@ public class AzureBlobContainer extends AbstractBlobContainer {
         return false;
     }
 
-    @Override
-    public InputStream readBlob(String blobName) throws IOException {
-        logger.trace("readBlob({})", blobName);
-
+    private InputStream openInputStream(String blobName, long position, @Nullable Long length) throws IOException {
+        logger.trace("readBlob({}) from position [{}] with length [{}]", blobName, position, length != null ? length : "unlimited");
         if (blobStore.getLocationMode() == LocationMode.SECONDARY_ONLY && !blobExists(blobName)) {
             // On Azure, if the location path is a secondary location, and the blob does not
             // exist, instead of returning immediately from the getInputStream call below
@@ -81,9 +80,8 @@ public class AzureBlobContainer extends AbstractBlobContainer {
             // stream to it.
             throw new NoSuchFileException("Blob [" + blobName + "] does not exist");
         }
-
         try {
-            return blobStore.getInputStream(buildKey(blobName));
+            return blobStore.getInputStream(buildKey(blobName), position, length);
         } catch (StorageException e) {
             if (e.getHttpStatusCode() == HttpURLConnection.HTTP_NOT_FOUND) {
                 throw new NoSuchFileException(e.getMessage());
@@ -94,6 +92,21 @@ public class AzureBlobContainer extends AbstractBlobContainer {
         }
     }
 
+    @Override
+    public InputStream readBlob(String blobName) throws IOException {
+        return openInputStream(blobName, 0L, null);
+    }
+
+    @Override
+    public InputStream readBlob(String blobName, long position, long length) throws IOException {
+        return openInputStream(blobName, position, length);
+    }
+
+    @Override
+    public long readBlobPreferredLength() {
+        return Constants.DEFAULT_MINIMUM_READ_SIZE_IN_BYTES;
+    }
+
     @Override
     public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         logger.trace("writeBlob({}, stream, {})", buildKey(blobName), blobSize);
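
`AzureBlobContainer` implements the new `readBlob(name, position, length)` overload natively via the SDK. For a store without server-side range support, the same contract could be approximated client-side; the following is a naive sketch under that assumption (not Elasticsearch's implementation) that skips to the position and caps what the caller can read:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.io.InputStream;

public final class NaiveRangeReader {

    /**
     * Approximates readBlob(name, position, length) for stores without server-side
     * range support: skip to the position, then cap the bytes returned. Inefficient
     * (the skipped prefix is still transferred) but honors the contract.
     */
    public static InputStream readRange(InputStream full, long position, long length) throws IOException {
        long skipped = 0;
        while (skipped < position) {
            final long n = full.skip(position - skipped);
            if (n <= 0) { // skip() made no progress; treat the source as truncated
                throw new IOException("unable to skip to position " + position);
            }
            skipped += n;
        }
        return new InputStream() {
            private long remaining = length;

            @Override
            public int read() throws IOException {
                if (remaining <= 0) {
                    return -1;
                }
                final int b = full.read();
                if (b != -1) {
                    remaining--;
                }
                return b;
            }
        };
    }
}
--------------------------------------------------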
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java
index 3c0e0175963..8f9be0f91c4 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureBlobStore.java
@@ -22,6 +22,7 @@ package org.elasticsearch.repositories.azure;
 import com.microsoft.azure.storage.LocationMode;
 import com.microsoft.azure.storage.StorageException;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetadata;
 import org.elasticsearch.common.blobstore.BlobPath;
@@ -100,8 +101,8 @@ public class AzureBlobStore implements BlobStore {
         return service.deleteBlobDirectory(clientName, container, path, executor);
     }
 
-    public InputStream getInputStream(String blob) throws URISyntaxException, StorageException, IOException {
-        return service.getInputStream(clientName, container, blob);
+    public InputStream getInputStream(String blob, long position, @Nullable Long length) throws URISyntaxException, StorageException {
+        return service.getInputStream(clientName, container, blob, position, length);
     }
 
     public Map<String, BlobMetadata> listBlobsByPrefix(String keyPath, String prefix)
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java
index 5fa4810ea9a..78f36449cfa 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureStorageService.java
@@ -43,6 +43,7 @@ import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.elasticsearch.action.support.PlainActionFuture;
+import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.blobstore.BlobMetadata;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.DeleteResult;
@@ -257,13 +258,13 @@ public class AzureStorageService {
         return new DeleteResult(blobsDeleted.get(), bytesDeleted.get());
     }
 
-    public InputStream getInputStream(String account, String container, String blob)
-        throws URISyntaxException, StorageException, IOException {
+    public InputStream getInputStream(String account, String container, String blob, long position, @Nullable Long length)
+        throws URISyntaxException, StorageException {
         final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
         final CloudBlockBlob blockBlobReference = client.v1().getContainerReference(container).getBlockBlobReference(blob);
         logger.trace(() -> new ParameterizedMessage("reading container [{}], blob [{}]", container, blob));
         final BlobInputStream is = SocketAccess.doPrivilegedException(() ->
-            blockBlobReference.openInputStream(null, null, client.v2().get()));
+            blockBlobReference.openInputStream(position, length, null, null, client.v2().get()));
         return giveSocketPermissionsToStream(is);
     }
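
Both the SDK call above and HTTP `Range` headers use inclusive bounds, so a `(position, length)` pair maps to the byte range `[position, position + length - 1]`. A small illustrative helper (hypothetical, not part of this diff):

[source,java]
--------------------------------------------------
public final class InclusiveRanges {

    /**
     * Converts a zero-based (position, length) read request into the inclusive
     * [first, last] byte range used by HTTP Range headers and by the Azure and
     * S3 SDK range parameters. Math.addExact guards against overflow.
     */
    public static long[] toInclusiveRange(long position, long length) {
        if (position < 0L || length <= 0L) {
            throw new IllegalArgumentException("position must be >= 0 and length must be > 0");
        }
        return new long[] { position, Math.addExact(position, length - 1) };
    }

    public static void main(String[] args) {
        long[] range = toInclusiveRange(10, 5);
        System.out.println("bytes=" + range[0] + "-" + range[1]); // prints bytes=10-14
    }
}
--------------------------------------------------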
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java
index 3ec5d394392..0dd5944010d 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/repositories/azure/AzureBlobContainerRetriesTests.java
@@ -25,12 +25,14 @@ import com.microsoft.azure.storage.blob.BlobRequestOptions;
 import com.sun.net.httpserver.HttpExchange;
 import com.sun.net.httpserver.HttpServer;
 import fixture.azure.AzureHttpHandler;
+import org.apache.http.HttpStatus;
 import org.elasticsearch.cluster.metadata.RepositoryMetadata;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -63,6 +65,7 @@ import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -81,8 +84,10 @@ import static org.elasticsearch.repositories.azure.AzureStorageSettings.TIMEOUT_
 import static org.elasticsearch.repositories.blobstore.ESBlobStoreRepositoryIntegTestCase.randomBytes;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 /**
  * This class tests how a {@link AzureBlobContainer} and its underlying SDK client are retrying requests when reading or writing blobs.
@@ -90,6 +95,8 @@ import static org.hamcrest.Matchers.lessThan;
 @SuppressForbidden(reason = "use a http server")
 public class AzureBlobContainerRetriesTests extends ESTestCase {
 
+    private static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1L;
+
     private HttpServer httpServer;
     private ThreadPool threadPool;
 
@@ -128,7 +135,7 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         final AzureStorageService service = new AzureStorageService(clientSettings.build()) {
             @Override
             RetryPolicyFactory createRetryPolicy(final AzureStorageSettings azureStorageSettings) {
-                return new RetryExponentialRetry(1, 100, 500, azureStorageSettings.getMaxRetries());
+                return new RetryExponentialRetry(1, 10, 100, azureStorageSettings.getMaxRetries());
             }
 
             @Override
@@ -150,7 +157,16 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
 
     public void testReadNonexistentBlobThrowsNoSuchFileException() {
         final BlobContainer blobContainer = createBlobContainer(between(1, 5));
-        final Exception exception = expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob"));
+        final Exception exception = expectThrows(NoSuchFileException.class,
+            () -> {
+                if (randomBoolean()) {
+                    blobContainer.readBlob("read_nonexistent_blob");
+                } else {
+                    final long position = randomLongBetween(0, MAX_RANGE_VAL - 1L);
+                    final long length = randomLongBetween(1, MAX_RANGE_VAL - position);
+                    blobContainer.readBlob("read_nonexistent_blob", position, length);
+                }
+            });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("not found"));
     }
 
@@ -160,34 +176,35 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         final CountDown countDownGet = new CountDown(maxRetries);
         final byte[] bytes = randomBlobContent();
         httpServer.createContext("/container/read_blob_max_retries", exchange -> {
-            Streams.readFully(exchange.getRequestBody());
-            if ("HEAD".equals(exchange.getRequestMethod())) {
-                if (countDownHead.countDown()) {
-                    exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
-                    exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length));
-                    exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
-                    exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
-                    exchange.close();
-                    return;
+            try {
+                Streams.readFully(exchange.getRequestBody());
+                if ("HEAD".equals(exchange.getRequestMethod())) {
+                    if (countDownHead.countDown()) {
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
+                        return;
+                    }
+                } else if ("GET".equals(exchange.getRequestMethod())) {
+                    if (countDownGet.countDown()) {
+                        final int rangeStart = getRangeStart(exchange);
+                        assertThat(rangeStart, lessThan(bytes.length));
+                        final int length = bytes.length - rangeStart;
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length);
+                        exchange.getResponseBody().write(bytes, rangeStart, length);
+                        return;
+                    }
                 }
-            } else if ("GET".equals(exchange.getRequestMethod())) {
-                if (countDownGet.countDown()) {
-                    final int rangeStart = getRangeStart(exchange);
-                    assertThat(rangeStart, lessThan(bytes.length));
-                    final int length = bytes.length - rangeStart;
-                    exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
-                    exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length));
-                    exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
-                    exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length);
-                    exchange.getResponseBody().write(bytes, rangeStart, length);
-                    exchange.close();
-                    return;
+                if (randomBoolean()) {
+                    AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE));
                 }
+            } finally {
+                exchange.close();
             }
-            if (randomBoolean()) {
-                AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE));
-            }
-            exchange.close();
         });
 
         final BlobContainer blobContainer = createBlobContainer(maxRetries);
@@ -198,6 +215,58 @@ public class AzureBlobContainerRetriesTests extends ESTestCase {
         }
     }
 
+    public void testReadRangeBlobWithRetries() throws Exception {
+        final int maxRetries = randomIntBetween(1, 5);
+        final CountDown countDownHead = new CountDown(maxRetries);
+        final CountDown countDownGet = new CountDown(maxRetries);
+        final byte[] bytes = randomBlobContent();
+        httpServer.createContext("/container/read_range_blob_max_retries", exchange -> {
+            try {
+                Streams.readFully(exchange.getRequestBody());
+                if ("HEAD".equals(exchange.getRequestMethod())) {
+                    if (countDownHead.countDown()) {
+                        exchange.getResponseHeaders().add("Content-Type", "application/octet-stream");
+                        exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(bytes.length));
+                        exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob");
+                        exchange.sendResponseHeaders(RestStatus.OK.getStatus(), -1);
+                        return;
+                    }
+                } else if ("GET".equals(exchange.getRequestMethod())) {
+                    if (countDownGet.countDown()) {
+                        final int rangeStart = getRangeStart(exchange);
+                        assertThat(rangeStart, lessThan(bytes.length));
+                        final Optional<Integer> rangeEnd = getRangeEnd(exchange);
+                        assertThat(rangeEnd.isPresent(), is(true));
+                        assertThat(rangeEnd.get(), greaterThanOrEqualTo(rangeStart));
+                        final int length = (rangeEnd.get() - rangeStart) + 1;
+                        assertThat(length, lessThanOrEqualTo(bytes.length - rangeStart));
exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); + exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); + exchange.getResponseBody().write(bytes, rangeStart, length); + return; + } + } + if (randomBoolean()) { + AzureHttpHandler.sendError(exchange, randomFrom(RestStatus.INTERNAL_SERVER_ERROR, RestStatus.SERVICE_UNAVAILABLE)); + } + } finally { + exchange.close(); + } + }); + + final BlobContainer blobContainer = createBlobContainer(maxRetries); + final int position = randomIntBetween(0, bytes.length - 1); + final int length = randomIntBetween(1, bytes.length - position); + try (InputStream inputStream = blobContainer.readBlob("read_range_blob_max_retries", position, length)) { + final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(inputStream)); + assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + length)), bytesRead); + assertThat(countDownHead.isCountedDown(), is(true)); + assertThat(countDownGet.isCountedDown(), is(true)); + } + } + public void testWriteBlobWithRetries() throws Exception { final int maxRetries = randomIntBetween(1, 5); final CountDown countDown = new CountDown(maxRetries); @@ -339,14 +408,56 @@ public class AzureBlobContainerRetriesTests extends ESTestCase { return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb } - private static int getRangeStart(final HttpExchange exchange) { + private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$"); + + private static Tuple getRanges(HttpExchange exchange) { final String rangeHeader = exchange.getRequestHeaders().getFirst("X-ms-range"); if (rangeHeader == null) { - return 0; + return Tuple.tuple(0L, MAX_RANGE_VAL); } - final Matcher matcher = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$").matcher(rangeHeader); + final Matcher matcher = RANGE_PATTERN.matcher(rangeHeader); assertTrue(rangeHeader + " matches expected pattern", matcher.matches()); - return Math.toIntExact(Long.parseLong(matcher.group(1))); + final long rangeStart = Long.parseLong(matcher.group(1)); + final long rangeEnd = Long.parseLong(matcher.group(2)); + assertThat(rangeStart, lessThanOrEqualTo(rangeEnd)); + return Tuple.tuple(rangeStart, rangeEnd); + } + + private static int getRangeStart(HttpExchange exchange) { + return Math.toIntExact(getRanges(exchange).v1()); + } + + private static Optional getRangeEnd(HttpExchange exchange) { + final long rangeEnd = getRanges(exchange).v2(); + if (rangeEnd == MAX_RANGE_VAL) { + return Optional.empty(); + } + return Optional.of(Math.toIntExact(rangeEnd)); + } + + private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException { + final int rangeStart = getRangeStart(exchange); + assertThat(rangeStart, lessThan(bytes.length)); + final Optional rangeEnd = getRangeEnd(exchange); + final int length; + if (rangeEnd.isPresent()) { + // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 + final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1); + length = effectiveRangeEnd - rangeStart; + } else { + length = bytes.length - rangeStart - 1; + } + exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); + exchange.getResponseHeaders().add("x-ms-blob-content-length", String.valueOf(length)); + 
exchange.getResponseHeaders().add("x-ms-blob-type", "blockblob"); + exchange.sendResponseHeaders(HttpStatus.SC_OK, length); + final int bytesToSend = randomIntBetween(0, length - 1); + if (bytesToSend > 0) { + exchange.getResponseBody().write(bytes, rangeStart, bytesToSend); + } + if (randomBoolean()) { + exchange.getResponseBody().flush(); + } } } diff --git a/plugins/repository-s3/build.gradle b/plugins/repository-s3/build.gradle index df8a6905172..0ec2fa1c6bb 100644 --- a/plugins/repository-s3/build.gradle +++ b/plugins/repository-s3/build.gradle @@ -221,7 +221,12 @@ processTestResources { MavenFilteringHack.filter(it, expansions) } -testFixtures.useFixture(':test:fixtures:s3-fixture') +[ + 's3-fixture', + 's3-fixture-with-session-token', + 's3-fixture-with-ec2', + 's3-fixture-with-ecs', +].forEach { fixture -> testFixtures.useFixture(':test:fixtures:s3-fixture', fixture) } def fixtureAddress = { fixture -> assert useFixture: 'closure should not be used without a fixture' diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java index 94cf9a624f1..1e82c7c592b 100644 --- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java +++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3BlobContainer.java @@ -46,7 +46,10 @@ import org.elasticsearch.common.blobstore.DeleteResult; import org.elasticsearch.common.blobstore.support.AbstractBlobContainer; import org.elasticsearch.common.blobstore.support.PlainBlobMetadata; import org.elasticsearch.common.collect.Tuple; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.io.InputStream; import java.util.ArrayList; @@ -86,6 +89,27 @@ class S3BlobContainer extends AbstractBlobContainer { return new S3RetryingInputStream(blobStore, buildKey(blobName)); } + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + if (position < 0L) { + throw new IllegalArgumentException("position must be non-negative"); + } + if (length < 0) { + throw new IllegalArgumentException("length must be non-negative"); + } + if (length == 0) { + return new ByteArrayInputStream(new byte[0]); + } else { + return new S3RetryingInputStream(blobStore, buildKey(blobName), position, Math.addExact(position, length - 1)); + } + } + + @Override + public long readBlobPreferredLength() { + // This container returns streams that must be fully consumed, so we tell consumers to make bounded requests. + return new ByteSizeValue(32, ByteSizeUnit.MB).getBytes(); + } + /** * This implementation ignores the failIfAlreadyExists flag as the S3 API has no way to enforce this due to its weak consistency model. 
diff --git a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
index cb3a89316f6..105f469c905 100644
--- a/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
+++ b/plugins/repository-s3/src/main/java/org/elasticsearch/repositories/s3/S3RetryingInputStream.java
@@ -25,6 +25,7 @@ import com.amazonaws.services.s3.model.S3Object;
 import org.apache.logging.log4j.LogManager;
 import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.core.internal.io.IOUtils;
 import org.elasticsearch.Version;
 
@@ -49,6 +50,8 @@ class S3RetryingInputStream extends InputStream {
 
     private final S3BlobStore blobStore;
     private final String blobKey;
+    private final long start;
+    private final long end;
     private final int maxAttempts;
 
     private InputStream currentStream;
@@ -58,17 +61,32 @@ class S3RetryingInputStream extends InputStream {
     private boolean closed;
 
     S3RetryingInputStream(S3BlobStore blobStore, String blobKey) throws IOException {
+        this(blobStore, blobKey, 0, Long.MAX_VALUE - 1);
+    }
+
+    // both start and end are inclusive bounds, following the definition in GetObjectRequest.setRange
+    S3RetryingInputStream(S3BlobStore blobStore, String blobKey, long start, long end) throws IOException {
+        if (start < 0L) {
+            throw new IllegalArgumentException("start must be non-negative");
+        }
+        if (end < start || end == Long.MAX_VALUE) {
+            throw new IllegalArgumentException("end must be >= start and not Long.MAX_VALUE");
+        }
         this.blobStore = blobStore;
         this.blobKey = blobKey;
         this.maxAttempts = blobStore.getMaxRetries() + 1;
+        this.start = start;
+        this.end = end;
         currentStream = openStream();
     }
 
     private InputStream openStream() throws IOException {
         try (AmazonS3Reference clientReference = blobStore.clientReference()) {
             final GetObjectRequest getObjectRequest = new GetObjectRequest(blobStore.bucket(), blobKey);
-            if (currentOffset > 0) {
-                getObjectRequest.setRange(currentOffset);
+            if (currentOffset > 0 || start > 0 || end < Long.MAX_VALUE - 1) {
+                assert start + currentOffset <= end :
+                    "requesting beyond end, start = " + start + " offset=" + currentOffset + " end=" + end;
+                getObjectRequest.setRange(Math.addExact(start, currentOffset), end);
             }
             final S3Object s3Object = SocketAccess.doPrivileged(() -> clientReference.client().getObject(getObjectRequest));
             return s3Object.getObjectContent();
@@ -122,20 +140,32 @@ class S3RetryingInputStream extends InputStream {
 
     private void reopenStreamOrFail(IOException e) throws IOException {
         if (attempt >= maxAttempts) {
+            logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], giving up",
+                blobStore.bucket(), blobKey, start + currentOffset, attempt, maxAttempts), e);
             throw addSuppressedExceptions(e);
         }
         logger.debug(new ParameterizedMessage("failed reading [{}/{}] at offset [{}], attempt [{}] of [{}], retrying",
-            blobStore.bucket(), blobKey, currentOffset, attempt, maxAttempts), e);
+            blobStore.bucket(), blobKey, start + currentOffset, attempt, maxAttempts), e);
         attempt += 1;
         if (failures.size() < MAX_SUPPRESSED_EXCEPTIONS) {
            failures.add(e);
         }
+        try {
+            Streams.consumeFully(currentStream);
+        } catch (Exception e2) {
+            logger.trace("Failed to fully consume stream before retrying", e2);
+        }
         IOUtils.closeWhileHandlingException(currentStream);
         currentStream = openStream();
     }
 
     @Override
     public void close() throws IOException {
+        try {
+            Streams.consumeFully(currentStream);
+        } catch (Exception e) {
+            logger.trace("Failed to fully consume stream on close", e);
+        }
         currentStream.close();
         closed = true;
     }
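
The core idea of `S3RetryingInputStream` generalizes to any reopenable source: remember how many bytes were already delivered and reopen at `start + offset` after a failure. A self-contained sketch of that pattern (simplified; the real class additionally drains the broken stream and records suppressed failures, as shown above):

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.io.InputStream;

// Sketch of resume-on-retry: count the bytes already handed to the caller and
// reopen the underlying source at that offset after a read failure.
abstract class ResumingInputStream extends InputStream {
    private final int maxAttempts;
    private long offset;
    private int attempt = 1;
    private InputStream current;

    ResumingInputStream(int maxAttempts) {
        this.maxAttempts = maxAttempts;
    }

    /** Opens the underlying stream starting at the given absolute offset. */
    protected abstract InputStream open(long offset) throws IOException;

    @Override
    public int read() throws IOException {
        while (true) {
            if (current == null) {
                current = open(offset);
            }
            try {
                final int b = current.read();
                if (b != -1) {
                    offset++; // only count bytes actually delivered to the caller
                }
                return b;
            } catch (IOException e) {
                if (attempt++ >= maxAttempts) {
                    throw e;
                }
                try {
                    current.close();
                } catch (IOException ignored) {
                    // best effort; retrying matters more than a clean close
                }
                current = null; // the next loop iteration reopens at the saved offset
            }
        }
    }

    @Override
    public void close() throws IOException {
        if (current != null) {
            current.close();
        }
    }
}
--------------------------------------------------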
diff --git a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
index 9c26576be7d..c90077923b8 100644
--- a/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
+++ b/plugins/repository-s3/src/test/java/org/elasticsearch/repositories/s3/S3BlobContainerRetriesTests.java
@@ -32,6 +32,7 @@ import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
@@ -58,6 +59,7 @@ import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.Locale;
 import java.util.Objects;
+import java.util.Optional;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -70,10 +72,13 @@ import static org.elasticsearch.repositories.s3.S3ClientSettings.MAX_RETRIES_SET
 import static org.elasticsearch.repositories.s3.S3ClientSettings.READ_TIMEOUT_SETTING;
 import static org.hamcrest.Matchers.anyOf;
 import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.either;
 import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
 
 /**
  * This class tests how a {@link S3BlobContainer} and its underlying AWS S3 client are retrying requests when reading or writing blobs.
  */
@@ -81,6 +86,8 @@ import static org.hamcrest.Matchers.lessThan;
 @SuppressForbidden(reason = "use a http server")
 public class S3BlobContainerRetriesTests extends ESTestCase {
 
+    private static final long MAX_RANGE_VAL = Long.MAX_VALUE - 1;
+
     private HttpServer httpServer;
     private S3Service service;
 
@@ -139,8 +146,19 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
 
     public void testReadNonexistentBlobThrowsNoSuchFileException() {
         final BlobContainer blobContainer = createBlobContainer(between(1, 5), null, null, null);
-        final Exception exception = expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob"));
+        final long position = randomLongBetween(0, MAX_RANGE_VAL);
+        final int length = randomIntBetween(0, Math.toIntExact(Math.min(Integer.MAX_VALUE, MAX_RANGE_VAL - position)));
+        final Exception exception = expectThrows(NoSuchFileException.class,
+            () -> {
+                if (randomBoolean()) {
+                    blobContainer.readBlob("read_nonexistent_blob");
+                } else {
+                    blobContainer.readBlob("read_nonexistent_blob", 0, 1);
+                }
+            });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found"));
+        assertThat(expectThrows(NoSuchFileException.class, () -> blobContainer.readBlob("read_nonexistent_blob", position, length))
+            .getMessage().toLowerCase(Locale.ROOT), containsString("blob object [read_nonexistent_blob] not found"));
     }
 
     public void testReadBlobWithRetries() throws Exception {
@@ -153,6 +171,7 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
                 if (countDown.countDown()) {
                     final int rangeStart = getRangeStart(exchange);
                     assertThat(rangeStart, lessThan(bytes.length));
+                    assertEquals(Optional.empty(), getRangeEnd(exchange));
                     exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
                     exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length - rangeStart);
                     exchange.getResponseBody().write(bytes, rangeStart, bytes.length - rangeStart);
@@ -173,8 +192,85 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         final TimeValue readTimeout = TimeValue.timeValueSeconds(between(1, 3));
         final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null);
         try (InputStream inputStream = blobContainer.readBlob("read_blob_max_retries")) {
-            assertArrayEquals(bytes, BytesReference.toBytes(Streams.readFully(inputStream)));
-            assertThat(countDown.isCountedDown(), is(true));
+            final int readLimit;
+            final InputStream wrappedStream;
+            if (randomBoolean()) {
+                // read stream only partly
+                readLimit = randomIntBetween(0, bytes.length);
+                wrappedStream = Streams.limitStream(inputStream, readLimit);
+            } else {
+                readLimit = bytes.length;
+                wrappedStream = inputStream;
+            }
+            final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
+            logger.info("maxRetries={}, readLimit={}, byteSize={}, bytesRead={}",
+                maxRetries, readLimit, bytes.length, bytesRead.length);
+            assertArrayEquals(Arrays.copyOfRange(bytes, 0, readLimit), bytesRead);
+            if (readLimit < bytes.length) {
+                // we might have completed things based on an incomplete response, and we're happy with that
+            } else {
+                assertTrue(countDown.isCountedDown());
+            }
+        }
+    }
+
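
The partial-read branch above relies on `Streams.limitStream`. For readers unfamiliar with it, here is a minimal stand-in with the same observable single-byte-read behavior; this is an illustrative sketch, not the Elasticsearch implementation:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.io.InputStream;

// Caps the number of bytes readable from a delegate stream, as a partial reader would.
final class LimitedInputStream extends InputStream {
    private final InputStream delegate;
    private long remaining;

    LimitedInputStream(InputStream delegate, long limit) {
        this.delegate = delegate;
        this.remaining = limit;
    }

    @Override
    public int read() throws IOException {
        if (remaining <= 0) {
            return -1; // limit reached: report end-of-stream without touching the delegate
        }
        final int b = delegate.read();
        if (b != -1) {
            remaining--;
        }
        return b;
    }
}
--------------------------------------------------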
+    public void testReadRangeBlobWithRetries() throws Exception {
+        final int maxRetries = randomInt(5);
+        final CountDown countDown = new CountDown(maxRetries + 1);
+
+        final byte[] bytes = randomBlobContent();
+        httpServer.createContext("/bucket/read_range_blob_max_retries", exchange -> {
+            Streams.readFully(exchange.getRequestBody());
+            if (countDown.countDown()) {
+                final int rangeStart = getRangeStart(exchange);
+                assertThat(rangeStart, lessThan(bytes.length));
+                assertTrue(getRangeEnd(exchange).isPresent());
+                final int rangeEnd = getRangeEnd(exchange).get();
+                assertThat(rangeEnd, greaterThanOrEqualTo(rangeStart));
+                // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+                final int effectiveRangeEnd = Math.min(bytes.length - 1, rangeEnd);
+                final int length = (effectiveRangeEnd - rangeStart) + 1;
+                exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
+                exchange.sendResponseHeaders(HttpStatus.SC_OK, length);
+                exchange.getResponseBody().write(bytes, rangeStart, length);
+                exchange.close();
+                return;
+            }
+            if (randomBoolean()) {
+                exchange.sendResponseHeaders(randomFrom(HttpStatus.SC_INTERNAL_SERVER_ERROR, HttpStatus.SC_BAD_GATEWAY,
+                    HttpStatus.SC_SERVICE_UNAVAILABLE, HttpStatus.SC_GATEWAY_TIMEOUT), -1);
+            } else if (randomBoolean()) {
+                sendIncompleteContent(exchange, bytes);
+            }
+            if (randomBoolean()) {
+                exchange.close();
+            }
+        });
+
+        final TimeValue readTimeout = TimeValue.timeValueMillis(between(100, 500));
+        final BlobContainer blobContainer = createBlobContainer(maxRetries, readTimeout, null, null);
+        final int position = randomIntBetween(0, bytes.length - 1);
+        final int length = randomIntBetween(0, randomBoolean() ? bytes.length : Integer.MAX_VALUE);
+        try (InputStream inputStream = blobContainer.readBlob("read_range_blob_max_retries", position, length)) {
+            final int readLimit;
+            final InputStream wrappedStream;
+            if (randomBoolean()) {
+                // read stream only partly
+                readLimit = randomIntBetween(0, length);
+                wrappedStream = Streams.limitStream(inputStream, readLimit);
+            } else {
+                readLimit = length;
+                wrappedStream = inputStream;
+            }
+            final byte[] bytesRead = BytesReference.toBytes(Streams.readFully(wrappedStream));
+            logger.info("maxRetries={}, position={}, length={}, readLimit={}, byteSize={}, bytesRead={}",
+                maxRetries, position, length, readLimit, bytes.length, bytesRead.length);
+            assertArrayEquals(Arrays.copyOfRange(bytes, position, Math.min(bytes.length, position + readLimit)), bytesRead);
+            if (readLimit == 0 || (readLimit < length && readLimit == bytesRead.length)) {
+                // we might have completed things based on an incomplete response, and we're happy with that
+            } else {
+                assertTrue(countDown.isCountedDown());
+            }
+        }
+    }
+
@@ -194,12 +290,18 @@
         final byte[] bytes = randomBlobContent();
         httpServer.createContext("/bucket/read_blob_incomplete", exchange -> sendIncompleteContent(exchange, bytes));
 
-        exception = expectThrows(SocketTimeoutException.class, () -> {
-            try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) {
+        final int position = randomIntBetween(0, bytes.length - 1);
+        final int length = randomIntBetween(1, randomBoolean() ? bytes.length : Integer.MAX_VALUE);
+        exception = expectThrows(IOException.class, () -> {
+            try (InputStream stream = randomBoolean() ?
+                blobContainer.readBlob("read_blob_incomplete") :
+                blobContainer.readBlob("read_blob_incomplete", position, length)) {
                 Streams.readFully(stream);
             }
         });
-        assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("read timed out"));
+        assertThat(exception, either(instanceOf(SocketTimeoutException.class)).or(instanceOf(ConnectionClosedException.class)));
+        assertThat(exception.getMessage().toLowerCase(Locale.ROOT), either(containsString("read timed out")).or(
+            containsString("premature end of chunk coded message body: closing chunk expected")));
         assertThat(exception.getSuppressed().length, equalTo(maxRetries));
     }
 
@@ -209,7 +311,14 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
 
         // HTTP server closes connection immediately
         httpServer.createContext("/bucket/read_blob_no_response", HttpExchange::close);
 
-        Exception exception = expectThrows(SdkClientException.class, () -> blobContainer.readBlob("read_blob_no_response"));
+        Exception exception = expectThrows(SdkClientException.class,
+            () -> {
+                if (randomBoolean()) {
+                    blobContainer.readBlob("read_blob_no_response");
+                } else {
+                    blobContainer.readBlob("read_blob_no_response", 0, 1);
+                }
+            });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT), containsString("the target server failed to respond"));
         assertThat(exception.getCause(), instanceOf(NoHttpResponseException.class));
         assertThat(exception.getSuppressed().length, equalTo(0));
@@ -227,12 +336,15 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         });
 
         final Exception exception = expectThrows(ConnectionClosedException.class, () -> {
-            try (InputStream stream = blobContainer.readBlob("read_blob_incomplete")) {
+            try (InputStream stream = randomBoolean() ?
+                blobContainer.readBlob("read_blob_incomplete", 0, 1):
+                blobContainer.readBlob("read_blob_incomplete")) {
                 Streams.readFully(stream);
             }
         });
         assertThat(exception.getMessage().toLowerCase(Locale.ROOT),
-            containsString("premature end of content-length delimited message body"));
+            either(containsString("premature end of chunk coded message body: closing chunk expected"))
+                .or(containsString("premature end of content-length delimited message body")));
         assertThat(exception.getSuppressed().length, equalTo(Math.min(S3RetryingInputStream.MAX_SUPPRESSED_EXCEPTIONS, maxRetries)));
     }
 
@@ -397,25 +509,49 @@ public class S3BlobContainerRetriesTests extends ESTestCase {
         return randomByteArrayOfLength(randomIntBetween(1, frequently() ? 512 : 1 << 20)); // rarely up to 1mb
     }
 
-    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-9223372036854775806$");
+    private static final Pattern RANGE_PATTERN = Pattern.compile("^bytes=([0-9]+)-([0-9]+)$");
 
-    private static int getRangeStart(HttpExchange exchange) {
+    private static Tuple<Long, Long> getRange(HttpExchange exchange) {
         final String rangeHeader = exchange.getRequestHeaders().getFirst("Range");
         if (rangeHeader == null) {
-            return 0;
+            return Tuple.tuple(0L, MAX_RANGE_VAL);
         }
 
         final Matcher matcher = RANGE_PATTERN.matcher(rangeHeader);
         assertTrue(rangeHeader + " matches expected pattern", matcher.matches());
-        return Math.toIntExact(Long.parseLong(matcher.group(1)));
+        long rangeStart = Long.parseLong(matcher.group(1));
+        long rangeEnd = Long.parseLong(matcher.group(2));
+        assertThat(rangeStart, lessThanOrEqualTo(rangeEnd));
+        return Tuple.tuple(rangeStart, rangeEnd);
+    }
+
+    private static int getRangeStart(HttpExchange exchange) {
+        return Math.toIntExact(getRange(exchange).v1());
+    }
+
+    private static Optional<Integer> getRangeEnd(HttpExchange exchange) {
+        final long rangeEnd = getRange(exchange).v2();
+        if (rangeEnd == MAX_RANGE_VAL) {
+            return Optional.empty();
+        }
+        return Optional.of(Math.toIntExact(rangeEnd));
     }
 
     private static void sendIncompleteContent(HttpExchange exchange, byte[] bytes) throws IOException {
         final int rangeStart = getRangeStart(exchange);
         assertThat(rangeStart, lessThan(bytes.length));
+        final Optional<Integer> rangeEnd = getRangeEnd(exchange);
+        final int length;
+        if (rangeEnd.isPresent()) {
+            // adapt range end to be compliant to https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35
+            final int effectiveRangeEnd = Math.min(rangeEnd.get(), bytes.length - 1);
+            length = effectiveRangeEnd - rangeStart;
+        } else {
+            length = bytes.length - rangeStart - 1;
+        }
         exchange.getResponseHeaders().add("Content-Type", "text/plain; charset=utf-8");
-        exchange.sendResponseHeaders(HttpStatus.SC_OK, bytes.length - rangeStart);
-        final int bytesToSend = randomIntBetween(0, bytes.length - rangeStart - 1);
+        exchange.sendResponseHeaders(HttpStatus.SC_OK, length);
+        final int bytesToSend = randomIntBetween(0, length - 1);
         if (bytesToSend > 0) {
             exchange.getResponseBody().write(bytes, rangeStart, bytesToSend);
         }
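
The retry tests in both plugins share one harness idea: an HTTP handler that fails until a countdown is exhausted, then serves the real payload. A standalone sketch of that pattern with the JDK's `com.sun.net.httpserver` (the route and payload are placeholders, and an `AtomicInteger` stands in for the `CountDown` helper):

[source,java]
--------------------------------------------------
import com.sun.net.httpserver.HttpServer;

import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;
import java.util.concurrent.atomic.AtomicInteger;

public class FlakyHandlerDemo {
    public static void main(String[] args) throws Exception {
        final AtomicInteger remainingFailures = new AtomicInteger(2);
        final byte[] body = "ok".getBytes(StandardCharsets.UTF_8);
        HttpServer server = HttpServer.create(new InetSocketAddress(0), 0);
        server.createContext("/blob", exchange -> {
            try {
                if (remainingFailures.getAndDecrement() > 0) {
                    // transient error: a retrying client is expected to try again
                    exchange.sendResponseHeaders(503, -1);
                } else {
                    exchange.sendResponseHeaders(200, body.length);
                    exchange.getResponseBody().write(body);
                }
            } finally {
                exchange.close(); // always close, exactly like the try/finally handlers above
            }
        });
        server.start();
        System.out.println("listening on port " + server.getAddress().getPort());
    }
}
--------------------------------------------------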
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
index bdedc7c5368..faf239a15b7 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/allocation/TransportClusterAllocationExplainAction.java
@@ -33,8 +33,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
-import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
-import org.elasticsearch.cluster.routing.allocation.MoveDecision;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation.DebugMode;
 import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision;
@@ -43,7 +42,6 @@ import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
@@ -62,20 +60,20 @@ public class TransportClusterAllocationExplainAction
     private final ClusterInfoService clusterInfoService;
     private final AllocationDeciders allocationDeciders;
     private final ShardsAllocator shardAllocator;
-    private final GatewayAllocator gatewayAllocator;
+    private final AllocationService allocationService;
 
     @Inject
     public TransportClusterAllocationExplainAction(TransportService transportService, ClusterService clusterService,
                                                    ThreadPool threadPool, ActionFilters actionFilters,
                                                    IndexNameExpressionResolver indexNameExpressionResolver,
                                                    ClusterInfoService clusterInfoService, AllocationDeciders allocationDeciders,
-                                                   ShardsAllocator shardAllocator, GatewayAllocator gatewayAllocator) {
+                                                   ShardsAllocator shardAllocator, AllocationService allocationService) {
         super(ClusterAllocationExplainAction.NAME, transportService, clusterService, threadPool, actionFilters,
             ClusterAllocationExplainRequest::new, indexNameExpressionResolver);
         this.clusterInfoService = clusterInfoService;
         this.allocationDeciders = allocationDeciders;
         this.shardAllocator = shardAllocator;
-        this.gatewayAllocator = gatewayAllocator;
+        this.allocationService = allocationService;
     }
 
     @Override
@@ -105,27 +103,21 @@ public class TransportClusterAllocationExplainAction
         logger.debug("explaining the allocation for [{}], found shard [{}]", request, shardRouting);
 
         ClusterAllocationExplanation cae = explainShard(shardRouting, allocation,
-            request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), gatewayAllocator, shardAllocator);
+            request.includeDiskInfo() ? clusterInfo : null, request.includeYesDecisions(), allocationService);
         listener.onResponse(new ClusterAllocationExplainResponse(cae));
     }
 
     // public for testing
     public static ClusterAllocationExplanation explainShard(ShardRouting shardRouting, RoutingAllocation allocation,
                                                             ClusterInfo clusterInfo, boolean includeYesDecisions,
-                                                            GatewayAllocator gatewayAllocator, ShardsAllocator shardAllocator) {
+                                                            AllocationService allocationService) {
         allocation.setDebugMode(includeYesDecisions ? DebugMode.ON : DebugMode.EXCLUDE_YES_DECISIONS);
 
         ShardAllocationDecision shardDecision;
         if (shardRouting.initializing() || shardRouting.relocating()) {
             shardDecision = ShardAllocationDecision.NOT_TAKEN;
         } else {
-            AllocateUnassignedDecision allocateDecision = shardRouting.unassigned() ?
-                gatewayAllocator.decideUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN;
-            if (allocateDecision.isDecisionTaken() == false) {
-                shardDecision = shardAllocator.decideShardAllocation(shardRouting, allocation);
-            } else {
-                shardDecision = new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
-            }
+            shardDecision = allocationService.explainShardAllocation(shardRouting, allocation);
         }
 
         return new ClusterAllocationExplanation(shardRouting,
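
The diff does not show `AllocationService.explainShardAllocation` itself. Judging purely from the branch it replaces, a plausible shape is the fragment below; this is a hypothetical reconstruction (in particular, `explainUnassignedShardAllocation` is an invented placeholder for however the service consults the responsible allocator), not the actual Elasticsearch method:

[source,java]
--------------------------------------------------
// Hypothetical reconstruction, inferred from the removed branch above; the real
// AllocationService implementation may differ in naming and details.
public ShardAllocationDecision explainShardAllocation(ShardRouting shardRouting, RoutingAllocation allocation) {
    // For unassigned shards, ask the responsible existing-shards allocator first
    // (placeholder call; in the old code this was gatewayAllocator.decideUnassignedShardAllocation).
    AllocateUnassignedDecision allocateDecision = shardRouting.unassigned()
        ? explainUnassignedShardAllocation(shardRouting, allocation)
        : AllocateUnassignedDecision.NOT_TAKEN;
    if (allocateDecision.isDecisionTaken()) {
        return new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN);
    }
    // Otherwise fall back to the general-purpose shards allocator, as before.
    return shardsAllocator.decideShardAllocation(shardRouting, allocation);
}
--------------------------------------------------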
diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
index 29575bb99c0..622aecc5f4c 100644
--- a/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
+++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/health/TransportClusterHealthAction.java
@@ -36,12 +36,12 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.gateway.GatewayAllocator;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -55,15 +55,15 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
 
     private static final Logger logger = LogManager.getLogger(TransportClusterHealthAction.class);
 
-    private final GatewayAllocator gatewayAllocator;
+    private final AllocationService allocationService;
 
     @Inject
     public TransportClusterHealthAction(TransportService transportService, ClusterService clusterService,
                                         ThreadPool threadPool, ActionFilters actionFilters,
-                                        IndexNameExpressionResolver indexNameExpressionResolver, GatewayAllocator gatewayAllocator) {
+                                        IndexNameExpressionResolver indexNameExpressionResolver, AllocationService allocationService) {
         super(ClusterHealthAction.NAME, false, transportService, clusterService, threadPool, actionFilters,
             ClusterHealthRequest::new, indexNameExpressionResolver);
-        this.gatewayAllocator = gatewayAllocator;
+        this.allocationService = allocationService;
     }
 
     @Override
@@ -236,14 +236,14 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
 
     private boolean validateRequest(final ClusterHealthRequest request, ClusterState clusterState, final int waitCount) {
         ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
-            gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime());
+            allocationService.getNumberOfInFlightFetches(), clusterService.getMasterService().getMaxTaskWaitTime());
         return prepareResponse(request, response, clusterState, indexNameExpressionResolver) == waitCount;
     }
 
     private ClusterHealthResponse getResponse(final ClusterHealthRequest request, ClusterState clusterState,
                                               final int waitFor, boolean timedOut) {
         ClusterHealthResponse response = clusterHealth(request, clusterState, clusterService.getMasterService().numberOfPendingTasks(),
clusterService.getMasterService().numberOfPendingTasks(), - gatewayAllocator.getNumberOfInFlightFetch(), clusterService.getMasterService().getMaxTaskWaitTime()); + allocationService.getNumberOfInFlightFetches(), clusterService.getMasterService().getMaxTaskWaitTime()); int readyCounter = prepareResponse(request, response, clusterState, indexNameExpressionResolver); boolean valid = (readyCounter == waitFor); assert valid || timedOut; diff --git a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java index a956057c63b..f48054b1488 100644 --- a/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java +++ b/server/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/RestoreSnapshotResponse.java @@ -45,7 +45,7 @@ public class RestoreSnapshotResponse extends ActionResponse implements ToXConten @Nullable private RestoreInfo restoreInfo; - RestoreSnapshotResponse(@Nullable RestoreInfo restoreInfo) { + public RestoreSnapshotResponse(@Nullable RestoreInfo restoreInfo) { this.restoreInfo = restoreInfo; } diff --git a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java index 76f2ea40977..37932da15df 100644 --- a/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java +++ b/server/src/main/java/org/elasticsearch/cluster/ClusterModule.java @@ -37,6 +37,7 @@ import org.elasticsearch.cluster.metadata.MetadataUpdateSettingsService; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.routing.DelayedAllocationService; import org.elasticsearch.cluster.routing.allocation.AllocationService; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.BalancedShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; @@ -103,12 +104,14 @@ public class ClusterModule extends AbstractModule { private final IndexNameExpressionResolver indexNameExpressionResolver; private final AllocationDeciders allocationDeciders; private final AllocationService allocationService; + private final List clusterPlugins; // pkg private for tests final Collection deciderList; final ShardsAllocator shardsAllocator; public ClusterModule(Settings settings, ClusterService clusterService, List clusterPlugins, ClusterInfoService clusterInfoService) { + this.clusterPlugins = clusterPlugins; this.deciderList = createAllocationDeciders(settings, clusterService.getClusterSettings(), clusterPlugins); this.allocationDeciders = new AllocationDeciders(deciderList); this.shardsAllocator = createShardsAllocator(settings, clusterService.getClusterSettings(), clusterPlugins); @@ -297,4 +300,22 @@ public class ClusterModule extends AbstractModule { bind(AllocationDeciders.class).toInstance(allocationDeciders); bind(ShardsAllocator.class).toInstance(shardsAllocator); } + + public void setExistingShardsAllocators(GatewayAllocator gatewayAllocator) { + final Map existingShardsAllocators = new HashMap<>(); + existingShardsAllocators.put(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator); + + for (ClusterPlugin clusterPlugin : clusterPlugins) { + for (Map.Entry existingShardsAllocatorEntry + : 
clusterPlugin.getExistingShardsAllocators().entrySet()) { + final String allocatorName = existingShardsAllocatorEntry.getKey(); + if (existingShardsAllocators.put(allocatorName, existingShardsAllocatorEntry.getValue()) != null) { + throw new IllegalArgumentException("ExistingShardsAllocator [" + allocatorName + "] from [" + + clusterPlugin.getClass().getName() + "] was already defined"); + } + } + } + allocationService.setExistingShardsAllocators(existingShardsAllocators); + } + } diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java index f4fb8d62bf8..959c1436bf9 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/RoutingNodes.java @@ -29,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.Randomness; import org.elasticsearch.common.collect.Tuple; @@ -883,7 +884,7 @@ public class RoutingNodes implements Iterable { ignored.add(shard); } - public class UnassignedIterator implements Iterator { + public class UnassignedIterator implements Iterator, ExistingShardsAllocator.UnassignedAllocationHandler { private final ListIterator iterator; private ShardRouting current; @@ -907,6 +908,7 @@ public class RoutingNodes implements Iterable { * * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. 
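Stepping back to the `ClusterModule` change above: the extra entries in that map come from `ClusterPlugin#getExistingShardsAllocators`. A minimal sketch of such a plugin follows; the plugin class, the allocator class, and the `my_allocator` key are hypothetical, while the interface, the default `gateway_allocator` name, and the duplicate-name check come from this change.

[source,java]
----
import java.util.Collections;
import java.util.Map;

import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.plugins.ClusterPlugin;
import org.elasticsearch.plugins.Plugin;

// Hypothetical plugin exercising the new extension point.
public class MyAllocatorPlugin extends Plugin implements ClusterPlugin {

    @Override
    public Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
        // The key must be unique: reusing GatewayAllocator.ALLOCATOR_NAME
        // ("gateway_allocator") or a name claimed by another plugin makes
        // ClusterModule.setExistingShardsAllocators throw IllegalArgumentException.
        return Collections.singletonMap("my_allocator", new MyExistingShardsAllocator());
    }
}
----

A skeleton for the assumed `MyExistingShardsAllocator` appears alongside the new interface below.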
*/ + @Override public ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize, RoutingChangesObserver routingChangesObserver) { nodes.ensureMutable(); @@ -922,6 +924,7 @@ public class RoutingNodes implements Iterable { * * @param attempt the result of the allocation attempt */ + @Override public void removeAndIgnore(AllocationStatus attempt, RoutingChangesObserver changes) { nodes.ensureMutable(); innerRemove(); @@ -940,6 +943,7 @@ public class RoutingNodes implements Iterable { * @param recoverySource the new recovery source to use * @return the shard with unassigned info updated */ + @Override public ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource, RoutingChangesObserver changes) { nodes.ensureMutable(); diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java index be5310a39c2..add73e20f2c 100644 --- a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/AllocationService.java @@ -30,6 +30,7 @@ import org.elasticsearch.cluster.health.ClusterStateHealth; import org.elasticsearch.cluster.metadata.AutoExpandReplicas; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.routing.RoutingNode; import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.RoutingTable; @@ -39,8 +40,10 @@ import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; import org.elasticsearch.common.collect.ImmutableOpenMap; import org.elasticsearch.gateway.GatewayAllocator; +import org.elasticsearch.gateway.PriorityComparator; import java.util.ArrayList; import java.util.Collections; @@ -57,7 +60,6 @@ import static java.util.Collections.emptyList; import static java.util.Collections.singletonList; import static org.elasticsearch.cluster.routing.UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING; - /** * This service manages the node allocation of a cluster. 
For this reason the * {@link AllocationService} keeps {@link AllocationDeciders} to choose nodes @@ -69,26 +71,31 @@ public class AllocationService { private static final Logger logger = LogManager.getLogger(AllocationService.class); private final AllocationDeciders allocationDeciders; - private GatewayAllocator gatewayAllocator; + private Map existingShardsAllocators; private final ShardsAllocator shardsAllocator; private final ClusterInfoService clusterInfoService; - public AllocationService(AllocationDeciders allocationDeciders, - GatewayAllocator gatewayAllocator, + // only for tests that use the GatewayAllocator as the unique ExistingShardsAllocator + public AllocationService(AllocationDeciders allocationDeciders, GatewayAllocator gatewayAllocator, ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { this(allocationDeciders, shardsAllocator, clusterInfoService); - setGatewayAllocator(gatewayAllocator); + setExistingShardsAllocators(Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator)); } - public AllocationService(AllocationDeciders allocationDeciders, - ShardsAllocator shardsAllocator, ClusterInfoService clusterInfoService) { + public AllocationService(AllocationDeciders allocationDeciders, ShardsAllocator shardsAllocator, + ClusterInfoService clusterInfoService) { this.allocationDeciders = allocationDeciders; this.shardsAllocator = shardsAllocator; this.clusterInfoService = clusterInfoService; } - public void setGatewayAllocator(GatewayAllocator gatewayAllocator) { - this.gatewayAllocator = gatewayAllocator; + /** + * Inject the {@link ExistingShardsAllocator}s to use. May only be called once. + */ + public void setExistingShardsAllocators(Map existingShardsAllocators) { + assert this.existingShardsAllocators == null : "cannot set allocators " + existingShardsAllocators + " twice"; + assert existingShardsAllocators.isEmpty() == false : "must add at least one ExistingShardsAllocator"; + this.existingShardsAllocators = Collections.unmodifiableMap(existingShardsAllocators); } /** @@ -98,6 +105,7 @@ public class AllocationService { * If the same instance of the {@link ClusterState} is returned, then no change has been made.
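The injection is deliberately one-shot. As a sketch of how the two constructors relate (assuming stub `deciders`, `shardsAllocator`, `clusterInfoService`, and `gatewayAllocator` instances):

[source,java]
----
// The GatewayAllocator-only constructor, kept for tests...
AllocationService fromTests =
    new AllocationService(deciders, gatewayAllocator, shardsAllocator, clusterInfoService);

// ...is shorthand for wiring a single-entry allocator map by hand:
AllocationService equivalent =
    new AllocationService(deciders, shardsAllocator, clusterInfoService);
equivalent.setExistingShardsAllocators(
    Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, gatewayAllocator));

// A second setExistingShardsAllocators call fails the "cannot set allocators
// twice" assertion when assertions are enabled.
----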
*/ public ClusterState applyStartedShards(ClusterState clusterState, List startedShards) { + assert assertInitialized(); if (startedShards.isEmpty()) { return clusterState; } @@ -108,9 +116,11 @@ public class AllocationService { clusterInfoService.getClusterInfo(), currentNanoTime()); // as starting a primary relocation target can reinitialize replica shards, start replicas first startedShards = new ArrayList<>(startedShards); - Collections.sort(startedShards, Comparator.comparing(ShardRouting::primary)); + startedShards.sort(Comparator.comparing(ShardRouting::primary)); applyStartedShards(allocation, startedShards); - gatewayAllocator.applyStartedShards(allocation, startedShards); + for (final ExistingShardsAllocator allocator : existingShardsAllocators.values()) { + allocator.applyStartedShards(startedShards, allocation); + } assert RoutingNodes.assertShardStats(allocation.routingNodes()); String startedShardsAsString = firstListElementsToCommaDelimitedString(startedShards, s -> s.shardId().toString(), logger.isDebugEnabled()); @@ -171,6 +181,7 @@ public class AllocationService { */ public ClusterState applyFailedShards(final ClusterState clusterState, final List failedShards, final List staleShards) { + assert assertInitialized(); if (staleShards.isEmpty() && failedShards.isEmpty()) { return clusterState; } @@ -216,7 +227,9 @@ public class AllocationService { logger.trace("{} shard routing failed in an earlier iteration (routing: {})", shardToFail.shardId(), shardToFail); } } - gatewayAllocator.applyFailedShards(allocation, failedShards); + for (final ExistingShardsAllocator allocator : existingShardsAllocators.values()) { + allocator.applyFailedShards(failedShards, allocation); + } reroute(allocation); String failedShardsAsString @@ -408,15 +421,43 @@ public class AllocationService { assert hasDeadNodes(allocation) == false : "dead nodes should be explicitly cleaned up. 
See disassociateDeadNodes"; assert AutoExpandReplicas.getAutoExpandReplicaChanges(allocation.metadata(), allocation).isEmpty() : "auto-expand replicas out of sync with number of nodes in the cluster"; + assert assertInitialized(); removeDelayMarkers(allocation); - // try to allocate existing shard copies first - gatewayAllocator.allocateUnassigned(allocation); + allocateExistingUnassignedShards(allocation); // try to allocate existing shard copies first shardsAllocator.allocate(allocation); assert RoutingNodes.assertShardStats(allocation.routingNodes()); } + private void allocateExistingUnassignedShards(RoutingAllocation allocation) { + allocation.routingNodes().unassigned().sort(PriorityComparator.getAllocationComparator(allocation)); // sort for priority ordering + + for (final ExistingShardsAllocator existingShardsAllocator : existingShardsAllocators.values()) { + existingShardsAllocator.beforeAllocation(allocation); + } + + final RoutingNodes.UnassignedShards.UnassignedIterator primaryIterator = allocation.routingNodes().unassigned().iterator(); + while (primaryIterator.hasNext()) { + final ShardRouting shardRouting = primaryIterator.next(); + if (shardRouting.primary()) { + getAllocatorForShard(shardRouting, allocation).allocateUnassigned(shardRouting, allocation, primaryIterator); + } + } + + for (final ExistingShardsAllocator existingShardsAllocator : existingShardsAllocators.values()) { + existingShardsAllocator.afterPrimariesBeforeReplicas(allocation); + } + + final RoutingNodes.UnassignedShards.UnassignedIterator replicaIterator = allocation.routingNodes().unassigned().iterator(); + while (replicaIterator.hasNext()) { + final ShardRouting shardRouting = replicaIterator.next(); + if (shardRouting.primary() == false) { + getAllocatorForShard(shardRouting, allocation).allocateUnassigned(shardRouting, allocation, replicaIterator); + } + } + } + private void disassociateDeadNodes(RoutingAllocation allocation) { for (Iterator it = allocation.routingNodes().mutableIterator(); it.hasNext(); ) { RoutingNode node = it.next(); @@ -454,9 +495,11 @@ public class AllocationService { } } + /** + * Create a mutable {@link RoutingNodes}. This is a costly operation so this must only be called once! + */ private RoutingNodes getMutableRoutingNodes(ClusterState clusterState) { - RoutingNodes routingNodes = new RoutingNodes(clusterState, false); // this is a costly operation - only call this once! - return routingNodes; + return new RoutingNodes(clusterState, false); } /** override this to control time based decisions during allocation */ @@ -465,7 +508,103 @@ public class AllocationService { } public void cleanCaches() { - gatewayAllocator.cleanCaches(); + assert assertInitialized(); + existingShardsAllocators.values().forEach(ExistingShardsAllocator::cleanCaches); + } + + public int getNumberOfInFlightFetches() { + assert assertInitialized(); + return existingShardsAllocators.values().stream().mapToInt(ExistingShardsAllocator::getNumberOfInFlightFetches).sum(); + } + + public ShardAllocationDecision explainShardAllocation(ShardRouting shardRouting, RoutingAllocation allocation) { + assert allocation.debugDecision(); + AllocateUnassignedDecision allocateDecision + = shardRouting.unassigned() ? 
explainUnassignedShardAllocation(shardRouting, allocation) : AllocateUnassignedDecision.NOT_TAKEN; + if (allocateDecision.isDecisionTaken()) { + return new ShardAllocationDecision(allocateDecision, MoveDecision.NOT_TAKEN); + } else { + return shardsAllocator.decideShardAllocation(shardRouting, allocation); + } + } + + private AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting shardRouting, RoutingAllocation routingAllocation) { + assert shardRouting.unassigned(); + assert routingAllocation.debugDecision(); + assert assertInitialized(); + final ExistingShardsAllocator existingShardsAllocator = getAllocatorForShard(shardRouting, routingAllocation); + final AllocateUnassignedDecision decision + = existingShardsAllocator.explainUnassignedShardAllocation(shardRouting, routingAllocation); + if (decision.isDecisionTaken()) { + return decision; + } + return AllocateUnassignedDecision.NOT_TAKEN; + } + + private ExistingShardsAllocator getAllocatorForShard(ShardRouting shardRouting, RoutingAllocation routingAllocation) { + assert assertInitialized(); + final String allocatorName = ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get( + routingAllocation.metadata().getIndexSafe(shardRouting.index()).getSettings()); + final ExistingShardsAllocator existingShardsAllocator = existingShardsAllocators.get(allocatorName); + return existingShardsAllocator != null ? existingShardsAllocator : new NotFoundAllocator(allocatorName); + } + + private boolean assertInitialized() { + assert existingShardsAllocators != null: "must have set allocators first"; + return true; + } + + private static class NotFoundAllocator implements ExistingShardsAllocator { + private final String allocatorName; + + private NotFoundAllocator(String allocatorName) { + this.allocatorName = allocatorName; + } + + @Override + public void beforeAllocation(RoutingAllocation allocation) { + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + } + + @Override + public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation, + UnassignedAllocationHandler unassignedAllocationHandler) { + unassignedAllocationHandler.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes()); + } + + @Override + public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation allocation) { + assert unassignedShard.unassigned(); + assert allocation.debugDecision(); + final List nodeAllocationResults = new ArrayList<>(allocation.nodes().getSize()); + for (DiscoveryNode discoveryNode : allocation.nodes()) { + nodeAllocationResults.add(new NodeAllocationResult(discoveryNode, null, allocation.decision(Decision.NO, + "allocator_plugin", "finding the previous copies of this shard requires an allocator called [%s] but " + + "that allocator was not found; perhaps the corresponding plugin is not installed", + allocatorName))); + } + return AllocateUnassignedDecision.no(AllocationStatus.NO_VALID_SHARD_COPY, nodeAllocationResults); + } + + @Override + public void cleanCaches() { + } + + @Override + public void applyStartedShards(List startedShards, RoutingAllocation allocation) { + } + + @Override + public void applyFailedShards(List failedShards, RoutingAllocation allocation) { + } + + @Override + public int getNumberOfInFlightFetches() { + return 0; + } } /** diff --git a/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java 
b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java new file mode 100644 index 00000000000..7731a4f67f2 --- /dev/null +++ b/server/src/main/java/org/elasticsearch/cluster/routing/allocation/ExistingShardsAllocator.java @@ -0,0 +1,123 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.cluster.routing.allocation; + +import org.elasticsearch.cluster.routing.RecoverySource; +import org.elasticsearch.cluster.routing.RoutingChangesObserver; +import org.elasticsearch.cluster.routing.RoutingNodes; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.gateway.GatewayAllocator; + +import java.util.List; + +/** + * Searches for, and allocates, shards for which there is an existing on-disk copy somewhere in the cluster. The default implementation is + * {@link GatewayAllocator}, but plugins can supply their own implementations too. + */ +public interface ExistingShardsAllocator { + + /** + * Allows plugins to override how we allocate shards that may already exist on disk in the cluster. + */ + Setting EXISTING_SHARDS_ALLOCATOR_SETTING = Setting.simpleString( + "index.allocation.existing_shards_allocator", GatewayAllocator.ALLOCATOR_NAME, + Setting.Property.IndexScope, Setting.Property.PrivateIndex); + + /** + * Called before starting a round of allocation, allowing the allocator to invalidate some caches if appropriate. + */ + void beforeAllocation(RoutingAllocation allocation); + + /** + * Called during a round of allocation after attempting to allocate all the primaries but before any replicas, allowing the allocator + * to prepare for replica allocation. + */ + void afterPrimariesBeforeReplicas(RoutingAllocation allocation); + + /** + * Allocate any unassigned shards in the given {@link RoutingAllocation} for which this {@link ExistingShardsAllocator} is responsible. + */ + void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation, + UnassignedAllocationHandler unassignedAllocationHandler); + + /** + * Returns an explanation for a single unassigned shard. + */ + AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation); + + /** + * Called when this node becomes the elected master and when it stops being the elected master, so that implementations can clean up any + * in-flight activity from an earlier mastership. + */ + void cleanCaches(); + + /** + * Called when the given shards have started, so that implementations can invalidate caches and clean up any in-flight activity for + * those shards. 
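To make the contract concrete before the remaining methods, here is a skeletal implementation along the lines of the `MyExistingShardsAllocator` assumed earlier. It is a hypothetical no-op that parks every shard, much like the `NotFoundAllocator` fallback above; a real allocator would locate existing copies in the lifecycle hooks and assign them through the handler.

[source,java]
----
import java.util.List;

import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.cluster.routing.allocation.FailedShard;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;

public class MyExistingShardsAllocator implements ExistingShardsAllocator {

    @Override
    public void beforeAllocation(RoutingAllocation allocation) {
        // e.g. kick off asynchronous shard-metadata fetches for unassigned primaries
    }

    @Override
    public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {
        // e.g. cancel replica recoveries made obsolete by newly started primaries
    }

    @Override
    public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
                                   UnassignedAllocationHandler unassignedAllocationHandler) {
        // No copy found yet: ignore the shard for this round; it will be retried
        // on a later reroute once more information is available.
        unassignedAllocationHandler.removeAndIgnore(
            UnassignedInfo.AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes());
    }

    @Override
    public AllocateUnassignedDecision explainUnassignedShardAllocation(
            ShardRouting unassignedShard, RoutingAllocation routingAllocation) {
        return AllocateUnassignedDecision.NOT_TAKEN;
    }

    @Override
    public void cleanCaches() {
    }

    @Override
    public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
    }

    @Override
    public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
    }

    @Override
    public int getNumberOfInFlightFetches() {
        return 0;
    }
}
----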
+ */ + void applyStartedShards(List startedShards, RoutingAllocation allocation); + + /** + * Called when the given shards have failed, so that implementations can invalidate caches and clean up any in-flight activity for + * those shards. + */ + void applyFailedShards(List failedShards, RoutingAllocation allocation); + + /** + * @return the number of in-flight fetches under this allocator's control. + */ + int getNumberOfInFlightFetches(); + + /** + * Used by {@link ExistingShardsAllocator#allocateUnassigned} to handle its allocation decisions. A restricted interface to + * {@link RoutingNodes.UnassignedShards.UnassignedIterator} to limit what allocators can do. + */ + interface UnassignedAllocationHandler { + + /** + * Initializes the current unassigned shard and moves it from the unassigned list. + * + * @param existingAllocationId allocation id to use. If null, a fresh allocation id is generated. + */ + ShardRouting initialize(String nodeId, @Nullable String existingAllocationId, long expectedShardSize, + RoutingChangesObserver routingChangesObserver); + + /** + * Removes and ignores the unassigned shard (will be ignored for this run, but + * will be added back to unassigned once the metadata is constructed again). + * Typically this is used when an allocation decision prevents a shard from being allocated such + * that subsequent consumers of this API won't try to allocate this shard again. + * + * @param attempt the result of the allocation attempt + */ + void removeAndIgnore(UnassignedInfo.AllocationStatus attempt, RoutingChangesObserver changes); + + /** + * updates the unassigned info and recovery source on the current unassigned shard + * + * @param unassignedInfo the new unassigned info to use + * @param recoverySource the new recovery source to use + * @return the shard with unassigned info updated + */ + ShardRouting updateUnassigned(UnassignedInfo unassignedInfo, RecoverySource recoverySource, RoutingChangesObserver changes); + } +} diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java index 5194d841451..8040d1775a6 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/BlobContainer.java @@ -49,6 +49,40 @@ public interface BlobContainer { */ InputStream readBlob(String blobName) throws IOException; + /** + * Creates a new {@link InputStream} that can be used to read the given blob starting from + * a specific {@code position} in the blob. The {@code length} is an indication of the + * number of bytes that are expected to be read from the {@link InputStream}. + * + * @param blobName The name of the blob to get an {@link InputStream} for. + * @param position The position in the blob where the next byte will be read. + * @param length An indication of the number of bytes to be read. + * @return The {@code InputStream} to read the blob. + * @throws NoSuchFileException if the blob does not exist + * @throws IOException if the blob can not be read. + */ + default InputStream readBlob(final String blobName, final long position, final long length) throws IOException { + throw new UnsupportedOperationException(); // NORELEASE + } + + /** + * Provides a hint to clients for a suitable length to use with {@link BlobContainer#readBlob(String, long, long)}. 
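From the consumer side, the hint and the ranged read are designed to be used together. A sketch, where `container` is any `BlobContainer` supporting the new methods, and the blob name, blob length, and offset are hypothetical:

[source,java]
----
import java.io.IOException;
import java.io.InputStream;

import org.elasticsearch.common.blobstore.BlobContainer;

// Read part of a blob starting at an offset, speculatively requesting up to the
// container's preferred length (capped at what remains in the blob) so that
// later reads may be served from the same stream.
void readRange(BlobContainer container, long blobLength, long offset) throws IOException {
    final long length = Math.min(container.readBlobPreferredLength(), blobLength - offset);
    try (InputStream in = container.readBlob("segments_0", offset, length)) {
        final byte[] buffer = new byte[8192];
        int read;
        while ((read = in.read(buffer)) != -1) {
            // ... consume buffer[0..read); the stream yields at most `length` bytes ...
        }
    }
}
----

`FsBlobContainer` below implements the range by positioning a `SeekableByteChannel` and capping it with `Streams.limitStream`; since closing a filesystem stream early is cheap, it can advertise `Long.MAX_VALUE` as its preferred length.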
+ * + * Some blob containers have nontrivial costs attached to each readBlob call, so it is a good idea for consumers to speculatively + * request more data than they need right now and to re-use this stream for future needs if possible. + * + * Also, some blob containers return streams that are expensive to close before the stream has been fully consumed, and the cost may + * depend on the length of the data that was left unconsumed. For these containers it's best to bound the cost of a partial read by + * bounding the length of the data requested. + * + * @return a hint to consumers regarding the length of data to request if there is a good chance that future reads can be satisfied from + * the same stream. + * + */ + default long readBlobPreferredLength() { + throw new UnsupportedOperationException(); // NORELEASE + } + /** * Reads blob content from the input stream and writes it to the container in a new blob with the given name. * This method assumes the container does not already contain a blob of the same blobName. If a blob by the diff --git a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java index f92543937fe..fa2437118ae 100644 --- a/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java +++ b/server/src/main/java/org/elasticsearch/common/blobstore/fs/FsBlobContainer.java @@ -34,6 +34,8 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; +import java.nio.channels.Channels; +import java.nio.channels.SeekableByteChannel; import java.nio.file.DirectoryStream; import java.nio.file.FileAlreadyExistsException; import java.nio.file.FileVisitResult; @@ -142,16 +144,36 @@ public class FsBlobContainer extends AbstractBlobContainer { IOUtils.rm(blobNames.stream().map(path::resolve).toArray(Path[]::new)); } + private InputStream bufferedInputStream(InputStream inputStream) { + return new BufferedInputStream(inputStream, blobStore.bufferSizeInBytes()); + } + @Override public InputStream readBlob(String name) throws IOException { final Path resolvedPath = path.resolve(name); try { - return new BufferedInputStream(Files.newInputStream(resolvedPath), blobStore.bufferSizeInBytes()); + return bufferedInputStream(Files.newInputStream(resolvedPath)); } catch (FileNotFoundException fnfe) { throw new NoSuchFileException("[" + name + "] blob not found"); } } + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + final SeekableByteChannel channel = Files.newByteChannel(path.resolve(blobName)); + if (position > 0L) { + channel.position(position); + } + assert channel.position() == position; + return bufferedInputStream(org.elasticsearch.common.io.Streams.limitStream(Channels.newInputStream(channel), length)); + } + + @Override + public long readBlobPreferredLength() { + // This container returns streams that are cheap to close early, so we can tell consumers to request as much data as possible. 
+ return Long.MAX_VALUE; + } + @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { if (failIfAlreadyExists == false) { diff --git a/server/src/main/java/org/elasticsearch/common/io/Streams.java b/server/src/main/java/org/elasticsearch/common/io/Streams.java index 222f94e65ef..be2c7b4d167 100644 --- a/server/src/main/java/org/elasticsearch/common/io/Streams.java +++ b/server/src/main/java/org/elasticsearch/common/io/Streams.java @@ -205,6 +205,13 @@ public abstract class Streams { return read; } + /** + * Fully consumes the input stream, throwing the bytes away. Returns the number of bytes consumed. + */ + public static long consumeFully(InputStream inputStream) throws IOException { + return copy(inputStream, new NullOutputStream()); + } + public static List readAllLines(InputStream input) throws IOException { final List lines = new ArrayList<>(); readAllLines(input, lines::add); @@ -254,6 +261,13 @@ public abstract class Streams { } } + /** + * Limits the given input stream to the provided number of bytes + */ + public static InputStream limitStream(InputStream in, long limit) { + return new LimitedInputStream(in, limit); + } + /** * A wrapper around a {@link BytesStream} that makes the close operation a flush. This is * needed as sometimes a stream will be closed but the bytes that the stream holds still need @@ -297,4 +311,93 @@ public abstract class Streams { return delegate.bytes(); } } + + /** + * A wrapper around an {@link InputStream} that limits the number of bytes that can be read from the stream. + */ + static class LimitedInputStream extends FilterInputStream { + + private static final long NO_MARK = -1L; + + private long currentLimit; // is always non-negative + private long limitOnLastMark; + + LimitedInputStream(InputStream in, long limit) { + super(in); + if (limit < 0L) { + throw new IllegalArgumentException("limit must be non-negative"); + } + this.currentLimit = limit; + this.limitOnLastMark = NO_MARK; + } + + @Override + public int read() throws IOException { + final int result; + if (currentLimit == 0 || (result = in.read()) == -1) { + return -1; + } else { + currentLimit--; + return result; + } + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + final int result; + if (currentLimit == 0 || (result = in.read(b, off, Math.toIntExact(Math.min(len, currentLimit)))) == -1) { + return -1; + } else { + currentLimit -= result; + return result; + } + } + + @Override + public long skip(long n) throws IOException { + final long skipped = in.skip(Math.min(n, currentLimit)); + currentLimit -= skipped; + return skipped; + } + + @Override + public int available() throws IOException { + return Math.toIntExact(Math.min(in.available(), currentLimit)); + } + + @Override + public void close() throws IOException { + in.close(); + } + + @Override + public synchronized void mark(int readlimit) { + in.mark(readlimit); + limitOnLastMark = currentLimit; + } + + @Override + public synchronized void reset() throws IOException { + in.reset(); + if (limitOnLastMark != NO_MARK) { + currentLimit = limitOnLastMark; + } + } + } + + /** + * OutputStream that just throws all the bytes away + */ + static class NullOutputStream extends OutputStream { + + @Override + public void write(int b) { + + } + + @Override + public void write(byte[] b, int off, int len) { + + } + } } diff --git a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java 
b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java index 7704ed44c83..b5454332dc7 100644 --- a/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java +++ b/server/src/main/java/org/elasticsearch/common/settings/IndexScopedSettings.java @@ -21,6 +21,7 @@ package org.elasticsearch.common.settings; import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.metadata.MetadataIndexStateService; import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider; import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider; @@ -171,6 +172,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings { IndexSettings.FINAL_PIPELINE, MetadataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING, IndexSettings.ON_HEAP_ID_TERMS_INDEX, + ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING, // validate that built-in similarities don't get redefined Setting.groupSetting("index.similarity.", (s) -> { diff --git a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java index d8b96550ad0..30e6c200402 100644 --- a/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/BaseGatewayShardAllocator.java @@ -22,10 +22,10 @@ package org.elasticsearch.gateway; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; import org.elasticsearch.cluster.routing.allocation.AllocationDecision; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.decider.Decision; @@ -45,40 +45,37 @@ public abstract class BaseGatewayShardAllocator { protected final Logger logger = LogManager.getLogger(this.getClass()); /** - * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist. + * Allocate an unassigned shard to nodes (if any) where valid copies of the shard already exist. * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)} * to make decisions on assigning shards to nodes. 
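Once registered, the setting selects the allocator per index. A sketch; the `my_allocator` key is hypothetical, and since the setting is marked `PrivateIndex` it would be supplied by plugin code rather than directly by users:

[source,java]
----
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.common.settings.Settings;

// Hypothetical: index settings that route existing-shard allocation for an
// index to the allocator registered under "my_allocator". Indices without this
// setting keep the default, the GatewayAllocator.
Settings indexSettings = Settings.builder()
    .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "my_allocator")
    .build();
----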
- * + * @param shardRouting the shard to allocate * @param allocation the allocation state container object + * @param unassignedAllocationHandler handles the allocation of the current shard */ - public void allocateUnassigned(RoutingAllocation allocation) { - final RoutingNodes routingNodes = allocation.routingNodes(); - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator(); - while (unassignedIterator.hasNext()) { - final ShardRouting shard = unassignedIterator.next(); - final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shard, allocation, logger); + public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation, + ExistingShardsAllocator.UnassignedAllocationHandler unassignedAllocationHandler) { + final AllocateUnassignedDecision allocateUnassignedDecision = makeAllocationDecision(shardRouting, allocation, logger); - if (allocateUnassignedDecision.isDecisionTaken() == false) { - // no decision was taken by this allocator - continue; - } + if (allocateUnassignedDecision.isDecisionTaken() == false) { + // no decision was taken by this allocator + return; + } - if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) { - unassignedIterator.initialize(allocateUnassignedDecision.getTargetNode().getId(), - allocateUnassignedDecision.getAllocationId(), - shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE : - allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), - allocation.changes()); - } else { - unassignedIterator.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes()); - } + if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) { + unassignedAllocationHandler.initialize(allocateUnassignedDecision.getTargetNode().getId(), + allocateUnassignedDecision.getAllocationId(), + shardRouting.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE : + allocation.clusterInfo().getShardSize(shardRouting, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), + allocation.changes()); + } else { + unassignedAllocationHandler.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes()); } } /** * Make a decision on the allocation of an unassigned shard. This method is used by - * {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not - * the shard can be allocated by this allocator and if so, to which node it will be allocated. + * {@link #allocateUnassigned(ShardRouting, RoutingAllocation, ExistingShardsAllocator.UnassignedAllocationHandler)} to make decisions + * about whether or not the shard can be allocated by this allocator and if so, to which node it will be allocated. 
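For orientation, a toy subclass showing the division of labour: the reworked base class owns the initialize/remove-and-ignore mechanics above, and subclasses only produce a decision. The class and its policy are hypothetical (including passing `null` node results); the signature and decision constants are from this change.

[source,java]
----
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.gateway.BaseGatewayShardAllocator;

// Hypothetical allocator that only ever reports "no valid copy" for primaries.
public class NoCopiesShardAllocator extends BaseGatewayShardAllocator {

    @Override
    public AllocateUnassignedDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                             RoutingAllocation allocation,
                                                             Logger logger) {
        if (unassignedShard.primary() == false) {
            // Taking no decision makes allocateUnassigned() leave the shard alone.
            return AllocateUnassignedDecision.NOT_TAKEN;
        }
        // A non-YES decision makes allocateUnassigned() removeAndIgnore the shard.
        return AllocateUnassignedDecision.no(
            UnassignedInfo.AllocationStatus.NO_VALID_SHARD_COPY, null);
    }
}
----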
* * @param unassignedShard the unassigned shard to allocate * @param allocation the current routing state diff --git a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java index 61fdb85a4f9..c33f2b2b900 100644 --- a/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java +++ b/server/src/main/java/org/elasticsearch/gateway/GatewayAllocator.java @@ -30,9 +30,9 @@ import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodes; import org.elasticsearch.cluster.routing.RerouteService; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.FailedShard; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.common.Priority; @@ -50,7 +50,9 @@ import java.util.concurrent.ConcurrentMap; import java.util.stream.Collectors; import java.util.stream.StreamSupport; -public class GatewayAllocator { +public class GatewayAllocator implements ExistingShardsAllocator { + + public static final String ALLOCATOR_NAME = "gateway_allocator"; private static final Logger logger = LogManager.getLogger(GatewayAllocator.class); @@ -74,6 +76,7 @@ public class GatewayAllocator { this.replicaShardAllocator = new InternalReplicaShardAllocator(storeAction); } + @Override public void cleanCaches() { Releasables.close(asyncFetchStarted.values()); asyncFetchStarted.clear(); @@ -88,7 +91,8 @@ public class GatewayAllocator { this.replicaShardAllocator = null; } - public int getNumberOfInFlightFetch() { + @Override + public int getNumberOfInFlightFetches() { int count = 0; for (AsyncShardFetch fetch : asyncFetchStarted.values()) { count += fetch.getNumberOfInFlightFetches(); @@ -99,47 +103,64 @@ public class GatewayAllocator { return count; } - public void applyStartedShards(final RoutingAllocation allocation, final List startedShards) { + @Override + public void applyStartedShards(final List startedShards, final RoutingAllocation allocation) { for (ShardRouting startedShard : startedShards) { Releasables.close(asyncFetchStarted.remove(startedShard.shardId())); Releasables.close(asyncFetchStore.remove(startedShard.shardId())); } } - public void applyFailedShards(final RoutingAllocation allocation, final List failedShards) { + @Override + public void applyFailedShards(final List failedShards, final RoutingAllocation allocation) { for (FailedShard failedShard : failedShards) { Releasables.close(asyncFetchStarted.remove(failedShard.getRoutingEntry().shardId())); Releasables.close(asyncFetchStore.remove(failedShard.getRoutingEntry().shardId())); } } - public void allocateUnassigned(final RoutingAllocation allocation) { + @Override + public void beforeAllocation(final RoutingAllocation allocation) { assert primaryShardAllocator != null; assert replicaShardAllocator != null; ensureAsyncFetchStorePrimaryRecency(allocation); - innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator); + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + assert replicaShardAllocator != null; + if (allocation.routingNodes().hasInactiveShards()) { + // cancel existing recoveries if we have a 
better match + replicaShardAllocator.processExistingRecoveries(allocation); + } + } + + @Override + public void allocateUnassigned(ShardRouting shardRouting, final RoutingAllocation allocation, + UnassignedAllocationHandler unassignedAllocationHandler) { + assert primaryShardAllocator != null; + assert replicaShardAllocator != null; + innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator, shardRouting, unassignedAllocationHandler); } // allow for testing infra to change shard allocators implementation protected static void innerAllocatedUnassigned(RoutingAllocation allocation, PrimaryShardAllocator primaryShardAllocator, - ReplicaShardAllocator replicaShardAllocator) { - RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned(); - unassigned.sort(PriorityComparator.getAllocationComparator(allocation)); // sort for priority ordering - - primaryShardAllocator.allocateUnassigned(allocation); - if (allocation.routingNodes().hasInactiveShards()) { - // cancel existing recoveries if we have a better match - replicaShardAllocator.processExistingRecoveries(allocation); + ReplicaShardAllocator replicaShardAllocator, + ShardRouting shardRouting, + ExistingShardsAllocator.UnassignedAllocationHandler unassignedAllocationHandler) { + assert shardRouting.unassigned(); + if (shardRouting.primary()) { + primaryShardAllocator.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler); + } else { + replicaShardAllocator.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler); } - replicaShardAllocator.allocateUnassigned(allocation); } - /** - * Computes and returns the design for allocating a single unassigned shard. If called on an assigned shard, - * {@link AllocateUnassignedDecision#NOT_TAKEN} is returned. 
- */ - public AllocateUnassignedDecision decideUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) { + @Override + public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting unassignedShard, RoutingAllocation routingAllocation) { + assert unassignedShard.unassigned(); + assert routingAllocation.debugDecision(); if (unassignedShard.primary()) { assert primaryShardAllocator != null; return primaryShardAllocator.makeAllocationDecision(unassignedShard, routingAllocation, logger); diff --git a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java index d4e63620852..579e4f5a33f 100644 --- a/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/CompositeIndexEventListener.java @@ -271,4 +271,17 @@ final class CompositeIndexEventListener implements IndexEventListener { } } } + + @Override + public void beforeIndexShardRecovery(final IndexShard indexShard, final IndexSettings indexSettings) { + for (IndexEventListener listener : listeners) { + try { + listener.beforeIndexShardRecovery(indexShard, indexSettings); + } catch (Exception e) { + logger.warn(() -> new ParameterizedMessage("failed to invoke the listener before the shard recovery starts for {}", + indexShard.shardId()), e); + throw e; + } + } + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java index 982b42b2c3f..81ea941d7cb 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexEventListener.java @@ -174,4 +174,15 @@ public interface IndexEventListener { * @param shardId the shard ID the store belongs to */ default void onStoreClosed(ShardId shardId) {} + + /** + * Called before the index shard starts to recover. + * Note: unlike all other methods in this class, this method is not called using the cluster state update thread. When this method is + * called the shard already transitioned to the RECOVERING state. 
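A sketch of a listener built on the new hook; the listener class, the marker setting, and the precondition are hypothetical, and per the note above it runs on the recovery thread once the shard is already RECOVERING:

[source,java]
----
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.shard.IndexEventListener;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;

// Hypothetical listener vetoing recovery unless a plugin-specific precondition
// holds. An exception thrown here fails the recovery: CompositeIndexEventListener
// logs it and rethrows.
public class PreRecoveryCheckListener implements IndexEventListener {

    @Override
    public void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings) {
        if (indexSettings.getSettings().getAsBoolean("index.my_plugin.managed", false)) {
            ensureReadyToRecover(indexShard.shardId()); // hypothetical precondition
        }
    }

    private void ensureReadyToRecover(ShardId shardId) {
        // ... plugin-specific validation ...
    }
}
----

All three recovery entry points touched in this change (store recovery, snapshot restore, and peer recovery) call `IndexShard#preRecovery` before `prepareForIndexRecovery`, so the hook fires at the start of each recovery attempt.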
+ * + * @param indexShard the shard that is about to recover + * @param indexSettings the shard's index settings + */ + default void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings) { + } } diff --git a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java index 6a1b8462252..faddeae60df 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java +++ b/server/src/main/java/org/elasticsearch/index/shard/IndexShard.java @@ -1354,6 +1354,15 @@ public class IndexShard extends AbstractIndexShardComponent implements IndicesCl } } + public void preRecovery() { + final IndexShardState currentState = this.state; // single volatile read + if (currentState == IndexShardState.CLOSED) { + throw new IndexShardNotRecoveringException(shardId, currentState); + } + assert currentState == IndexShardState.RECOVERING : "expected a recovering shard " + shardId + " but got " + currentState; + indexEventListener.beforeIndexShardRecovery(this, indexSettings); + } + public void postRecovery(String reason) throws IndexShardStartedException, IndexShardRelocatedException, IndexShardClosedException { synchronized (postRecoveryMutex) { // we need to refresh again to expose all operations that were index until now. Otherwise diff --git a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java index f156c4e881b..5d97f623cd5 100644 --- a/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java +++ b/server/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java @@ -369,6 +369,7 @@ final class StoreRecovery { * Recovers the state of the shard from the store. 
*/ private void internalRecoverFromStore(IndexShard indexShard) throws IndexShardRecoveryException { + indexShard.preRecovery(); final RecoveryState recoveryState = indexShard.recoveryState(); final boolean indexShouldExists = recoveryState.getRecoverySource().getType() != RecoverySource.Type.EMPTY_STORE; indexShard.prepareForIndexRecovery(); @@ -460,6 +461,7 @@ final class StoreRecovery { private void restore(IndexShard indexShard, Repository repository, SnapshotRecoverySource restoreSource, ActionListener listener) { logger.debug("restoring from {} ...", indexShard.recoveryState().getRecoverySource()); + indexShard.preRecovery(); final RecoveryState.Translog translogState = indexShard.recoveryState().getTranslog(); if (restoreSource == null) { listener.onFailure(new IndexShardRestoreFailedException(shardId, "empty restore source")); diff --git a/server/src/main/java/org/elasticsearch/index/translog/Translog.java b/server/src/main/java/org/elasticsearch/index/translog/Translog.java index 452d65ffcfd..202c363e712 100644 --- a/server/src/main/java/org/elasticsearch/index/translog/Translog.java +++ b/server/src/main/java/org/elasticsearch/index/translog/Translog.java @@ -23,6 +23,8 @@ import org.apache.logging.log4j.message.ParameterizedMessage; import org.apache.lucene.index.Term; import org.apache.lucene.store.AlreadyClosedException; import org.elasticsearch.Version; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.bytes.BytesArray; import org.elasticsearch.common.bytes.BytesReference; @@ -1852,20 +1854,58 @@ public class Translog extends AbstractIndexShardComponent implements IndexShardC static String createEmptyTranslog(Path location, long initialGlobalCheckpoint, ShardId shardId, ChannelFactory channelFactory, long primaryTerm) throws IOException { + return createEmptyTranslog(location, shardId, initialGlobalCheckpoint, primaryTerm, null, channelFactory); + } + + /** + * Creates a new empty translog within the specified {@code location} that contains the given {@code initialGlobalCheckpoint}, + * {@code primaryTerm} and {@code translogUUID}. + * + * This method should be used directly under specific circumstances like for shards that will see no indexing. Specifying a non-unique + * translog UUID could cause a lot of issues and that's why in all (but one) cases the method + * {@link #createEmptyTranslog(Path, long, ShardId, long)} should be used instead. 
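Put differently, the intended caller shape looks roughly like this; `location`, `shardId`, `primaryTerm`, and `knownTranslogUuid` are assumed to be in scope, and passing `null` for the UUID or the channel factory falls back to a random UUID and `FileChannel::open` respectively:

[source,java]
----
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.translog.Translog;

// Sketch: re-create an empty translog for a shard that will see no indexing,
// keeping a previously known UUID so existing metadata still matches.
final String uuid = Translog.createEmptyTranslog(
    location,                         // directory for the translog + checkpoint files
    shardId,
    SequenceNumbers.NO_OPS_PERFORMED, // assumed initial global checkpoint
    primaryTerm,
    knownTranslogUuid,                // null would generate a fresh UUID
    null);                            // null falls back to FileChannel::open
----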
+ * + * @param location a {@link Path} to the directory that will contains the translog files (translog + translog checkpoint) + * @param shardId the {@link ShardId} + * @param initialGlobalCheckpoint the global checkpoint to initialize the translog with + * @param primaryTerm the shard's primary term to initialize the translog with + * @param translogUUID the unique identifier to initialize the translog with + * @param factory a {@link ChannelFactory} used to open translog files + * @return the translog's unique identifier + * @throws IOException if something went wrong during translog creation + */ + public static String createEmptyTranslog(final Path location, + final ShardId shardId, + final long initialGlobalCheckpoint, + final long primaryTerm, + @Nullable final String translogUUID, + @Nullable final ChannelFactory factory) throws IOException { IOUtils.rm(location); Files.createDirectories(location); - final Checkpoint checkpoint = - Checkpoint.emptyTranslogCheckpoint(0, 1, initialGlobalCheckpoint, 1); + + final long generation = 1L; + final long minTranslogGeneration = 1L; + final ChannelFactory channelFactory = factory != null ? factory : FileChannel::open; + final String uuid = Strings.hasLength(translogUUID) ? translogUUID : UUIDs.randomBase64UUID(); final Path checkpointFile = location.resolve(CHECKPOINT_FILE_NAME); + final Path translogFile = location.resolve(getFilename(generation)); + final Checkpoint checkpoint = Checkpoint.emptyTranslogCheckpoint(0, generation, initialGlobalCheckpoint, minTranslogGeneration); + Checkpoint.write(channelFactory, checkpointFile, checkpoint, StandardOpenOption.WRITE, StandardOpenOption.CREATE_NEW); IOUtils.fsync(checkpointFile, false); - final String translogUUID = UUIDs.randomBase64UUID(); - TranslogWriter writer = TranslogWriter.create(shardId, translogUUID, 1, - location.resolve(getFilename(1)), channelFactory, - new ByteSizeValue(10), 1, initialGlobalCheckpoint, - () -> { throw new UnsupportedOperationException(); }, () -> { throw new UnsupportedOperationException(); }, primaryTerm, - new TragicExceptionHolder(), seqNo -> { throw new UnsupportedOperationException(); }); + final TranslogWriter writer = TranslogWriter.create(shardId, uuid, generation, translogFile, channelFactory, + new ByteSizeValue(10), minTranslogGeneration, initialGlobalCheckpoint, + () -> { + throw new UnsupportedOperationException(); + }, () -> { + throw new UnsupportedOperationException(); + }, + primaryTerm, + new TragicExceptionHolder(), + seqNo -> { + throw new UnsupportedOperationException(); + }); writer.close(); - return translogUUID; + return uuid; } } diff --git a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java index bd447806dc0..c0a8cfbe432 100644 --- a/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java +++ b/server/src/main/java/org/elasticsearch/indices/recovery/PeerRecoveryTargetService.java @@ -172,10 +172,12 @@ public class PeerRecoveryTargetService implements IndexEventListener { timer = recoveryTarget.state().getTimer(); cancellableThreads = recoveryTarget.cancellableThreads(); try { + final IndexShard indexShard = recoveryTarget.indexShard(); + indexShard.preRecovery(); assert recoveryTarget.sourceNode() != null : "can not do a recovery without a source node"; logger.trace("{} preparing shard for peer recovery", recoveryTarget.shardId()); - 
recoveryTarget.indexShard().prepareForIndexRecovery(); - final long startingSeqNo = recoveryTarget.indexShard().recoverLocallyUpToGlobalCheckpoint(); + indexShard.prepareForIndexRecovery(); + final long startingSeqNo = indexShard.recoverLocallyUpToGlobalCheckpoint(); assert startingSeqNo == UNASSIGNED_SEQ_NO || recoveryTarget.state().getStage() == RecoveryState.Stage.TRANSLOG : "unexpected recovery stage [" + recoveryTarget.state().getStage() + "] starting seqno [ " + startingSeqNo + "]"; request = getStartRecoveryRequest(logger, clusterService.localNode(), recoveryTarget, startingSeqNo); diff --git a/server/src/main/java/org/elasticsearch/node/Node.java b/server/src/main/java/org/elasticsearch/node/Node.java index 4c612e72279..89dba3f39d6 100644 --- a/server/src/main/java/org/elasticsearch/node/Node.java +++ b/server/src/main/java/org/elasticsearch/node/Node.java @@ -616,8 +616,12 @@ public class Node implements Closeable { ); injector = modules.createInjector(); - // TODO hack around circular dependencies problems in AllocationService - clusterModule.getAllocationService().setGatewayAllocator(injector.getInstance(GatewayAllocator.class)); + // We allocate copies of existing shards by looking for a viable copy of the shard in the cluster and assigning the shard there. + // The search for viable copies is triggered by an allocation attempt (i.e. a reroute) and is performed asynchronously. When it + // completes we trigger another reroute to try the allocation again. This means there is a circular dependency: the allocation + // service needs access to the existing shards allocators (e.g. the GatewayAllocator) which need to be able to trigger a + // reroute, which needs to call into the allocation service. We close the loop here: + clusterModule.setExistingShardsAllocators(injector.getInstance(GatewayAllocator.class)); List pluginLifecycleComponents = pluginComponents.stream() .filter(p -> p instanceof LifecycleComponent) diff --git a/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java b/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java index a1274b9346c..b04874b4e65 100644 --- a/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java +++ b/server/src/main/java/org/elasticsearch/plugins/ClusterPlugin.java @@ -19,16 +19,17 @@ package org.elasticsearch.plugins; -import java.util.Collection; -import java.util.Collections; -import java.util.Map; -import java.util.function.Supplier; - +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; import org.elasticsearch.cluster.routing.allocation.decider.AllocationDecider; import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.Settings; +import java.util.Collection; +import java.util.Collections; +import java.util.Map; +import java.util.function.Supplier; + /** * An extension point for {@link Plugin} implementations to customer behavior of cluster management. */ @@ -59,6 +60,15 @@ public interface ClusterPlugin { return Collections.emptyMap(); } + /** + * Return {@link ExistingShardsAllocator} implementations added by this plugin; the index setting + * {@link ExistingShardsAllocator#EXISTING_SHARDS_ALLOCATOR_SETTING} sets the key of the allocator to use to allocate its shards. The + * default allocator is {@link org.elasticsearch.gateway.GatewayAllocator}. 
+     */
+    default Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
+        return Collections.emptyMap();
+    }
+
     /**
      * Called when the node is started
      */
diff --git a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java
index 1ac61b27fd1..4011834c31c 100644
--- a/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java
+++ b/server/src/main/java/org/elasticsearch/plugins/RepositoryPlugin.java
@@ -25,6 +25,7 @@ import java.util.Map;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
 import org.elasticsearch.env.Environment;
+import org.elasticsearch.repositories.RepositoriesModule;
 import org.elasticsearch.repositories.Repository;
 
 /**
@@ -58,4 +59,13 @@ public interface RepositoryPlugin {
                                                            ClusterService clusterService) {
         return Collections.emptyMap();
     }
+
+    /**
+     * Passes down the current {@link RepositoriesModule} to repository plugins.
+     *
+     * @param module the current {@link RepositoriesModule}
+     */
+    default void onRepositoriesModule(RepositoriesModule module) {
+        // NORELEASE
+    }
 }
diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java
index f87aab460fc..33bc5d42ed2 100644
--- a/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java
+++ b/server/src/main/java/org/elasticsearch/repositories/RepositoriesModule.java
@@ -73,6 +73,8 @@ public final class RepositoriesModule {
         Map<String, Repository.Factory> internalRepositoryTypes = Collections.unmodifiableMap(internalFactories);
         repositoriesService = new RepositoriesService(settings, clusterService, transportService, repositoryTypes,
             internalRepositoryTypes, threadPool);
+
+        repoPlugins.forEach(rp -> rp.onRepositoriesModule(this));
     }
 
     public RepositoriesService getRepositoryService() {
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index f878e013b04..61f409e4355 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -1013,7 +1013,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         return shardContainer(indexId, shardId.getId());
     }
 
-    private BlobContainer shardContainer(IndexId indexId, int shardId) {
+    public BlobContainer shardContainer(IndexId indexId, int shardId) {
         return blobStore().blobContainer(indicesPath().add(indexId.getId()).add(Integer.toString(shardId)));
     }
 
@@ -1045,8 +1045,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     }
 
     protected void assertSnapshotOrGenericThread() {
-        assert Thread.currentThread().getName().contains(ThreadPool.Names.SNAPSHOT)
-            || Thread.currentThread().getName().contains(ThreadPool.Names.GENERIC) :
+        assert Thread.currentThread().getName().contains('[' + ThreadPool.Names.SNAPSHOT + ']')
+            || Thread.currentThread().getName().contains('[' + ThreadPool.Names.GENERIC + ']') :
             "Expected current thread [" + Thread.currentThread() + "] to be the snapshot or generic thread.";
     }
 
@@ -1984,7 +1984,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
     /**
      * Loads information about shard snapshot
      */
-    private BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer,
SnapshotId snapshotId) { + public BlobStoreIndexShardSnapshot loadShardSnapshot(BlobContainer shardContainer, SnapshotId snapshotId) { try { return indexShardSnapshotFormat.read(shardContainer, snapshotId.getUUID()); } catch (NoSuchFileException ex) { diff --git a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java index d0a55972cc1..2e7e76702d4 100644 --- a/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java +++ b/server/src/test/java/org/elasticsearch/action/admin/cluster/allocation/ClusterAllocationExplainActionTests.java @@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.ShardRoutingState; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationDecision; +import org.elasticsearch.cluster.routing.allocation.AllocationService; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; @@ -57,7 +58,7 @@ public class ClusterAllocationExplainActionTests extends ESTestCase { RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()), clusterState.getRoutingNodes(), clusterState, null, System.nanoTime()); ClusterAllocationExplanation cae = TransportClusterAllocationExplainAction.explainShard(shard, allocation, null, randomBoolean(), - new TestGatewayAllocator(), new ShardsAllocator() { + new AllocationService(null, new TestGatewayAllocator(), new ShardsAllocator() { @Override public void allocate(RoutingAllocation allocation) { // no-op @@ -71,7 +72,7 @@ public class ClusterAllocationExplainActionTests extends ESTestCase { throw new UnsupportedOperationException("cannot explain"); } } - }); + }, null)); assertEquals(shard.currentNodeId(), cae.getCurrentNode().getId()); assertFalse(cae.getShardAllocationDecision().isDecisionTaken()); diff --git a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java index a9f5ac72a67..98e8970dfb5 100644 --- a/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/ClusterModuleTests.java @@ -22,6 +22,7 @@ package org.elasticsearch.cluster; import org.elasticsearch.cluster.metadata.Metadata; import org.elasticsearch.cluster.metadata.RepositoriesMetadata; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; import org.elasticsearch.cluster.routing.allocation.RoutingAllocation; import org.elasticsearch.cluster.routing.allocation.ShardAllocationDecision; import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; @@ -50,7 +51,9 @@ import org.elasticsearch.common.settings.Setting; import org.elasticsearch.common.settings.Setting.Property; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.SettingsModule; +import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.test.gateway.TestGatewayAllocator; import java.util.Arrays; import 
java.util.Collection;
@@ -225,4 +228,26 @@ public class ClusterModuleTests extends ModuleTestCase {
         assertNotNull(fixedClusterState.metadata().custom(whiteListedMetadataCustom));
         assertNull(fixedClusterState.metadata().custom("other"));
     }
+
+    public void testRejectsReservedExistingShardsAllocatorName() {
+        final ClusterModule clusterModule = new ClusterModule(Settings.EMPTY, clusterService,
+            Collections.singletonList(existingShardsAllocatorPlugin(GatewayAllocator.ALLOCATOR_NAME)), clusterInfoService);
+        expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator()));
+    }
+
+    public void testRejectsDuplicateExistingShardsAllocatorName() {
+        final ClusterModule clusterModule = new ClusterModule(Settings.EMPTY, clusterService,
+            Arrays.asList(existingShardsAllocatorPlugin("duplicate"), existingShardsAllocatorPlugin("duplicate")), clusterInfoService);
+        expectThrows(IllegalArgumentException.class, () -> clusterModule.setExistingShardsAllocators(new TestGatewayAllocator()));
+    }
+
+    private static ClusterPlugin existingShardsAllocatorPlugin(final String allocatorName) {
+        return new ClusterPlugin() {
+            @Override
+            public Map<String, ExistingShardsAllocator> getExistingShardsAllocators() {
+                return Collections.singletonMap(allocatorName, new TestGatewayAllocator());
+            }
+        };
+    }
+
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
index 0664ae7bdc6..9d94943ed77 100644
--- a/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/health/ClusterStateHealthTests.java
@@ -40,6 +40,7 @@ import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTableGenerator;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.UUIDs;
 import org.elasticsearch.common.collect.ImmutableOpenIntMap;
@@ -141,7 +142,8 @@ public class ClusterStateHealthTests extends ESTestCase {
         listenerCalled.await();
 
         TransportClusterHealthAction action = new TransportClusterHealthAction(transportService,
-            clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver, new TestGatewayAllocator());
+            clusterService, threadPool, new ActionFilters(new HashSet<>()), indexNameExpressionResolver,
+            new AllocationService(null, new TestGatewayAllocator(), null, null));
         PlainActionFuture<ClusterHealthResponse> listener = new PlainActionFuture<>();
         action.execute(new ClusterHealthRequest().waitForGreenStatus(), listener);
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
index 8e3afc8ae57..2cdbddda262 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/PrimaryAllocationIT.java
@@ -172,7 +172,7 @@ public class PrimaryAllocationIT extends ESIntegTestCase {
         logger.info("--> check that old primary shard does not get promoted to primary again");
         // kick reroute and wait for all shard states to be fetched
         client(master).admin().cluster().prepareReroute().get();
-        assertBusy(() ->
assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetch(), + assertBusy(() -> assertThat(internalCluster().getInstance(GatewayAllocator.class, master).getNumberOfInFlightFetches(), equalTo(0))); // kick reroute a second time and check that all shards are unassigned assertThat(client(master).admin().cluster().prepareReroute().get().getState().getRoutingNodes().unassigned().size(), diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java index 5dcdeda90e2..56e7d27c534 100644 --- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java +++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/AllocationServiceTests.java @@ -18,14 +18,49 @@ */ package org.elasticsearch.cluster.routing.allocation; +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterInfo; +import org.elasticsearch.cluster.ClusterName; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.EmptyClusterInfoService; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.cluster.node.DiscoveryNode; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.IndexRoutingTable; +import org.elasticsearch.cluster.routing.IndexShardRoutingTable; +import org.elasticsearch.cluster.routing.RoutingNode; +import org.elasticsearch.cluster.routing.RoutingTable; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardRoutingState; +import org.elasticsearch.cluster.routing.UnassignedInfo; +import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocator; +import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders; +import org.elasticsearch.cluster.routing.allocation.decider.Decision; +import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationDecider; +import org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.gateway.TestGatewayAllocator; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; import java.util.function.Function; import java.util.stream.Collectors; import java.util.stream.IntStream; +import static org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus.DECIDERS_NO; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING; +import static org.elasticsearch.cluster.routing.allocation.decider.ThrottlingAllocationDecider.CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.not; public class AllocationServiceTests extends 
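// Context for the tests added below: with this change a reroute drives the ExistingShardsAllocator
// instances through a fixed sequence. Roughly, and only as an illustrative sketch (the helper names
// unassignedPrimariesInPriorityOrder, unassignedReplicas, allocatorFor and handler are hypothetical,
// not the actual AllocationService code):
//
//     allocators.forEach(a -> a.beforeAllocation(allocation));            // e.g. kick off async shard fetching
//     for (ShardRouting primary : unassignedPrimariesInPriorityOrder(allocation)) {
//         allocatorFor(primary).allocateUnassigned(primary, allocation, handler);
//     }
//     allocators.forEach(a -> a.afterPrimariesBeforeReplicas(allocation));
//     for (ShardRouting replica : unassignedReplicas(allocation)) {
//         allocatorFor(replica).allocateUnassigned(replica, allocation, handler);
//     }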
ESTestCase {
@@ -74,4 +109,242 @@ public class AllocationServiceTests extends ESTestCase {
         assertThat(abbreviated, containsString("formatted"));
         assertThat(abbreviated, not(containsString("original")));
     }
+
+    public void testAssignsPrimariesInPriorityOrderThenReplicas() {
+        // throttle (incoming) recoveries in order to observe the order of operations, but do not throttle outgoing recoveries since
+        // the effects of that depend on the earlier (random) allocations
+        final Settings settings = Settings.builder()
+            .put(CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING.getKey(), 1)
+            .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING.getKey(), 1)
+            .put(CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING.getKey(), Integer.MAX_VALUE)
+            .build();
+        final ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+        final AllocationService allocationService = new AllocationService(
+            new AllocationDeciders(Arrays.asList(
+                new SameShardAllocationDecider(settings, clusterSettings),
+                new ThrottlingAllocationDecider(settings, clusterSettings))),
+            new ShardsAllocator() {
+                @Override
+                public void allocate(RoutingAllocation allocation) {
+                    // all primaries are handled by existing shards allocators in these tests; even the invalid allocator prevents shards
+                    // from falling through to here
+                    assertThat(allocation.routingNodes().unassigned().getNumPrimaries(), equalTo(0));
+                }
+
+                @Override
+                public ShardAllocationDecision decideShardAllocation(ShardRouting shard, RoutingAllocation allocation) {
+                    return ShardAllocationDecision.NOT_TAKEN;
+                }
+            }, new EmptyClusterInfoService());
+
+        final String unrealisticAllocatorName = "unrealistic";
+        final Map<String, ExistingShardsAllocator> allocatorMap = new HashMap<>();
+        final TestGatewayAllocator testGatewayAllocator = new TestGatewayAllocator();
+        allocatorMap.put(GatewayAllocator.ALLOCATOR_NAME, testGatewayAllocator);
+        allocatorMap.put(unrealisticAllocatorName, new UnrealisticAllocator());
+        allocationService.setExistingShardsAllocators(allocatorMap);
+
+        final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder();
+        nodesBuilder.add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT));
+        nodesBuilder.add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT));
+        nodesBuilder.add(new DiscoveryNode("node3", buildNewFakeTransportAddress(), Version.CURRENT));
+
+        final Metadata.Builder metaData = Metadata.builder()
+            // create 3 indices with different priorities.
The high and low priority indices use the default allocator which (in this test) + // does not allocate any replicas, whereas the medium priority one uses the unrealistic allocator which does allocate replicas + .put(indexMetadata("highPriority", Settings.builder() + .put(IndexMetadata.SETTING_PRIORITY, 10))) + .put(indexMetadata("mediumPriority", Settings.builder() + .put(IndexMetadata.SETTING_PRIORITY, 5) + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), unrealisticAllocatorName))) + .put(indexMetadata("lowPriority", Settings.builder() + .put(IndexMetadata.SETTING_PRIORITY, 3))) + + // also create a 4th index with arbitrary priority and an invalid allocator that we expect to ignore + .put(indexMetadata("invalid", Settings.builder() + .put(IndexMetadata.SETTING_PRIORITY, between(0, 15)) + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "unknown"))); + + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder() + .addAsRecovery(metaData.get("highPriority")) + .addAsRecovery(metaData.get("mediumPriority")) + .addAsRecovery(metaData.get("lowPriority")) + .addAsRecovery(metaData.get("invalid")); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(nodesBuilder) + .metadata(metaData) + .routingTable(routingTableBuilder.build()) + .build(); + + // permit the testGatewayAllocator to allocate primaries to every node + for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) { + for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) { + final ShardRouting primaryShard = indexShardRoutingTable.primaryShard(); + for (DiscoveryNode node : clusterState.nodes()) { + testGatewayAllocator.addKnownAllocation(primaryShard.initialize(node.getId(), FAKE_IN_SYNC_ALLOCATION_ID, 0L)); + } + } + } + + final ClusterState reroutedState1 = rerouteAndStartShards(allocationService, clusterState); + final RoutingTable routingTable1 = reroutedState1.routingTable(); + // the test harness only permits one recovery per node, so we must have allocated all the high-priority primaries and one of the + // medium-priority ones + assertThat(routingTable1.shardsWithState(ShardRoutingState.INITIALIZING), empty()); + assertThat(routingTable1.shardsWithState(ShardRoutingState.RELOCATING), empty()); + assertTrue(routingTable1.shardsWithState(ShardRoutingState.STARTED).stream().allMatch(ShardRouting::primary)); + assertThat(routingTable1.index("highPriority").primaryShardsActive(), equalTo(2)); + assertThat(routingTable1.index("mediumPriority").primaryShardsActive(), equalTo(1)); + assertThat(routingTable1.index("lowPriority").shardsWithState(ShardRoutingState.STARTED), empty()); + assertThat(routingTable1.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty()); + + final ClusterState reroutedState2 = rerouteAndStartShards(allocationService, reroutedState1); + final RoutingTable routingTable2 = reroutedState2.routingTable(); + // this reroute starts the one remaining medium-priority primary and both of the low-priority ones, but no replicas + assertThat(routingTable2.shardsWithState(ShardRoutingState.INITIALIZING), empty()); + assertThat(routingTable2.shardsWithState(ShardRoutingState.RELOCATING), empty()); + assertTrue(routingTable2.shardsWithState(ShardRoutingState.STARTED).stream().allMatch(ShardRouting::primary)); + assertTrue(routingTable2.index("highPriority").allPrimaryShardsActive()); + assertTrue(routingTable2.index("mediumPriority").allPrimaryShardsActive()); + 
assertTrue(routingTable2.index("lowPriority").allPrimaryShardsActive()); + assertThat(routingTable2.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty()); + + final ClusterState reroutedState3 = rerouteAndStartShards(allocationService, reroutedState2); + final RoutingTable routingTable3 = reroutedState3.routingTable(); + // this reroute starts the two medium-priority replicas since their allocator permits this + assertThat(routingTable3.shardsWithState(ShardRoutingState.INITIALIZING), empty()); + assertThat(routingTable3.shardsWithState(ShardRoutingState.RELOCATING), empty()); + assertTrue(routingTable3.index("highPriority").allPrimaryShardsActive()); + assertThat(routingTable3.index("mediumPriority").shardsWithState(ShardRoutingState.UNASSIGNED), empty()); + assertTrue(routingTable3.index("lowPriority").allPrimaryShardsActive()); + assertThat(routingTable3.index("invalid").shardsWithState(ShardRoutingState.STARTED), empty()); + } + + public void testExplainsNonAllocationOfShardWithUnknownAllocator() { + final AllocationService allocationService = new AllocationService(null, null, null); + allocationService.setExistingShardsAllocators( + Collections.singletonMap(GatewayAllocator.ALLOCATOR_NAME, new TestGatewayAllocator())); + + final DiscoveryNodes.Builder nodesBuilder = DiscoveryNodes.builder(); + nodesBuilder.add(new DiscoveryNode("node1", buildNewFakeTransportAddress(), Version.CURRENT)); + nodesBuilder.add(new DiscoveryNode("node2", buildNewFakeTransportAddress(), Version.CURRENT)); + + final Metadata.Builder metadata = Metadata.builder().put(indexMetadata("index", Settings.builder() + .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "unknown"))); + + final RoutingTable.Builder routingTableBuilder = RoutingTable.builder().addAsRecovery(metadata.get("index")); + + final ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT) + .nodes(nodesBuilder) + .metadata(metadata) + .routingTable(routingTableBuilder.build()) + .build(); + + final RoutingAllocation allocation = new RoutingAllocation(new AllocationDeciders(Collections.emptyList()), + clusterState.getRoutingNodes(), clusterState, ClusterInfo.EMPTY, 0L); + allocation.setDebugMode(randomBoolean() ? 
RoutingAllocation.DebugMode.ON : RoutingAllocation.DebugMode.EXCLUDE_YES_DECISIONS); + + final ShardAllocationDecision shardAllocationDecision + = allocationService.explainShardAllocation(clusterState.routingTable().index("index").shard(0).primaryShard(), allocation); + + assertTrue(shardAllocationDecision.isDecisionTaken()); + assertThat(shardAllocationDecision.getAllocateDecision().getAllocationStatus(), + equalTo(UnassignedInfo.AllocationStatus.NO_VALID_SHARD_COPY)); + assertThat(shardAllocationDecision.getAllocateDecision().getAllocationDecision(), + equalTo(AllocationDecision.NO_VALID_SHARD_COPY)); + assertThat(shardAllocationDecision.getAllocateDecision().getExplanation(), equalTo("cannot allocate because a previous copy of " + + "the primary shard existed but can no longer be found on the nodes in the cluster")); + + for (NodeAllocationResult nodeAllocationResult : shardAllocationDecision.getAllocateDecision().nodeDecisions) { + assertThat(nodeAllocationResult.getNodeDecision(), equalTo(AllocationDecision.NO)); + assertThat(nodeAllocationResult.getCanAllocateDecision().type(), equalTo(Decision.Type.NO)); + assertThat(nodeAllocationResult.getCanAllocateDecision().label(), equalTo("allocator_plugin")); + assertThat(nodeAllocationResult.getCanAllocateDecision().getExplanation(), equalTo("finding the previous copies of this " + + "shard requires an allocator called [unknown] but that allocator was not found; perhaps the corresponding plugin is " + + "not installed")); + } + } + + private static final String FAKE_IN_SYNC_ALLOCATION_ID = "_in_sync_"; // so we can allocate primaries anywhere + + private static IndexMetadata.Builder indexMetadata(String name, Settings.Builder settings) { + return IndexMetadata.builder(name) + .settings(settings(Version.CURRENT).put(settings.build())) + .numberOfShards(2).numberOfReplicas(1) + .putInSyncAllocationIds(0, Collections.singleton(FAKE_IN_SYNC_ALLOCATION_ID)) + .putInSyncAllocationIds(1, Collections.singleton(FAKE_IN_SYNC_ALLOCATION_ID)); + } + + /** + * Allocates shards to nodes regardless of whether there's already a shard copy there. + */ + private static class UnrealisticAllocator implements ExistingShardsAllocator { + + @Override + public void beforeAllocation(RoutingAllocation allocation) { + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + } + + @Override + public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation, + UnassignedAllocationHandler unassignedAllocationHandler) { + final AllocateUnassignedDecision allocateUnassignedDecision = explainUnassignedShardAllocation(shardRouting, allocation); + if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) { + unassignedAllocationHandler.initialize(allocateUnassignedDecision.getTargetNode().getId(), + shardRouting.primary() ? 
FAKE_IN_SYNC_ALLOCATION_ID : null, 0L, allocation.changes());
+            } else {
+                unassignedAllocationHandler.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
+            }
+        }
+
+        @Override
+        public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting shardRouting, RoutingAllocation allocation) {
+            boolean throttled = false;
+
+            for (final RoutingNode routingNode : allocation.routingNodes()) {
+                final Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
+                if (decision.type() == Decision.Type.YES) {
+                    return AllocateUnassignedDecision.yes(routingNode.node(), null, null, false);
+                } else {
+                    if (shardRouting.index().getName().equals("mediumPriority") && shardRouting.primary() == false
+                        && decision.type() == Decision.Type.THROTTLE) {
+                        allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
+                    }
+                }
+
+                throttled = throttled || decision.type() == Decision.Type.THROTTLE;
+            }
+
+            return throttled ? AllocateUnassignedDecision.throttle(null)
+                : AllocateUnassignedDecision.no(DECIDERS_NO, null);
+        }
+
+        @Override
+        public void cleanCaches() {
+        }
+
+        @Override
+        public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {
+        }
+
+        @Override
+        public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {
+        }
+
+        @Override
+        public int getNumberOfInFlightFetches() {
+            return 0;
+        }
+    }
+
+    private static ClusterState rerouteAndStartShards(final AllocationService allocationService, final ClusterState clusterState) {
+        final ClusterState reroutedState = allocationService.reroute(clusterState, "test");
+        return allocationService.applyStartedShards(reroutedState,
+            reroutedState.routingTable().shardsWithState(ShardRoutingState.INITIALIZING));
+    }
+
 }
diff --git a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
index 847bb30e6e4..952b1cac500 100644
--- a/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
+++ b/server/src/test/java/org/elasticsearch/cluster/routing/allocation/ClusterRebalanceRoutingTests.java
@@ -566,19 +566,13 @@ public class ClusterRebalanceRoutingTests extends ESAllocationTestCase {
         AllocationService strategy = createAllocationService(Settings.EMPTY, new TestGatewayAllocator() {
             @Override
-            public void allocateUnassigned(RoutingAllocation allocation) {
-                if (allocateTest1.get() == false) {
-                    RoutingNodes.UnassignedShards unassigned = allocation.routingNodes().unassigned();
-                    RoutingNodes.UnassignedShards.UnassignedIterator iterator = unassigned.iterator();
-                    while (iterator.hasNext()) {
-                        ShardRouting next = iterator.next();
-                        if ("test1".equals(next.index().getName())) {
-                            iterator.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
-                        }
-
-                    }
+            public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation,
+                                           UnassignedAllocationHandler unassignedAllocationHandler) {
+                if (allocateTest1.get() == false && "test1".equals(shardRouting.index().getName())) {
+                    unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.NO_ATTEMPT, allocation.changes());
+                } else {
+                    super.allocateUnassigned(shardRouting, allocation, unassignedAllocationHandler);
                 }
-                super.allocateUnassigned(allocation);
             }
         });
@@ -667,11 +661,10 @@ public class ClusterRebalanceRoutingTests extends
ESAllocationTestCase { .put(ClusterRebalanceAllocationDecider.CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING.getKey(), ClusterRebalanceAllocationDecider.ClusterRebalanceType.ALWAYS.toString()).build(), new TestGatewayAllocator() { @Override - public void allocateUnassigned(RoutingAllocation allocation) { + public void beforeAllocation(RoutingAllocation allocation) { if (hasFetches.get()) { allocation.setHasPendingAsyncFetch(); } - super.allocateUnassigned(allocation); } }); diff --git a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java index c603eda906c..8e9bc382cb8 100644 --- a/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java +++ b/server/src/test/java/org/elasticsearch/common/blobstore/fs/FsBlobContainerTests.java @@ -18,14 +18,78 @@ */ package org.elasticsearch.common.blobstore.fs; +import org.apache.lucene.mockfile.FilterFileSystemProvider; +import org.apache.lucene.mockfile.FilterSeekableByteChannel; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.io.PathUtils; +import org.elasticsearch.common.io.PathUtilsForTesting; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.core.internal.io.IOUtils; import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.SeekableByteChannel; +import java.nio.file.FileSystem; +import java.nio.file.Files; +import java.nio.file.OpenOption; +import java.nio.file.Path; +import java.nio.file.attribute.FileAttribute; +import java.nio.file.spi.FileSystemProvider; +import java.util.Locale; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.Consumer; import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.is; import static org.hamcrest.Matchers.startsWith; +@LuceneTestCase.SuppressFileSystems("*") // we do our own mocking public class FsBlobContainerTests extends ESTestCase { + final AtomicLong totalBytesRead = new AtomicLong(0); + FileSystem fileSystem = null; + + @Before + public void setupMockFileSystems() { + FileSystemProvider fileSystemProvider = new MockFileSystemProvider(PathUtils.getDefaultFileSystem(), totalBytesRead::addAndGet); + fileSystem = fileSystemProvider.getFileSystem(null); + PathUtilsForTesting.installMock(fileSystem); // restored by restoreFileSystem in ESTestCase + } + + @After + public void closeMockFileSystems() throws IOException { + IOUtils.close(fileSystem); + } + + public void testReadBlobRangeCorrectlySkipBytes() throws IOException { + final String blobName = randomAlphaOfLengthBetween(1, 20).toLowerCase(Locale.ROOT); + final byte[] blobData = randomByteArrayOfLength(randomIntBetween(1, frequently() ? 
512 : 1 << 20)); // rarely up to 1mb
+
+        final Path path = PathUtils.get(createTempDir().toString());
+        Files.write(path.resolve(blobName), blobData);
+
+        final FsBlobContainer container = new FsBlobContainer(new FsBlobStore(Settings.EMPTY, path, false), BlobPath.cleanPath(), path);
+        assertThat(totalBytesRead.get(), equalTo(0L));
+
+        final long start = randomLongBetween(0L, Math.max(0L, blobData.length - 1));
+        final long length = randomLongBetween(1L, blobData.length - start);
+
+        try (InputStream stream = container.readBlob(blobName, start, length)) {
+            assertThat(totalBytesRead.get(), equalTo(0L));
+            assertThat(Streams.consumeFully(stream), equalTo(length));
+            assertThat(totalBytesRead.get(), equalTo(length));
+        }
+    }
+
     public void testTempBlobName() {
         final String blobName = randomAlphaOfLengthBetween(1, 20);
         final String tempBlobName = FsBlobContainer.tempBlobName(blobName);
@@ -37,4 +101,48 @@ public class FsBlobContainerTests extends ESTestCase {
         final String tempBlobName = FsBlobContainer.tempBlobName(randomAlphaOfLengthBetween(1, 20));
         assertThat(FsBlobContainer.isTempBlobName(tempBlobName), is(true));
     }
+
+    static class MockFileSystemProvider extends FilterFileSystemProvider {
+
+        final Consumer<Long> onRead;
+
+        MockFileSystemProvider(FileSystem inner, Consumer<Long> onRead) {
+            super("mockfs://", inner);
+            this.onRead = onRead;
+        }
+
+        private int onRead(int read) {
+            if (read != -1) {
+                onRead.accept((long) read);
+            }
+            return read;
+        }
+
+        @Override
+        public SeekableByteChannel newByteChannel(Path path, Set<? extends OpenOption> opts, FileAttribute<?>... attrs) throws IOException {
+            return new FilterSeekableByteChannel(super.newByteChannel(path, opts, attrs)) {
+                @Override
+                public int read(ByteBuffer dst) throws IOException {
+                    return onRead(super.read(dst));
+                }
+            };
+        }
+
+        @Override
+        public InputStream newInputStream(Path path, OpenOption...
opts) throws IOException { + // no super.newInputStream(path, opts) as it will use the delegating FileSystem to open a SeekableByteChannel + // and instead we want the mocked newByteChannel() method to be used + return new FilterInputStream(delegate.newInputStream(path, opts)) { + @Override + public int read() throws IOException { + return onRead(super.read()); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return onRead(super.read(b, off, len)); + } + }; + } + } } diff --git a/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java b/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java index ee1933e3a10..30c8a9c6e49 100644 --- a/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java +++ b/server/src/test/java/org/elasticsearch/common/io/StreamsTests.java @@ -79,4 +79,20 @@ public class StreamsTests extends ESTestCase { assertEquals(-1, input.read()); input.close(); } + + public void testFullyConsumeInputStream() throws IOException { + final String bytes = randomAlphaOfLengthBetween(0, 100); + final BytesArray stuffArray = new BytesArray(bytes); + assertEquals(bytes.length(), Streams.consumeFully(stuffArray.streamInput())); + } + + public void testLimitInputStream() throws IOException { + final byte[] bytes = randomAlphaOfLengthBetween(1, 100).getBytes(StandardCharsets.UTF_8); + final int limit = randomIntBetween(0, bytes.length); + final BytesArray stuffArray = new BytesArray(bytes); + final ByteArrayOutputStream out = new ByteArrayOutputStream(bytes.length); + final long count = Streams.copy(Streams.limitStream(stuffArray.streamInput(), limit), out); + assertEquals(limit, count); + assertThat(Arrays.equals(out.toByteArray(), Arrays.copyOf(bytes, limit)), equalTo(true)); + } } diff --git a/server/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java b/server/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java index 7113a301e7f..930a1516fae 100644 --- a/server/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java +++ b/server/src/test/java/org/elasticsearch/common/lucene/store/ByteArrayIndexInputTests.java @@ -19,15 +19,12 @@ package org.elasticsearch.common.lucene.store; -import org.apache.lucene.store.IndexInput; -import org.elasticsearch.test.ESTestCase; - import java.io.IOException; import java.nio.charset.StandardCharsets; import static org.hamcrest.Matchers.containsString; -public class ByteArrayIndexInputTests extends ESTestCase { +public class ByteArrayIndexInputTests extends ESIndexInputTestCase { public void testRandomReads() throws IOException { for (int i = 0; i < 100; i++) { byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(StandardCharsets.UTF_8); @@ -87,47 +84,5 @@ public class ByteArrayIndexInputTests extends ESTestCase { } } - private byte[] randomReadAndSlice(IndexInput indexInput, int length) throws IOException { - int readPos = (int) indexInput.getFilePointer(); - byte[] output = new byte[length]; - while (readPos < length) { - switch (randomIntBetween(0, 3)) { - case 0: - // Read by one byte at a time - output[readPos++] = indexInput.readByte(); - break; - case 1: - // Read several bytes into target - int len = randomIntBetween(1, length - readPos); - indexInput.readBytes(output, readPos, len); - readPos += len; - break; - case 2: - // Read several bytes into 0-offset target - len = randomIntBetween(1, length - readPos); - byte[] temp = new byte[len]; - indexInput.readBytes(temp, 0, 
len); - System.arraycopy(temp, 0, output, readPos, len); - readPos += len; - break; - case 3: - // Read using slice - len = randomIntBetween(1, length - readPos); - IndexInput slice = indexInput.slice("slice (" + readPos + ", " + len + ") of " + indexInput.toString(), readPos, len); - temp = randomReadAndSlice(slice, len); - // assert that position in the original input didn't change - assertEquals(readPos, indexInput.getFilePointer()); - System.arraycopy(temp, 0, output, readPos, len); - readPos += len; - indexInput.seek(readPos); - assertEquals(readPos, indexInput.getFilePointer()); - break; - default: - fail(); - } - assertEquals(readPos, indexInput.getFilePointer()); - } - return output; - } } diff --git a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java index 38bb64ebf32..2404facdda7 100644 --- a/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java @@ -78,12 +78,19 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { this.testAllocator = new TestAllocator(); } + private void allocateAllUnassigned(final RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + testAllocator.allocateUnassigned(iterator.next(), allocation, iterator); + } + } + public void testNoProcessPrimaryNotAllocatedBefore() { final RoutingAllocation allocation; // with old version, we can't know if a shard was allocated before or not allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomFrom(INDEX_CREATED, CLUSTER_RECOVERED, INDEX_REOPENED)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().iterator().next().shardId(), equalTo(shardId)); @@ -96,7 +103,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { public void testNoAsyncFetchData() { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "allocId"); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -111,7 +118,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "allocId"); testAllocator.addData(node1, null, randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -125,7 +132,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { public void testNoMatchingAllocationIdFound() { RoutingAllocation allocation = 
routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "id2"); testAllocator.addData(node1, "id1", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -139,7 +146,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); testAllocator.addData(node1, "allocId1", randomBoolean(), new CorruptIndexException("test", "test")); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -153,7 +160,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); testAllocator.addData(node1, "allocId1", randomBoolean(), new ShardLockObtainFailedException(shardId, "test")); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -177,7 +184,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { testAllocator.addData(node1, allocId1, randomBoolean(), new ShardLockObtainFailedException(shardId, "test")); testAllocator.addData(node2, allocId2, randomBoolean(), null); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -196,7 +203,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), randomFrom(CLUSTER_RECOVERED, INDEX_REOPENED), "allocId1"); testAllocator.addData(node1, "allocId1", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -221,7 +228,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { new TestAllocateDecision(randomBoolean() ? 
Decision.YES : Decision.NO), getNoDeciderThatAllowsForceAllocate()
         ));
         RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
-        testAllocator.allocateUnassigned(allocation);
+        allocateAllUnassigned(allocation);
         assertThat(allocation.routingNodesChanged(), equalTo(true));
         assertTrue(allocation.routingNodes().unassigned().ignored().isEmpty());
         assertEquals(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), 1);
@@ -244,7 +251,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
             getNoDeciderThatThrottlesForceAllocate()
         ));
         RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
-        testAllocator.allocateUnassigned(allocation);
+        allocateAllUnassigned(allocation);
         assertThat(allocation.routingNodesChanged(), equalTo(true));
         List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
         assertEquals(ignored.size(), 1);
@@ -268,7 +275,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
             new TestAllocateDecision(Decision.THROTTLE), getNoDeciderThatAllowsForceAllocate()
         ));
         RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(deciders, CLUSTER_RECOVERED, "allocId1");
-        testAllocator.allocateUnassigned(allocation);
+        allocateAllUnassigned(allocation);
         assertThat(allocation.routingNodesChanged(), equalTo(true));
         List<ShardRouting> ignored = allocation.routingNodes().unassigned().ignored();
         assertEquals(ignored.size(), 1);
@@ -287,7 +294,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
         boolean node1HasPrimaryShard = randomBoolean();
         testAllocator.addData(node1, node1HasPrimaryShard ? primaryAllocId : replicaAllocId, node1HasPrimaryShard);
         testAllocator.addData(node2, node1HasPrimaryShard ?
replicaAllocId : primaryAllocId, !node1HasPrimaryShard); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -305,7 +312,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); testAllocator.addData(node1, "allocId1", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -320,7 +327,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { final RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), CLUSTER_RECOVERED, "allocId1"); testAllocator.addData(node1, "allocId1", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -336,7 +343,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { public void testRestore() { RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), "allocId"); testAllocator.addData(node1, "some allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -350,7 +357,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { public void testRestoreThrottle() { RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders(), "allocId"); testAllocator.addData(node1, "some allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false)); assertClusterHealthStatus(allocation, ClusterHealthStatus.YELLOW); @@ -363,7 +370,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase { public void testRestoreForcesAllocateIfShardAvailable() { RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders(), "allocId"); testAllocator.addData(node1, "some allocId", randomBoolean()); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); @@ -377,7 +384,7 @@ public class PrimaryShardAllocatorTests 
extends ESAllocationTestCase { public void testRestoreDoesNotAssignIfNoShardAvailable() { RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), "allocId"); testAllocator.addData(node1, null, false); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true)); assertThat(allocation.routingNodes().unassigned().size(), equalTo(1)); diff --git a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java index 75fbf51cc01..2432fb01673 100644 --- a/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java +++ b/server/src/test/java/org/elasticsearch/gateway/ReplicaShardAllocatorTests.java @@ -87,13 +87,20 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { this.testAllocator = new TestAllocator(); } + private void allocateAllUnassigned(final RoutingAllocation allocation) { + final RoutingNodes.UnassignedShards.UnassignedIterator iterator = allocation.routingNodes().unassigned().iterator(); + while (iterator.hasNext()) { + testAllocator.allocateUnassigned(iterator.next(), allocation, iterator); + } + } + /** * Verifies that when we are still fetching data in an async manner, the replica shard moves to ignore unassigned. */ public void testNoAsyncFetchData() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); testAllocator.clean(); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); } @@ -106,7 +113,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, UnassignedInfo.Reason.INDEX_CREATED); testAllocator.clean(); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(testAllocator.getFetchDataCalledAndClean(), equalTo(false)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); @@ -121,7 +128,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { EnumSet.complementOf(EnumSet.of(UnassignedInfo.Reason.INDEX_CREATED))); RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders(), Settings.EMPTY, reason); testAllocator.clean(); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat("failed with reason " + reason, testAllocator.getFetchDataCalledAndClean(), equalTo(true)); } @@ -133,7 +140,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { DiscoveryNode nodeToMatch = randomBoolean() ? 
node2 : node3; testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(nodeToMatch, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -147,7 +154,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(nodeToMatch, "MATCH", new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM" ,MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -161,7 +168,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { DiscoveryNode nodeToMatch = randomBoolean() ? node2 : node3; testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(nodeToMatch, "NO_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(nodeToMatch.getId())); @@ -179,7 +186,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node2, "NOT_MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node3.getId())); @@ -253,7 +260,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); testAllocator.addData(node2, null); // has retention lease but store is empty testAllocator.addData(node3, randomSyncId(), new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node3.getId())); @@ -268,7 +275,7 @@ public class 
ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testNoPrimaryData() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); } @@ -280,7 +287,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { public void testNoDataForReplicaOnAnyNode() { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); } @@ -293,7 +300,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { RoutingAllocation allocation = onePrimaryOnNode1And1Replica(yesAllocationDeciders()); testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node2, "NO_MATCH", new StoreFileMetadata("file1", 10, "NO_MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.UNASSIGNED).get(0).shardId(), equalTo(shardId)); } @@ -307,7 +314,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { onePrimaryOnNode1And1Replica(randomBoolean() ? 
noAllocationDeciders() : throttleAllocationDeciders()); testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); } @@ -332,7 +339,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { }))); testAllocator.addData(node1, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)) .addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); } @@ -346,7 +353,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { // we sometime return empty list of files, make sure we test this as well testAllocator.addData(node2, null); } - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(false)); assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1)); assertThat(allocation.routingNodes().unassigned().ignored().get(0).shardId(), equalTo(shardId)); @@ -355,7 +362,7 @@ public class ReplicaShardAllocatorTests extends ESAllocationTestCase { Settings.builder().put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), TimeValue.timeValueHours(1)).build(), UnassignedInfo.Reason.NODE_LEFT); testAllocator.addData(node2, "MATCH", new StoreFileMetadata("file1", 10, "MATCH_CHECKSUM", MIN_SUPPORTED_LUCENE_VERSION)); - testAllocator.allocateUnassigned(allocation); + allocateAllUnassigned(allocation); assertThat(allocation.routingNodesChanged(), equalTo(true)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1)); assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), diff --git a/test/fixtures/azure-fixture/docker-compose.yml b/test/fixtures/azure-fixture/docker-compose.yml index ff328c52f34..61ea9d28a56 100644 --- a/test/fixtures/azure-fixture/docker-compose.yml +++ b/test/fixtures/azure-fixture/docker-compose.yml @@ -8,3 +8,12 @@ services: - ./testfixtures_shared/shared:/fixture/shared ports: - "8091" + + azure-fixture-other: + build: + context: . + dockerfile: Dockerfile + volumes: + - ./testfixtures_shared/shared:/fixture/shared + ports: + - "8091" diff --git a/test/fixtures/s3-fixture/docker-compose.yml b/test/fixtures/s3-fixture/docker-compose.yml index 401a43c9255..1d06334eddb 100644 --- a/test/fixtures/s3-fixture/docker-compose.yml +++ b/test/fixtures/s3-fixture/docker-compose.yml @@ -15,6 +15,21 @@ services: ports: - "80" + s3-fixture-other: + build: + context: . + args: + fixtureClass: fixture.s3.S3HttpFixture + port: 80 + bucket: "bucket" + basePath: "base_path" + accessKey: "access_key" + dockerfile: Dockerfile + volumes: + - ./testfixtures_shared/shared:/fixture/shared + ports: + - "80" + s3-fixture-with-session-token: build: context: . 
diff --git a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java index f9bce9f02c8..7ae8747aadb 100644 --- a/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java +++ b/test/fixtures/s3-fixture/src/main/java/fixture/s3/S3HttpHandler.java @@ -216,13 +216,13 @@ public class S3HttpHandler implements HttpHandler { final int start = Integer.parseInt(matcher.group(1)); final int end = Integer.parseInt(matcher.group(2)); - final int length = end - start; + final BytesReference rangeBlob = blob.slice(start, end + 1 - start); exchange.getResponseHeaders().add("Content-Type", "application/octet-stream"); - exchange.getResponseHeaders().add("Content-Range", - String.format(Locale.ROOT, "bytes=%d-%d/%d", start, end, blob.length())); - exchange.sendResponseHeaders(RestStatus.OK.getStatus(), length); - exchange.getResponseBody().write(BytesReference.toBytes(blob), start, length); + exchange.getResponseHeaders().add("Content-Range", String.format(Locale.ROOT, "bytes %d-%d/%d", + start, end, rangeBlob.length())); + exchange.sendResponseHeaders(RestStatus.OK.getStatus(), rangeBlob.length()); + rangeBlob.writeTo(exchange.getResponseBody()); } } else { exchange.sendResponseHeaders(RestStatus.NOT_FOUND.getStatus(), -1); diff --git a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java index 4dd002a4602..fe5de265043 100644 --- a/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/cluster/ESAllocationTestCase.java @@ -23,7 +23,6 @@ import org.elasticsearch.Version; import org.elasticsearch.cluster.node.DiscoveryNode; import org.elasticsearch.cluster.node.DiscoveryNodeRole; import org.elasticsearch.cluster.routing.RoutingNode; -import org.elasticsearch.cluster.routing.RoutingNodes; import org.elasticsearch.cluster.routing.ShardRouting; import org.elasticsearch.cluster.routing.UnassignedInfo; import org.elasticsearch.cluster.routing.allocation.AllocationService; @@ -258,26 +257,33 @@ public abstract class ESAllocationTestCase extends ESTestCase { public DelayedShardsMockGatewayAllocator() {} @Override - public void applyStartedShards(RoutingAllocation allocation, List startedShards) { + public void applyStartedShards(List startedShards, RoutingAllocation allocation) { // no-op } @Override - public void applyFailedShards(RoutingAllocation allocation, List failedShards) { + public void applyFailedShards(List failedShards, RoutingAllocation allocation) { // no-op } @Override - public void allocateUnassigned(RoutingAllocation allocation) { - final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = allocation.routingNodes().unassigned().iterator(); - while (unassignedIterator.hasNext()) { - ShardRouting shard = unassignedIterator.next(); - if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { - continue; - } - if (shard.unassignedInfo().isDelayed()) { - unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes()); - } + public void beforeAllocation(RoutingAllocation allocation) { + // no-op + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + // no-op + } + + @Override + public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation, + 
UnassignedAllocationHandler unassignedAllocationHandler) { + if (shardRouting.primary() || shardRouting.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { + return; + } + if (shardRouting.unassignedInfo().isDelayed()) { + unassignedAllocationHandler.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes()); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java b/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java new file mode 100644 index 00000000000..eb543006db4 --- /dev/null +++ b/test/framework/src/main/java/org/elasticsearch/common/lucene/store/ESIndexInputTestCase.java @@ -0,0 +1,192 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.common.lucene.store; + +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.util.concurrent.AbstractRunnable; +import org.elasticsearch.common.util.concurrent.EsExecutors; +import org.elasticsearch.common.util.concurrent.EsThreadPoolExecutor; +import org.elasticsearch.common.util.concurrent.ThreadContext; +import org.elasticsearch.test.ESTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.util.concurrent.CountDownLatch; + +/** + * Test harness for verifying {@link IndexInput} implementations. + */ +public class ESIndexInputTestCase extends ESTestCase { + + private static EsThreadPoolExecutor executor; + + @BeforeClass + public static void createExecutor() { + final String name = "TEST-" + getTestClass().getSimpleName() + "#randomReadAndSlice"; + executor = EsExecutors.newFixed(name, 10, 0, EsExecutors.daemonThreadFactory(name), new ThreadContext(Settings.EMPTY)); + } + + @AfterClass + public static void destroyExecutor() { + executor.shutdown(); + } + + /** + * Reads the contents of an {@link IndexInput} from {@code indexInput.getFilePointer()} to {@code length} using a wide variety of + * different access methods. Returns an array of length {@code length} containing the bytes that were read starting at index + * {@code indexInput.getFilePointer()}. The bytes of the returned array with indices below the initial value of + * {@code indexInput.getFilePointer()} may contain anything. The final value of {@code indexInput.getFilePointer()} is {@code length}. 
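+ * For example, a subclass test might exercise an implementation like this (a sketch; the {@code Directory} instance and
+ * file name are illustrative, not part of this harness):
+ * <pre>
+ * try (IndexInput input = directory.openInput("file", IOContext.READONCE)) {
+ *     byte[] bytes = randomReadAndSlice(input, (int) input.length());
+ *     // verify the returned bytes against the known contents of "file"
+ * }
+ * </pre>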
+ */ + protected byte[] randomReadAndSlice(IndexInput indexInput, int length) throws IOException { + int readPos = (int) indexInput.getFilePointer(); + byte[] output = new byte[length]; + while (readPos < length) { + switch (randomIntBetween(0, 5)) { + case 0: + // Read by one byte at a time + output[readPos++] = indexInput.readByte(); + break; + case 1: + // Read several bytes into target + int len = randomIntBetween(1, length - readPos); + indexInput.readBytes(output, readPos, len); + readPos += len; + break; + case 2: + // Read several bytes into 0-offset target + len = randomIntBetween(1, length - readPos); + byte[] temp = new byte[len]; + indexInput.readBytes(temp, 0, len); + System.arraycopy(temp, 0, output, readPos, len); + readPos += len; + break; + case 3: + // Read using slice + len = randomIntBetween(1, length - readPos); + IndexInput slice = indexInput.slice("slice (" + readPos + ", " + len + ") of " + indexInput, readPos, len); + temp = randomReadAndSlice(slice, len); + // assert that position in the original input didn't change + assertEquals(readPos, indexInput.getFilePointer()); + System.arraycopy(temp, 0, output, readPos, len); + readPos += len; + indexInput.seek(readPos); + assertEquals(readPos, indexInput.getFilePointer()); + break; + case 4: + // Seek at a random position and read a single byte, + // then seek back to original position + final int lastReadPos = readPos; + readPos = randomIntBetween(0, length - 1); + indexInput.seek(readPos); + assertEquals(readPos, indexInput.getFilePointer()); + final int bytesToRead = 1; + temp = randomReadAndSlice(indexInput, readPos + bytesToRead); + System.arraycopy(temp, readPos, output, readPos, bytesToRead); + readPos = lastReadPos; + indexInput.seek(readPos); + assertEquals(readPos, indexInput.getFilePointer()); + break; + case 5: + // Read clone or slice concurrently + final int cloneCount = between(1, 3); + final CountDownLatch startLatch = new CountDownLatch(1 + cloneCount); + final CountDownLatch finishLatch = new CountDownLatch(cloneCount); + + final PlainActionFuture mainThreadResultFuture = new PlainActionFuture<>(); + final int mainThreadReadStart = readPos; + final int mainThreadReadEnd = randomIntBetween(readPos + 1, length); + + for (int i = 0; i < cloneCount; i++) { + executor.execute(new AbstractRunnable() { + @Override + public void onFailure(Exception e) { + throw new AssertionError(e); + } + + @Override + protected void doRun() throws Exception { + final IndexInput clone; + final int readStart = between(0, length); + final int readEnd = between(readStart, length); + if (randomBoolean()) { + clone = indexInput.clone(); + } else { + final int sliceEnd = between(readEnd, length); + clone = indexInput.slice("concurrent slice (0, " + sliceEnd + ") of " + indexInput, 0L, sliceEnd); + } + startLatch.countDown(); + startLatch.await(); + clone.seek(readStart); + final byte[] cloneResult = randomReadAndSlice(clone, readEnd); + if (randomBoolean()) { + clone.close(); + } + + // the read from the clone should agree with the read from the main input on their overlap + final int maxStart = Math.max(mainThreadReadStart, readStart); + final int minEnd = Math.min(mainThreadReadEnd, readEnd); + if (maxStart < minEnd) { + final byte[] mainThreadResult = mainThreadResultFuture.actionGet(); + final int overlapLen = minEnd - maxStart; + final byte[] fromMainThread = new byte[overlapLen]; + final byte[] fromClone = new byte[overlapLen]; + System.arraycopy(mainThreadResult, maxStart, fromMainThread, 0, overlapLen); + 
System.arraycopy(cloneResult, maxStart, fromClone, 0, overlapLen); + assertArrayEquals(fromMainThread, fromClone); + } + } + + @Override + public void onAfter() { + finishLatch.countDown(); + } + + @Override + public void onRejection(Exception e) { + // all threads are busy, and queueing can lead this test to deadlock, so we need take no action + startLatch.countDown(); + } + }); + } + + try { + startLatch.countDown(); + startLatch.await(); + ActionListener.completeWith(mainThreadResultFuture, () -> randomReadAndSlice(indexInput, mainThreadReadEnd)); + System.arraycopy(mainThreadResultFuture.actionGet(), readPos, output, readPos, mainThreadReadEnd - readPos); + readPos = mainThreadReadEnd; + finishLatch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + break; + default: + fail(); + } + assertEquals(readPos, indexInput.getFilePointer()); + } + assertEquals(length, indexInput.getFilePointer()); + return output; + } + +} diff --git a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java index a2687bf18cc..1e1484ca9a3 100644 --- a/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java +++ b/test/framework/src/main/java/org/elasticsearch/snapshots/mockstore/BlobContainerWrapper.java @@ -45,6 +45,16 @@ public class BlobContainerWrapper implements BlobContainer { return delegate.readBlob(name); } + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + return delegate.readBlob(blobName, position, length); + } + + @Override + public long readBlobPreferredLength() { + return delegate.readBlobPreferredLength(); + } + @Override public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException { delegate.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists); diff --git a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java index 20ebf4c3f15..bc8c600da0c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java +++ b/test/framework/src/main/java/org/elasticsearch/test/gateway/TestGatewayAllocator.java @@ -95,13 +95,13 @@ public class TestGatewayAllocator extends GatewayAllocator { }; @Override - public void applyStartedShards(RoutingAllocation allocation, List startedShards) { + public void applyStartedShards(List startedShards, RoutingAllocation allocation) { currentNodes = allocation.nodes(); allocation.routingNodes().shards(ShardRouting::active).forEach(this::addKnownAllocation); } @Override - public void applyFailedShards(RoutingAllocation allocation, List failedShards) { + public void applyFailedShards(List failedShards, RoutingAllocation allocation) { currentNodes = allocation.nodes(); for (FailedShard failedShard : failedShards) { final ShardRouting failedRouting = failedShard.getRoutingEntry(); @@ -116,9 +116,18 @@ public class TestGatewayAllocator extends GatewayAllocator { } @Override - public void allocateUnassigned(RoutingAllocation allocation) { + public void beforeAllocation(RoutingAllocation allocation) { + } + + @Override + public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) { + } + + @Override + public void allocateUnassigned(ShardRouting shardRouting, RoutingAllocation allocation, + 
UnassignedAllocationHandler unassignedAllocationHandler) { currentNodes = allocation.nodes(); - innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator); + innerAllocatedUnassigned(allocation, primaryShardAllocator, replicaShardAllocator, shardRouting, unassignedAllocationHandler); } /** diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java index a4edf47c569..89879d74638 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/XPackClientPlugin.java @@ -63,6 +63,7 @@ import org.elasticsearch.xpack.core.ilm.LifecycleAction; import org.elasticsearch.xpack.core.ilm.LifecycleType; import org.elasticsearch.xpack.core.ilm.ReadOnlyAction; import org.elasticsearch.xpack.core.ilm.RolloverAction; +import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction; import org.elasticsearch.xpack.core.ilm.SetPriorityAction; import org.elasticsearch.xpack.core.ilm.ShrinkAction; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; @@ -606,6 +607,7 @@ public class XPackClientPlugin extends Plugin implements ActionPlugin, NetworkPl new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, WaitForSnapshotAction.NAME, WaitForSnapshotAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new), // Transforms new NamedWriteableRegistry.Entry(XPackFeatureSet.Usage.class, XPackField.TRANSFORM, TransformFeatureSetUsage::new), new NamedWriteableRegistry.Entry(PersistentTaskParams.class, TransformField.TASK_NAME, TransformTaskParams::new), diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncActionBranchingStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncActionBranchingStep.java new file mode 100644 index 00000000000..501f202c4c0 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncActionBranchingStep.java @@ -0,0 +1,105 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.core.ilm; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.metadata.IndexMetadata; + +import java.util.Objects; + +/** + * This step wraps an {@link AsyncActionStep} in order to be able to manipulate what the next step will be, depending on the result of the + * wrapped {@link AsyncActionStep}. + *
+ * If the action response is complete, the {@link AsyncActionBranchingStep}'s nextStepKey will be the nextStepKey of the wrapped action. If + * the response is incomplete the nextStepKey will be the provided {@link AsyncActionBranchingStep#nextKeyOnIncompleteResponse}. + * Failures encountered whilst executing the wrapped action will be propagated directly. + */ +public class AsyncActionBranchingStep extends AsyncActionStep { + private final AsyncActionStep stepToExecute; + + private StepKey nextKeyOnIncompleteResponse; + private SetOnce onResponseResult; + + public AsyncActionBranchingStep(AsyncActionStep stepToExecute, StepKey nextKeyOnIncompleteResponse, Client client) { + // super.nextStepKey is set to null since it is not used by this step + super(stepToExecute.getKey(), null, client); + this.stepToExecute = stepToExecute; + this.nextKeyOnIncompleteResponse = nextKeyOnIncompleteResponse; + this.onResponseResult = new SetOnce<>(); + } + + @Override + public boolean isRetryable() { + return true; + } + + @Override + public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer, + Listener listener) { + stepToExecute.performAction(indexMetadata, currentClusterState, observer, new Listener() { + @Override + public void onResponse(boolean complete) { + onResponseResult.set(complete); + listener.onResponse(complete); + } + + @Override + public void onFailure(Exception e) { + listener.onFailure(e); + } + }); + } + + @Override + public final StepKey getNextStepKey() { + if (onResponseResult.get() == null) { + throw new IllegalStateException("cannot call getNextStepKey before performAction"); + } + return onResponseResult.get() ? stepToExecute.getNextStepKey() : nextKeyOnIncompleteResponse; + } + + /** + * Represents the {@link AsyncActionStep} that's wrapped by this branching step. + */ + AsyncActionStep getStepToExecute() { + return stepToExecute; + } + + /** + * The step key to be reported as the {@link AsyncActionBranchingStep#getNextStepKey()} if the response of the wrapped + * {@link AsyncActionBranchingStep#getStepToExecute()} is incomplete. 
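+ * For example, if this branching step wraps the {@link CreateSnapshotStep} introduced in this change (a sketch; the step
+ * keys are illustrative), an incomplete snapshot response routes execution to this key instead of the wrapped step's own
+ * nextStepKey:
+ * <pre>
+ * AsyncActionStep wrapped = new CreateSnapshotStep(createSnapshotKey, keyOnCompleteSnapshot, client);
+ * AsyncActionStep branching = new AsyncActionBranchingStep(wrapped, keyOnIncompleteSnapshot, client);
+ * </pre>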
+ */ + StepKey getNextKeyOnIncompleteResponse() { + return nextKeyOnIncompleteResponse; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + AsyncActionBranchingStep that = (AsyncActionBranchingStep) o; + return super.equals(o) + && Objects.equals(stepToExecute, that.stepToExecute) + && Objects.equals(nextKeyOnIncompleteResponse, that.nextKeyOnIncompleteResponse); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), stepToExecute, nextKeyOnIncompleteResponse); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncRetryDuringSnapshotActionStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncRetryDuringSnapshotActionStep.java index d3ac8f852cb..d480c24cb7c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncRetryDuringSnapshotActionStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/AsyncRetryDuringSnapshotActionStep.java @@ -33,8 +33,8 @@ public abstract class AsyncRetryDuringSnapshotActionStep extends AsyncActionStep } @Override - public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, - ClusterStateObserver observer, Listener listener) { + public final void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, + ClusterStateObserver observer, Listener listener) { // Wrap the original listener to handle exceptions caused by ongoing snapshots SnapshotExceptionListener snapshotExceptionListener = new SnapshotExceptionListener(indexMetadata.getIndex(), listener, observer); performDuringNoSnapshot(indexMetadata, currentClusterState, snapshotExceptionListener); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java new file mode 100644 index 00000000000..ad55f1cabc4 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStep.java @@ -0,0 +1,82 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.repositories.RepositoryMissingException; +import org.elasticsearch.snapshots.SnapshotMissingException; + +import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata; + +/** + * Deletes the snapshot designated by the repository and snapshot name present in the lifecycle execution state. 
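+ * <p>
+ * If the execution state carries no snapshot repository or snapshot name there is nothing to delete and the step completes
+ * successfully. A {@link SnapshotMissingException} is likewise treated as success, while a {@link RepositoryMissingException}
+ * is surfaced as an {@link IllegalStateException} because the policy cannot continue until the repository is created.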
+ */ +public class CleanupSnapshotStep extends AsyncRetryDuringSnapshotActionStep { + public static final String NAME = "cleanup-snapshot"; + + public CleanupSnapshotStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey, client); + } + + @Override + public boolean isRetryable() { + return true; + } + + @Override + void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, Listener listener) { + final String indexName = indexMetadata.getIndex().getName(); + + LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetadata); + final String repositoryName = lifecycleState.getSnapshotRepository(); + // if the snapshot information is missing from the ILM execution state there is nothing to delete so we move on + if (Strings.hasText(repositoryName) == false) { + listener.onResponse(true); + return; + } + final String snapshotName = lifecycleState.getSnapshotName(); + if (Strings.hasText(snapshotName) == false) { + listener.onResponse(true); + return; + } + DeleteSnapshotRequest deleteSnapshotRequest = new DeleteSnapshotRequest(repositoryName, snapshotName); + getClient().admin().cluster().deleteSnapshot(deleteSnapshotRequest, new ActionListener() { + + @Override + public void onResponse(AcknowledgedResponse acknowledgedResponse) { + if (acknowledgedResponse.isAcknowledged() == false) { + String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME); + throw new ElasticsearchException("cleanup snapshot step request for repository [" + repositoryName + "] and snapshot " + + "[" + snapshotName + "] policy [" + policyName + "] and index [" + indexName + "] failed to be acknowledged"); + } + listener.onResponse(true); + } + + @Override + public void onFailure(Exception e) { + if (e instanceof SnapshotMissingException) { + // during the happy flow we generate a snapshot name and that snapshot doesn't exist in the repository + listener.onResponse(true); + } else { + if (e instanceof RepositoryMissingException) { + String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME); + listener.onFailure(new IllegalStateException("repository [" + repositoryName + "] is missing. [" + policyName + + "] policy for index [" + indexName + "] cannot continue until the repository is created", e)); + } else { + listener.onFailure(e); + } + } + } + }); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java index 3e15900f919..349bec41b9c 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStep.java @@ -19,23 +19,32 @@ import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTO /** * Copies the execution state data from one index to another, typically after a - * new index has been created. Useful for actions such as shrink. + * new index has been created. As part of the execution state copy it will set the target index + * "current step" to the provided step name (part of the same phase and action as the current step's, unless + * the "complete" step is configured in which case the action will be changed to "complete" as well) + * + * Useful for actions such as shrink. 
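+ * For example, a shrink-style use copies the execution state onto the target index derived with the shrunken index prefix
+ * and resumes at the shrunken-index check (a sketch; the keys and the "shrink-" prefix are illustrative):
+ * <pre>
+ * new CopyExecutionStateStep(key, nextStepKey, "shrink-", ShrunkenIndexCheckStep.NAME);
+ * </pre>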
*/ public class CopyExecutionStateStep extends ClusterStateActionStep { public static final String NAME = "copy-execution-state"; private static final Logger logger = LogManager.getLogger(CopyExecutionStateStep.class); - private String shrunkIndexPrefix; + private final String targetIndexPrefix; + private final String targetNextStepName; - - public CopyExecutionStateStep(StepKey key, StepKey nextStepKey, String shrunkIndexPrefix) { + public CopyExecutionStateStep(StepKey key, StepKey nextStepKey, String targetIndexPrefix, String targetNextStepName) { super(key, nextStepKey); - this.shrunkIndexPrefix = shrunkIndexPrefix; + this.targetIndexPrefix = targetIndexPrefix; + this.targetNextStepName = targetNextStepName; } - String getShrunkIndexPrefix() { - return shrunkIndexPrefix; + String getTargetIndexPrefix() { + return targetIndexPrefix; + } + + String getTargetNextStepName() { + return targetNextStepName; } @Override @@ -48,8 +57,8 @@ public class CopyExecutionStateStep extends ClusterStateActionStep { } // get source index String indexName = indexMetadata.getIndex().getName(); - // get target shrink index - String targetIndexName = shrunkIndexPrefix + indexName; + // get target index + String targetIndexName = targetIndexPrefix + indexName; IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName); if (targetIndexMetadata == null) { @@ -67,8 +76,14 @@ public class CopyExecutionStateStep extends ClusterStateActionStep { LifecycleExecutionState.Builder relevantTargetCustomData = LifecycleExecutionState.builder(); relevantTargetCustomData.setIndexCreationDate(lifecycleDate); relevantTargetCustomData.setPhase(phase); - relevantTargetCustomData.setAction(action); - relevantTargetCustomData.setStep(ShrunkenIndexCheckStep.NAME); + relevantTargetCustomData.setStep(targetNextStepName); + if (targetNextStepName.equals(PhaseCompleteStep.NAME)) { + relevantTargetCustomData.setAction(PhaseCompleteStep.NAME); + } else { + relevantTargetCustomData.setAction(action); + } + relevantTargetCustomData.setSnapshotRepository(lifecycleState.getSnapshotRepository()); + relevantTargetCustomData.setSnapshotName(lifecycleState.getSnapshotName()); Metadata.Builder newMetadata = Metadata.builder(clusterState.getMetadata()) .put(IndexMetadata.builder(targetIndexMetadata) @@ -79,15 +94,22 @@ public class CopyExecutionStateStep extends ClusterStateActionStep { @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; - if (!super.equals(o)) return false; + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } CopyExecutionStateStep that = (CopyExecutionStateStep) o; - return Objects.equals(shrunkIndexPrefix, that.shrunkIndexPrefix); + return Objects.equals(targetIndexPrefix, that.targetIndexPrefix) && + Objects.equals(targetNextStepName, that.targetNextStepName); } @Override public int hashCode() { - return Objects.hash(super.hashCode(), shrunkIndexPrefix); + return Objects.hash(super.hashCode(), targetIndexPrefix, targetNextStepName); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java new file mode 100644 index 00000000000..c6f7cbdb8d5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CopySettingsStep.java @@ -0,0 +1,112 @@ +/* + * Copyright 
Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.Index; + +import java.util.Locale; +import java.util.Objects; + +/** + * Copy the provided settings from the source to the target index. + *
+ * The target index is derived from the source index using the provided prefix. + * This is useful for actions like shrink or searchable snapshot that create a new index and migrate the ILM execution from the source + * to the target index. + */ +public class CopySettingsStep extends ClusterStateActionStep { + public static final String NAME = "copy-settings"; + + private static final Logger logger = LogManager.getLogger(CopySettingsStep.class); + + private final String[] settingsKeys; + private final String indexPrefix; + + public CopySettingsStep(StepKey key, StepKey nextStepKey, String indexPrefix, String... settingsKeys) { + super(key, nextStepKey); + Objects.requireNonNull(indexPrefix); + Objects.requireNonNull(settingsKeys); + this.indexPrefix = indexPrefix; + this.settingsKeys = settingsKeys; + } + + @Override + public boolean isRetryable() { + return true; + } + + public String[] getSettingsKeys() { + return settingsKeys; + } + + public String getIndexPrefix() { + return indexPrefix; + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + String sourceIndexName = index.getName(); + IndexMetadata sourceIndexMetadata = clusterState.metadata().index(sourceIndexName); + String targetIndexName = indexPrefix + sourceIndexName; + IndexMetadata targetIndexMetadata = clusterState.metadata().index(targetIndexName); + + if (sourceIndexMetadata == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), sourceIndexName); + return clusterState; + } + + if (settingsKeys == null || settingsKeys.length == 0) { + return clusterState; + } + + if (targetIndexMetadata == null) { + String errorMessage = String.format(Locale.ROOT, "index [%s] is being referenced by ILM action [%s] on step [%s] but " + + "it doesn't exist", targetIndexName, getKey().getAction(), getKey().getName()); + logger.debug(errorMessage); + throw new IllegalStateException(errorMessage); + } + + Settings.Builder settings = Settings.builder().put(targetIndexMetadata.getSettings()); + for (String key : settingsKeys) { + String value = sourceIndexMetadata.getSettings().get(key); + settings.put(key, value); + } + + Metadata.Builder newMetaData = Metadata.builder(clusterState.getMetadata()) + .put(IndexMetadata.builder(targetIndexMetadata) + .settingsVersion(targetIndexMetadata.getSettingsVersion() + 1) + .settings(settings)); + return ClusterState.builder(clusterState).metadata(newMetaData).build(); + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + CopySettingsStep that = (CopySettingsStep) o; + return Objects.equals(settingsKeys, that.settingsKeys) && + Objects.equals(indexPrefix, that.indexPrefix); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), settingsKeys, indexPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java new file mode 100644 index 00000000000..9caf0647cb5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStep.java @@ -0,0 +1,83 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.snapshots.SnapshotInfo; + +import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata; + +/** + * Creates a snapshot of the managed index into the configured repository and snapshot name. The repository and snapshot names are expected + * to be present in the lifecycle execution state (usually generated and stored by a different ILM step) + */ +public class CreateSnapshotStep extends AsyncRetryDuringSnapshotActionStep { + public static final String NAME = "create-snapshot"; + + private static final Logger logger = LogManager.getLogger(CreateSnapshotStep.class); + + public CreateSnapshotStep(StepKey key, StepKey nextStepKey, Client client) { + super(key, nextStepKey, client); + } + + @Override + public boolean isRetryable() { + return true; + } + + @Override + void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, Listener listener) { + final String indexName = indexMetadata.getIndex().getName(); + + final LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetadata); + + final String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME); + final String snapshotRepository = lifecycleState.getSnapshotRepository(); + if (Strings.hasText(snapshotRepository) == false) { + listener.onFailure(new IllegalStateException("snapshot repository is not present for policy [" + policyName + "] and index [" + + indexName + "]")); + return; + } + + final String snapshotName = lifecycleState.getSnapshotName(); + if (Strings.hasText(snapshotName) == false) { + listener.onFailure( + new IllegalStateException("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]")); + return; + } + CreateSnapshotRequest request = new CreateSnapshotRequest(snapshotRepository, snapshotName); + request.indices(indexName); + // this is safe as the snapshot creation will still be async, it's just that the listener will be notified when the snapshot is + // complete + request.waitForCompletion(true); + request.includeGlobalState(false); + request.masterNodeTimeout(getMasterTimeout(currentClusterState)); + getClient().admin().cluster().createSnapshot(request, + ActionListener.wrap(response -> { + logger.debug("create snapshot response for policy [{}] and index [{}] is: {}", policyName, indexName, + Strings.toString(response)); + final SnapshotInfo snapInfo = response.getSnapshotInfo(); + + // Check that there are no failed shards, since the request may not entirely + // fail, but may still have failures (such as in the case of an aborted snapshot) + if (snapInfo.failedShards() == 0) { + listener.onResponse(true); + } else { + int failures = snapInfo.failedShards(); + int total = snapInfo.totalShards(); + logger.warn("failed to create snapshot successfully, {} failures out of {} total shards failed", failures, total); + listener.onResponse(false); + } + }, listener::onFailure)); + } +} diff --git 
a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java index 0f3e6d70fd8..024a206b44a 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/DeleteAction.java @@ -5,17 +5,20 @@ */ package org.elasticsearch.xpack.core.ilm; +import org.elasticsearch.Version; import org.elasticsearch.client.Client; +import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; -import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.common.xcontent.XContentParser; import java.io.IOException; import java.util.Arrays; import java.util.List; +import java.util.Objects; /** * A {@link LifecycleAction} which deletes the index. @@ -23,20 +26,42 @@ import java.util.List; public class DeleteAction implements LifecycleAction { public static final String NAME = "delete"; - private static final ObjectParser PARSER = new ObjectParser<>(NAME, DeleteAction::new); + public static final ParseField DELETE_SEARCHABLE_SNAPSHOT_FIELD = new ParseField("delete_searchable_snapshot"); + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new DeleteAction(a[0] == null ? true : (boolean) a[0])); + + static { + PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), DELETE_SEARCHABLE_SNAPSHOT_FIELD); + } public static DeleteAction parse(XContentParser parser) { return PARSER.apply(parser, null); } + private final boolean deleteSearchableSnapshot; + public DeleteAction() { + this(true); + } + + public DeleteAction(boolean deleteSearchableSnapshot) { + this.deleteSearchableSnapshot = deleteSearchableSnapshot; } public DeleteAction(StreamInput in) throws IOException { + if (in.getVersion().onOrAfter(Version.V_7_8_0)) { + this.deleteSearchableSnapshot = in.readBoolean(); + } else { + this.deleteSearchableSnapshot = true; + } } @Override public void writeTo(StreamOutput out) throws IOException { + if (out.getVersion().onOrAfter(Version.V_7_8_0)) { + out.writeBoolean(deleteSearchableSnapshot); + } } @Override @@ -47,6 +72,7 @@ public class DeleteAction implements LifecycleAction { @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); + builder.field(DELETE_SEARCHABLE_SNAPSHOT_FIELD.getPreferredName(), deleteSearchableSnapshot); builder.endObject(); return builder; } @@ -60,15 +86,23 @@ public class DeleteAction implements LifecycleAction { public List toSteps(Client client, String phase, Step.StepKey nextStepKey) { Step.StepKey waitForNoFollowerStepKey = new Step.StepKey(phase, NAME, WaitForNoFollowersStep.NAME); Step.StepKey deleteStepKey = new Step.StepKey(phase, NAME, DeleteStep.NAME); + Step.StepKey cleanSnapshotKey = new Step.StepKey(phase, NAME, CleanupSnapshotStep.NAME); - WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, deleteStepKey, client); - DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); - return Arrays.asList(waitForNoFollowersStep, deleteStep); + if (deleteSearchableSnapshot) { + WaitForNoFollowersStep 
waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, cleanSnapshotKey, client); + CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, deleteStepKey, client); + DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); + return Arrays.asList(waitForNoFollowersStep, cleanupSnapshotStep, deleteStep); + } else { + WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, deleteStepKey, client); + DeleteStep deleteStep = new DeleteStep(deleteStepKey, nextStepKey, client); + return Arrays.asList(waitForNoFollowersStep, deleteStep); + } } @Override public int hashCode() { - return 1; + return Objects.hash(deleteSearchableSnapshot); } @Override @@ -79,7 +113,8 @@ public class DeleteAction implements LifecycleAction { if (obj.getClass() != getClass()) { return false; } - return true; + DeleteAction that = (DeleteAction) obj; + return deleteSearchableSnapshot == that.deleteSearchableSnapshot; } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java new file mode 100644 index 00000000000..49d05cc4a68 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStep.java @@ -0,0 +1,176 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.index.Index; + +import java.util.Collections; +import java.util.List; +import java.util.Locale; +import java.util.Objects; + +import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY; +import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata; + +/** + * Generates a snapshot name for the given index and records it in the index metadata along with the provided snapshot repository. + *
+ * The generated snapshot name will be in the format {day-indexName-policyName-randomUUID} + * eg.: 2020.03.30-myindex-mypolicy-cmuce-qfvmn_dstqw-ivmjc1etsa + */ +public class GenerateSnapshotNameStep extends ClusterStateActionStep { + + public static final String NAME = "generate-snapshot-name"; + + private static final Logger logger = LogManager.getLogger(CreateSnapshotStep.class); + + private static final IndexNameExpressionResolver.DateMathExpressionResolver DATE_MATH_RESOLVER = + new IndexNameExpressionResolver.DateMathExpressionResolver(); + + private final String snapshotRepository; + + public GenerateSnapshotNameStep(StepKey key, StepKey nextStepKey, String snapshotRepository) { + super(key, nextStepKey); + this.snapshotRepository = snapshotRepository; + } + + public String getSnapshotRepository() { + return snapshotRepository; + } + + @Override + public ClusterState performAction(Index index, ClusterState clusterState) { + IndexMetadata indexMetaData = clusterState.metadata().index(index); + if (indexMetaData == null) { + // Index must have been since deleted, ignore it + logger.debug("[{}] lifecycle action for index [{}] executed but index no longer exists", getKey().getAction(), index.getName()); + return clusterState; + } + + ClusterState.Builder newClusterStateBuilder = ClusterState.builder(clusterState); + + LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetaData); + assert lifecycleState.getSnapshotName() == null : "index " + index.getName() + " should not have a snapshot generated by " + + "the ilm policy but has " + lifecycleState.getSnapshotName(); + LifecycleExecutionState.Builder newCustomData = LifecycleExecutionState.builder(lifecycleState); + String policy = indexMetaData.getSettings().get(LifecycleSettings.LIFECYCLE_NAME); + String snapshotNamePrefix = ("<{now/d}-" + index.getName() + "-" + policy + ">").toLowerCase(Locale.ROOT); + String snapshotName = generateSnapshotName(snapshotNamePrefix); + ActionRequestValidationException validationException = validateGeneratedSnapshotName(snapshotNamePrefix, snapshotName); + if (validationException != null) { + logger.warn("unable to generate a snapshot name as part of policy [{}] for index [{}] due to [{}]", + policy, index.getName(), validationException.getMessage()); + throw validationException; + } + newCustomData.setSnapshotName(snapshotName); + newCustomData.setSnapshotRepository(snapshotRepository); + + IndexMetadata.Builder indexMetadataBuilder = IndexMetadata.builder(indexMetaData); + indexMetadataBuilder.putCustom(ILM_CUSTOM_METADATA_KEY, newCustomData.build().asMap()); + newClusterStateBuilder.metadata(Metadata.builder(clusterState.getMetadata()).put(indexMetadataBuilder)); + return newClusterStateBuilder.build(); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), snapshotRepository); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + GenerateSnapshotNameStep other = (GenerateSnapshotNameStep) obj; + return super.equals(obj) && + Objects.equals(snapshotRepository, other.snapshotRepository); + } + + /** + * Since snapshots need to be uniquely named, this method will resolve any date math used in + * the provided name, as well as appending a unique identifier so expressions that may overlap + * still result in unique snapshot names. 
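+ * For example (a sketch; the resolved date and the random suffix vary per invocation):
+ * <pre>
+ * String name = generateSnapshotName("&lt;{now/d}-myindex-mypolicy&gt;");
+ * // e.g. "2020.03.30-myindex-mypolicy-cmuce-qfvmn_dstqw-ivmjc1etsa"
+ * </pre>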
+ */ + public static String generateSnapshotName(String name) { + return generateSnapshotName(name, new ResolverContext()); + } + + public static String generateSnapshotName(String name, IndexNameExpressionResolver.Context context) { + List candidates = DATE_MATH_RESOLVER.resolve(context, Collections.singletonList(name)); + if (candidates.size() != 1) { + throw new IllegalStateException("resolving snapshot name " + name + " generated more than one candidate: " + candidates); + } + // TODO: we are breaking the rules of UUIDs by lowercasing this here, find an alternative (snapshot names must be lowercase) + return candidates.get(0) + "-" + UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); + } + + /** + * This is a context for the DateMathExpressionResolver, which does not require + * {@code IndicesOptions} or {@code ClusterState} since it only uses the start + * time to resolve expressions + */ + public static final class ResolverContext extends IndexNameExpressionResolver.Context { + public ResolverContext() { + this(System.currentTimeMillis()); + } + + public ResolverContext(long startTime) { + super(null, null, startTime, false, false); + } + + @Override + public ClusterState getState() { + throw new UnsupportedOperationException("should never be called"); + } + + @Override + public IndicesOptions getOptions() { + throw new UnsupportedOperationException("should never be called"); + } + } + + @Nullable + public static ActionRequestValidationException validateGeneratedSnapshotName(String snapshotPrefix, String snapshotName) { + ActionRequestValidationException err = new ActionRequestValidationException(); + if (Strings.hasText(snapshotPrefix) == false) { + err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: cannot be empty"); + } + if (snapshotName.contains("#")) { + err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must not contain '#'"); + } + if (snapshotName.charAt(0) == '_') { + err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must not start with '_'"); + } + if (snapshotName.toLowerCase(Locale.ROOT).equals(snapshotName) == false) { + err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must be lowercase"); + } + if (Strings.validFileName(snapshotName) == false) { + err.addValidationError("invalid snapshot name [" + snapshotPrefix + "]: must not contain contain the following characters " + + Strings.INVALID_FILENAME_CHARS); + } + + if (err.validationErrors().size() > 0) { + return err; + } else { + return null; + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java index 711e57d3b6d..453aba294bd 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponse.java @@ -46,6 +46,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl private static final ParseField STEP_INFO_FIELD = new ParseField("step_info"); private static final ParseField PHASE_EXECUTION_INFO = new ParseField("phase_execution"); private static final ParseField AGE_FIELD = new ParseField("age"); + private static final ParseField REPOSITORY_NAME = new ParseField("repository_name"); + private static final ParseField SNAPSHOT_NAME = new ParseField("snapshot_name"); public static final 
ConstructingObjectParser PARSER = new ConstructingObjectParser<>( "index_lifecycle_explain_response", @@ -63,6 +65,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl (Long) (a[8]), (Long) (a[9]), (Long) (a[10]), + (String) a[16], + (String) a[17], (BytesReference) a[11], (PhaseExecutionInfo) a[12] // a[13] == "age" @@ -89,6 +93,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), AGE_FIELD); PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), IS_AUTO_RETRYABLE_ERROR_FIELD); PARSER.declareInt(ConstructingObjectParser.optionalConstructorArg(), FAILED_STEP_RETRY_COUNT_FIELD); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), REPOSITORY_NAME); + PARSER.declareString(ConstructingObjectParser.optionalConstructorArg(), SNAPSHOT_NAME); } private final String index; @@ -106,23 +112,28 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl private final PhaseExecutionInfo phaseExecutionInfo; private final Boolean isAutoRetryableError; private final Integer failedStepRetryCount; + private final String repositoryName; + private final String snapshotName; public static IndexLifecycleExplainResponse newManagedIndexResponse(String index, String policyName, Long lifecycleDate, String phase, String action, String step, String failedStep, Boolean isAutoRetryableError, Integer failedStepRetryCount, - Long phaseTime, Long actionTime, Long stepTime, BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + Long phaseTime, Long actionTime, Long stepTime, String repositoryName, String snapshotName, BytesReference stepInfo, + PhaseExecutionInfo phaseExecutionInfo) { return new IndexLifecycleExplainResponse(index, true, policyName, lifecycleDate, phase, action, step, failedStep, - isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo); + isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, repositoryName, snapshotName, stepInfo, + phaseExecutionInfo); } public static IndexLifecycleExplainResponse newUnmanagedIndexResponse(String index) { return new IndexLifecycleExplainResponse(index, false, null, null, null, null, null, null, null, null, null, null, null, null, - null); + null, null, null); } private IndexLifecycleExplainResponse(String index, boolean managedByILM, String policyName, Long lifecycleDate, String phase, String action, String step, String failedStep, Boolean isAutoRetryableError, Integer failedStepRetryCount, Long phaseTime, Long actionTime, Long stepTime, - BytesReference stepInfo, PhaseExecutionInfo phaseExecutionInfo) { + String repositoryName, String snapshotName, BytesReference stepInfo, + PhaseExecutionInfo phaseExecutionInfo) { if (managedByILM) { if (policyName == null) { throw new IllegalArgumentException("[" + POLICY_NAME_FIELD.getPreferredName() + "] cannot be null for managed index"); @@ -157,6 +168,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl this.failedStepRetryCount = failedStepRetryCount; this.stepInfo = stepInfo; this.phaseExecutionInfo = phaseExecutionInfo; + this.repositoryName = repositoryName; + this.snapshotName = snapshotName; } public IndexLifecycleExplainResponse(StreamInput in) throws IOException { @@ -181,6 +194,13 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl isAutoRetryableError = null; 
failedStepRetryCount = null; } + if (in.getVersion().onOrAfter(Version.V_7_8_0)) { + repositoryName = in.readOptionalString(); + snapshotName = in.readOptionalString(); + } else { + repositoryName = null; + snapshotName = null; + } } else { policyName = null; lifecycleDate = null; @@ -195,6 +215,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl stepTime = null; stepInfo = null; phaseExecutionInfo = null; + repositoryName = null; + snapshotName = null; } } @@ -218,6 +240,10 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl out.writeOptionalBoolean(isAutoRetryableError); out.writeOptionalVInt(failedStepRetryCount); } + if (out.getVersion().onOrAfter(Version.V_7_8_0)) { + out.writeOptionalString(repositoryName); + out.writeOptionalString(snapshotName); + } } } @@ -289,6 +315,14 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl } } + public String getRepositoryName() { + return repositoryName; + } + + public String getSnapshotName() { + return snapshotName; + } + @Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { builder.startObject(); @@ -327,6 +361,12 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl if (failedStepRetryCount != null) { builder.field(FAILED_STEP_RETRY_COUNT_FIELD.getPreferredName(), failedStepRetryCount); } + if (repositoryName != null) { + builder.field(REPOSITORY_NAME.getPreferredName(), repositoryName); + } + if (snapshotName != null) { + builder.field(SNAPSHOT_NAME.getPreferredName(), snapshotName); + } if (stepInfo != null && stepInfo.length() > 0) { builder.rawField(STEP_INFO_FIELD.getPreferredName(), stepInfo.streamInput(), XContentType.JSON); } @@ -341,7 +381,7 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl @Override public int hashCode() { return Objects.hash(index, managedByILM, policyName, lifecycleDate, phase, action, step, failedStep, isAutoRetryableError, - failedStepRetryCount, phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo); + failedStepRetryCount, phaseTime, actionTime, stepTime, repositoryName, snapshotName, stepInfo, phaseExecutionInfo); } @Override @@ -366,6 +406,8 @@ public class IndexLifecycleExplainResponse implements ToXContentObject, Writeabl Objects.equals(phaseTime, other.phaseTime) && Objects.equals(actionTime, other.actionTime) && Objects.equals(stepTime, other.stepTime) && + Objects.equals(repositoryName, other.repositoryName) && + Objects.equals(snapshotName, other.snapshotName) && Objects.equals(stepInfo, other.stepInfo) && Objects.equals(phaseExecutionInfo, other.phaseExecutionInfo); } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionState.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionState.java index 0607470f82b..2746b075310 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionState.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionState.java @@ -36,6 +36,8 @@ public class LifecycleExecutionState { private static final String FAILED_STEP_RETRY_COUNT = "failed_step_retry_count"; private static final String STEP_INFO = "step_info"; private static final String PHASE_DEFINITION = "phase_definition"; + private static final String SNAPSHOT_NAME ="snapshot_name"; + private static final String SNAPSHOT_REPOSITORY 
="snapshot_repository"; private final String phase; private final String action; @@ -49,10 +51,12 @@ public class LifecycleExecutionState { private final Long phaseTime; private final Long actionTime; private final Long stepTime; + private final String snapshotName; + private final String snapshotRepository; private LifecycleExecutionState(String phase, String action, String step, String failedStep, Boolean isAutoRetryableError, Integer failedStepRetryCount, String stepInfo, String phaseDefinition, Long lifecycleDate, - Long phaseTime, Long actionTime, Long stepTime) { + Long phaseTime, Long actionTime, Long stepTime, String snapshotRepository, String snapshotName) { this.phase = phase; this.action = action; this.step = step; @@ -65,6 +69,8 @@ public class LifecycleExecutionState { this.phaseTime = phaseTime; this.actionTime = actionTime; this.stepTime = stepTime; + this.snapshotRepository = snapshotRepository; + this.snapshotName = snapshotName; } /** @@ -122,6 +128,8 @@ public class LifecycleExecutionState { .setIndexCreationDate(state.lifecycleDate) .setPhaseTime(state.phaseTime) .setActionTime(state.actionTime) + .setSnapshotRepository(state.snapshotRepository) + .setSnapshotName(state.snapshotName) .setStepTime(state.stepTime); } @@ -151,6 +159,12 @@ public class LifecycleExecutionState { if (customData.containsKey(PHASE_DEFINITION)) { builder.setPhaseDefinition(customData.get(PHASE_DEFINITION)); } + if (customData.containsKey(SNAPSHOT_REPOSITORY)) { + builder.setSnapshotRepository(customData.get(SNAPSHOT_REPOSITORY)); + } + if (customData.containsKey(SNAPSHOT_NAME)) { + builder.setSnapshotName(customData.get(SNAPSHOT_NAME)); + } if (customData.containsKey(INDEX_CREATION_DATE)) { try { builder.setIndexCreationDate(Long.parseLong(customData.get(INDEX_CREATION_DATE))); @@ -229,6 +243,12 @@ public class LifecycleExecutionState { if (phaseDefinition != null) { result.put(PHASE_DEFINITION, String.valueOf(phaseDefinition)); } + if (snapshotRepository != null) { + result.put(SNAPSHOT_REPOSITORY, snapshotRepository); + } + if (snapshotName != null) { + result.put(SNAPSHOT_NAME, snapshotName); + } return Collections.unmodifiableMap(result); } @@ -280,6 +300,14 @@ public class LifecycleExecutionState { return stepTime; } + public String getSnapshotName() { + return snapshotName; + } + + public String getSnapshotRepository() { + return snapshotRepository; + } + @Override public boolean equals(Object o) { if (this == o) return true; @@ -296,13 +324,16 @@ public class LifecycleExecutionState { Objects.equals(isAutoRetryableError(), that.isAutoRetryableError()) && Objects.equals(getFailedStepRetryCount(), that.getFailedStepRetryCount()) && Objects.equals(getStepInfo(), that.getStepInfo()) && + Objects.equals(getSnapshotRepository(), that.getSnapshotRepository()) && + Objects.equals(getSnapshotName(), that.getSnapshotName()) && Objects.equals(getPhaseDefinition(), that.getPhaseDefinition()); } @Override public int hashCode() { return Objects.hash(getPhase(), getAction(), getStep(), getFailedStep(), isAutoRetryableError(), getFailedStepRetryCount(), - getStepInfo(), getPhaseDefinition(), getLifecycleDate(), getPhaseTime(), getActionTime(), getStepTime()); + getStepInfo(), getPhaseDefinition(), getLifecycleDate(), getPhaseTime(), getActionTime(), getStepTime(), + getSnapshotRepository(), getSnapshotName()); } @Override @@ -323,6 +354,8 @@ public class LifecycleExecutionState { private Long stepTime; private Boolean isAutoRetryableError; private Integer failedStepRetryCount; + private String 
snapshotName; + private String snapshotRepository; public Builder setPhase(String phase) { this.phase = phase; @@ -384,9 +417,19 @@ public class LifecycleExecutionState { return this; } + public Builder setSnapshotRepository(String snapshotRepository) { + this.snapshotRepository = snapshotRepository; + return this; + } + + public Builder setSnapshotName(String snapshotName) { + this.snapshotName = snapshotName; + return this; + } + public LifecycleExecutionState build() { return new LifecycleExecutionState(phase, action, step, failedStep, isAutoRetryableError, failedStepRetryCount, stepInfo, - phaseDefinition, indexCreationDate, phaseTime, actionTime, stepTime); + phaseDefinition, indexCreationDate, phaseTime, actionTime, stepTime, snapshotRepository, snapshotName); } } diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java new file mode 100644 index 00000000000..5f1160a07e5 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStep.java @@ -0,0 +1,119 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.rest.RestStatus; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; + +import java.util.Objects; + +import static org.elasticsearch.xpack.core.ilm.LifecycleExecutionState.fromIndexMetadata; + +/** + * Restores the snapshot created for the designated index via the ILM policy to an index named using the provided prefix appended to the + * designated index name. 
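+ * As an illustrative example (the index name is hypothetical): with the {@code restored-} prefix that
+ * {@link SearchableSnapshotAction} passes in, the snapshot for an index {@code logs-1} would be mounted as {@code restored-logs-1}.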
+ */
+public class MountSnapshotStep extends AsyncRetryDuringSnapshotActionStep {
+    public static final String NAME = "mount-snapshot";
+
+    private static final Logger logger = LogManager.getLogger(MountSnapshotStep.class);
+
+    private final String restoredIndexPrefix;
+
+    public MountSnapshotStep(StepKey key, StepKey nextStepKey, Client client, String restoredIndexPrefix) {
+        super(key, nextStepKey, client);
+        this.restoredIndexPrefix = restoredIndexPrefix;
+    }
+
+    @Override
+    public boolean isRetryable() {
+        return true;
+    }
+
+    public String getRestoredIndexPrefix() {
+        return restoredIndexPrefix;
+    }
+
+    @Override
+    void performDuringNoSnapshot(IndexMetadata indexMetadata, ClusterState currentClusterState, Listener listener) {
+        final String indexName = indexMetadata.getIndex().getName();
+
+        LifecycleExecutionState lifecycleState = fromIndexMetadata(indexMetadata);
+
+        String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME);
+        final String snapshotRepository = lifecycleState.getSnapshotRepository();
+        if (Strings.hasText(snapshotRepository) == false) {
+            listener.onFailure(new IllegalStateException("snapshot repository is not present for policy [" + policyName + "] and index ["
+                + indexName + "]"));
+            return;
+        }
+
+        final String snapshotName = lifecycleState.getSnapshotName();
+        if (Strings.hasText(snapshotName) == false) {
+            listener.onFailure(
+                new IllegalStateException("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]"));
+            return;
+        }
+
+        String mountedIndexName = restoredIndexPrefix + indexName;
+        if (currentClusterState.metadata().index(mountedIndexName) != null) {
+            logger.debug("mounted index [{}] for policy [{}] and index [{}] already exists. will not attempt to mount the index again",
+                mountedIndexName, policyName, indexName);
+            listener.onResponse(true);
+            return;
+        }
+
+        final MountSearchableSnapshotRequest mountSearchableSnapshotRequest = new MountSearchableSnapshotRequest(mountedIndexName,
+            snapshotRepository, snapshotName, indexName, Settings.builder()
+            .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), Boolean.FALSE.toString())
+            .build(),
+            // we captured the index metadata when we took the snapshot. the index likely had the ILM execution state in the metadata.
+            // if we were to restore the lifecycle.name setting, the restored index would be captured by the ILM runner and,
+            // depending on what ILM execution state was captured at snapshot time, make its way forward from _that_ step in
+            // the ILM policy.
+            // we'll re-set this setting on the restored index at a later step once we've restored a deterministic execution state
+            new String[]{LifecycleSettings.LIFECYCLE_NAME},
+            // we'll not wait for the snapshot to complete in this step as the async steps are executed from threads that shouldn't
+            // perform expensive operations (i.e. 
clusterStateProcessed) + false); + getClient().execute(MountSearchableSnapshotAction.INSTANCE, mountSearchableSnapshotRequest, + ActionListener.wrap(response -> { + if (response.status() != RestStatus.OK && response.status() != RestStatus.ACCEPTED) { + logger.debug("mount snapshot response failed to complete"); + throw new ElasticsearchException("mount snapshot response failed to complete, got response " + response.status()); + } + listener.onResponse(true); + }, listener::onFailure)); + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), restoredIndexPrefix); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + MountSnapshotStep other = (MountSnapshotStep) obj; + return super.equals(obj) && Objects.equals(restoredIndexPrefix, other.restoredIndexPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java new file mode 100644 index 00000000000..5a9b315ca78 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotAction.java @@ -0,0 +1,137 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ilm.Step.StepKey; + +import java.io.IOException; +import java.util.Arrays; +import java.util.List; +import java.util.Objects; + +/** + * A {@link LifecycleAction} that will convert the index into a searchable snapshot, by taking a snapshot of the index, creating a + * searchable snapshot and the corresponding "searchable snapshot index", deleting the original index and swapping its aliases to the + * newly created searchable snapshot backed index. 
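+ * The steps produced by this action run in the following order: wait for no followers, generate the snapshot name, clean up any
+ * leftover snapshot, create the snapshot (branching back to the cleanup step if the snapshot is incomplete), mount the snapshot
+ * as an index, wait for the restored index to go green, copy the lifecycle execution state, copy the lifecycle settings, and
+ * finally swap the aliases and delete the source index.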
+ */ +public class SearchableSnapshotAction implements LifecycleAction { + public static final String NAME = "searchable_snapshot"; + + public static final ParseField SNAPSHOT_REPOSITORY = new ParseField("snapshot_repository"); + + public static final String RESTORED_INDEX_PREFIX = "restored-"; + + private static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>(NAME, + a -> new SearchableSnapshotAction((String) a[0])); + + static { + PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_REPOSITORY); + } + + public static SearchableSnapshotAction parse(XContentParser parser) { + return PARSER.apply(parser, null); + } + + private final String snapshotRepository; + + public SearchableSnapshotAction(String snapshotRepository) { + if (Strings.hasText(snapshotRepository) == false) { + throw new IllegalArgumentException("the snapshot repository must be specified"); + } + this.snapshotRepository = snapshotRepository; + } + + public SearchableSnapshotAction(StreamInput in) throws IOException { + this(in.readString()); + } + + @Override + public List toSteps(Client client, String phase, StepKey nextStepKey) { + StepKey waitForNoFollowerStepKey = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME); + StepKey generateSnapshotNameKey = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME); + StepKey cleanSnapshotKey = new StepKey(phase, NAME, CleanupSnapshotStep.NAME); + StepKey createSnapshotKey = new StepKey(phase, NAME, CreateSnapshotStep.NAME); + StepKey mountSnapshotKey = new StepKey(phase, NAME, MountSnapshotStep.NAME); + StepKey waitForGreenRestoredIndexKey = new StepKey(phase, NAME, WaitForIndexColorStep.NAME); + StepKey copyMetadataKey = new StepKey(phase, NAME, CopyExecutionStateStep.NAME); + StepKey copyLifecyclePolicySettingKey = new StepKey(phase, NAME, CopySettingsStep.NAME); + StepKey swapAliasesKey = new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME); + + WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, generateSnapshotNameKey, + client); + GenerateSnapshotNameStep generateSnapshotNameStep = new GenerateSnapshotNameStep(generateSnapshotNameKey, cleanSnapshotKey, + snapshotRepository); + CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, createSnapshotKey, client); + AsyncActionBranchingStep createSnapshotBranchingStep = new AsyncActionBranchingStep( + new CreateSnapshotStep(createSnapshotKey, mountSnapshotKey, client), cleanSnapshotKey, client); + MountSnapshotStep mountSnapshotStep = new MountSnapshotStep(mountSnapshotKey, waitForGreenRestoredIndexKey, + client, RESTORED_INDEX_PREFIX); + WaitForIndexColorStep waitForGreenIndexHealthStep = new WaitForIndexColorStep(waitForGreenRestoredIndexKey, + copyMetadataKey, ClusterHealthStatus.GREEN, RESTORED_INDEX_PREFIX); + // a policy with only the cold phase will have a null "nextStepKey", hence the "null" nextStepKey passed in below when that's the + // case + CopyExecutionStateStep copyMetadataStep = new CopyExecutionStateStep(copyMetadataKey, copyLifecyclePolicySettingKey, + RESTORED_INDEX_PREFIX, nextStepKey != null ? 
nextStepKey.getName() : "null");
+        CopySettingsStep copySettingsStep = new CopySettingsStep(copyLifecyclePolicySettingKey, swapAliasesKey, RESTORED_INDEX_PREFIX,
+            LifecycleSettings.LIFECYCLE_NAME);
+        // this step's next key is null because the restored index (which, after this step, effectively replaces the source index)
+        // was already sent to the next key when we copied over the lifecycle execution state
+        SwapAliasesAndDeleteSourceIndexStep swapAliasesAndDeleteSourceIndexStep = new SwapAliasesAndDeleteSourceIndexStep(swapAliasesKey,
+            null, client, RESTORED_INDEX_PREFIX);
+
+        return Arrays.asList(waitForNoFollowersStep, generateSnapshotNameStep, cleanupSnapshotStep, createSnapshotBranchingStep,
+            mountSnapshotStep, waitForGreenIndexHealthStep, copyMetadataStep, copySettingsStep, swapAliasesAndDeleteSourceIndexStep);
+    }
+
+    @Override
+    public boolean isSafeAction() {
+        return true;
+    }
+
+    @Override
+    public String getWriteableName() {
+        return NAME;
+    }
+
+    @Override
+    public void writeTo(StreamOutput out) throws IOException {
+        out.writeString(snapshotRepository);
+    }
+
+    @Override
+    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        builder.startObject();
+        builder.field(SNAPSHOT_REPOSITORY.getPreferredName(), snapshotRepository);
+        builder.endObject();
+        return builder;
+    }
+
+    @Override
+    public boolean equals(Object o) {
+        if (this == o) {
+            return true;
+        }
+        if (o == null || getClass() != o.getClass()) {
+            return false;
+        }
+        SearchableSnapshotAction that = (SearchableSnapshotAction) o;
+        return Objects.equals(snapshotRepository, that.snapshotRepository);
+    }
+
+    @Override
+    public int hashCode() {
+        return Objects.hash(snapshotRepository);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java
index e94d3c8ae82..62a09f027c7 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkAction.java
@@ -104,7 +104,8 @@ public class ShrinkAction implements LifecycleAction {
         CheckShrinkReadyStep checkShrinkReadyStep = new CheckShrinkReadyStep(allocationRoutedKey, shrinkKey);
         ShrinkStep shrink = new ShrinkStep(shrinkKey, enoughShardsKey, client, numberOfShards, SHRUNKEN_INDEX_PREFIX);
         ShrunkShardsAllocatedStep allocated = new ShrunkShardsAllocatedStep(enoughShardsKey, copyMetadataKey, SHRUNKEN_INDEX_PREFIX);
-        CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, aliasKey, SHRUNKEN_INDEX_PREFIX);
+        CopyExecutionStateStep copyMetadata = new CopyExecutionStateStep(copyMetadataKey, aliasKey, SHRUNKEN_INDEX_PREFIX,
+            ShrunkenIndexCheckStep.NAME);
         ShrinkSetAliasStep aliasSwapAndDelete = new ShrinkSetAliasStep(aliasKey, isShrunkIndexKey, client, SHRUNKEN_INDEX_PREFIX);
         ShrunkenIndexCheckStep waitOnShrinkTakeover = new ShrunkenIndexCheckStep(isShrunkIndexKey, nextStepKey, SHRUNKEN_INDEX_PREFIX);
         return Arrays.asList(conditionalSkipShrinkStep, waitForNoFollowersStep, readOnlyStep, setSingleNodeStep, checkShrinkReadyStep,
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStep.java
index 1444cbc147d..ba227667610 100644
--- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStep.java
+++ 
b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/ShrinkSetAliasStep.java @@ -5,15 +5,14 @@ */ package org.elasticsearch.xpack.core.ilm; -import org.elasticsearch.action.ActionListener; -import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; import org.elasticsearch.client.Client; import org.elasticsearch.cluster.ClusterState; -import org.elasticsearch.cluster.metadata.AliasMetadata; import org.elasticsearch.cluster.metadata.IndexMetadata; import java.util.Objects; +import static org.elasticsearch.xpack.core.ilm.SwapAliasesAndDeleteSourceIndexStep.deleteSourceIndexAndTransferAliases; + /** * Following shrinking an index and deleting the original index, this step creates an alias with the same name as the original index which * points to the new shrunken index to allow clients to continue to use the original index name without being aware that it has shrunk. @@ -37,23 +36,7 @@ public class ShrinkSetAliasStep extends AsyncRetryDuringSnapshotActionStep { String index = indexMetadata.getIndex().getName(); // get target shrink index String targetIndexName = shrunkIndexPrefix + index; - IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest() - .masterNodeTimeout(getMasterTimeout(currentState)) - .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(index)) - .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(targetIndexName).alias(index)); - // copy over other aliases from original index - indexMetadata.getAliases().values().spliterator().forEachRemaining(aliasMetadataObjectCursor -> { - AliasMetadata aliasMetadataToAdd = aliasMetadataObjectCursor.value; - // inherit all alias properties except `is_write_index` - aliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() - .index(targetIndexName).alias(aliasMetadataToAdd.alias()) - .indexRouting(aliasMetadataToAdd.indexRouting()) - .searchRouting(aliasMetadataToAdd.searchRouting()) - .filter(aliasMetadataToAdd.filter() == null ? null : aliasMetadataToAdd.filter().string()) - .writeIndex(null)); - }); - getClient().admin().indices().aliases(aliasesRequest, ActionListener.wrap(response -> - listener.onResponse(true), listener::onFailure)); + deleteSourceIndexAndTransferAliases(getClient(), indexMetadata, getMasterTimeout(currentState), targetIndexName, listener); } @Override diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java new file mode 100644 index 00000000000..55a00fd2fb2 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStep.java @@ -0,0 +1,121 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ilm; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.ClusterStateObserver; +import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.common.unit.TimeValue; + +import java.util.Locale; +import java.util.Objects; + +/** + * This step swaps all the aliases from the source index to the restored index and deletes the source index. This is useful in scenarios + * following a restore from snapshot operation where the restored index will take the place of the source index in the ILM lifecycle. + */ +public class SwapAliasesAndDeleteSourceIndexStep extends AsyncActionStep { + public static final String NAME = "swap-aliases"; + private static final Logger logger = LogManager.getLogger(SwapAliasesAndDeleteSourceIndexStep.class); + + private final String targetIndexPrefix; + + public SwapAliasesAndDeleteSourceIndexStep(StepKey key, StepKey nextStepKey, Client client, String targetIndexPrefix) { + super(key, nextStepKey, client); + this.targetIndexPrefix = targetIndexPrefix; + } + + @Override + public boolean isRetryable() { + return true; + } + + public String getTargetIndexPrefix() { + return targetIndexPrefix; + } + + @Override + public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer, + Listener listener) { + String originalIndex = indexMetadata.getIndex().getName(); + final String targetIndexName = targetIndexPrefix + originalIndex; + IndexMetadata targetIndexMetadata = currentClusterState.metadata().index(targetIndexName); + + if (targetIndexMetadata == null) { + String policyName = indexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME); + String errorMessage = String.format(Locale.ROOT, "target index [%s] doesn't exist. stopping execution of lifecycle [%s] for" + + " index [%s]", targetIndexName, policyName, originalIndex); + logger.debug(errorMessage); + listener.onFailure(new IllegalStateException(errorMessage)); + return; + } + + deleteSourceIndexAndTransferAliases(getClient(), indexMetadata, getMasterTimeout(currentClusterState), targetIndexName, listener); + } + + /** + * Executes an {@link IndicesAliasesRequest} to copy over all the aliases from the source to the target index, and remove the source + * index. + *
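+ * An alias carrying the source index name is added to the target index, so clients can keep addressing the original index name
+ * after the source index has been deleted.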

+ * The is_write_index will *not* be set on the target index as this operation is currently executed on read-only indices. + */ + static void deleteSourceIndexAndTransferAliases(Client client, IndexMetadata sourceIndex, TimeValue masterTimeoutValue, + String targetIndex, Listener listener) { + String sourceIndexName = sourceIndex.getIndex().getName(); + IndicesAliasesRequest aliasesRequest = new IndicesAliasesRequest() + .masterNodeTimeout(masterTimeoutValue) + .addAliasAction(IndicesAliasesRequest.AliasActions.removeIndex().index(sourceIndexName)) + .addAliasAction(IndicesAliasesRequest.AliasActions.add().index(targetIndex).alias(sourceIndexName)); + // copy over other aliases from source index + sourceIndex.getAliases().values().spliterator().forEachRemaining(aliasMetaDataObjectCursor -> { + AliasMetadata aliasMetaDataToAdd = aliasMetaDataObjectCursor.value; + // inherit all alias properties except `is_write_index` + aliasesRequest.addAliasAction(IndicesAliasesRequest.AliasActions.add() + .index(targetIndex).alias(aliasMetaDataToAdd.alias()) + .indexRouting(aliasMetaDataToAdd.indexRouting()) + .searchRouting(aliasMetaDataToAdd.searchRouting()) + .filter(aliasMetaDataToAdd.filter() == null ? null : aliasMetaDataToAdd.filter().string()) + .writeIndex(null)); + }); + + client.admin().indices().aliases(aliasesRequest, + ActionListener.wrap(response -> { + if (response.isAcknowledged() == false) { + logger.warn("aliases swap from [{}] to [{}] response was not acknowledged", sourceIndexName, targetIndex); + } + listener.onResponse(true); + }, listener::onFailure)); + } + + @Override + public boolean indexSurvives() { + return false; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), targetIndexPrefix); + } + + @Override + public boolean equals(Object obj) { + if (obj == null) { + return false; + } + if (getClass() != obj.getClass()) { + return false; + } + SwapAliasesAndDeleteSourceIndexStep other = (SwapAliasesAndDeleteSourceIndexStep) obj; + return super.equals(obj) && + Objects.equals(targetIndexPrefix, other.targetIndexPrefix); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java index b520a41e449..5a4991796d6 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleType.java @@ -41,7 +41,7 @@ public class TimeseriesLifecycleType implements LifecycleType { static final List ORDERED_VALID_WARM_ACTIONS = Arrays.asList(SetPriorityAction.NAME, UnfollowAction.NAME, ReadOnlyAction.NAME, AllocateAction.NAME, ShrinkAction.NAME, ForceMergeAction.NAME); static final List ORDERED_VALID_COLD_ACTIONS = Arrays.asList(SetPriorityAction.NAME, UnfollowAction.NAME, AllocateAction.NAME, - FreezeAction.NAME); + FreezeAction.NAME, SearchableSnapshotAction.NAME); static final List ORDERED_VALID_DELETE_ACTIONS = Arrays.asList(WaitForSnapshotAction.NAME, DeleteAction.NAME); static final Set VALID_HOT_ACTIONS = Sets.newHashSet(ORDERED_VALID_HOT_ACTIONS); static final Set VALID_WARM_ACTIONS = Sets.newHashSet(ORDERED_VALID_WARM_ACTIONS); @@ -74,8 +74,9 @@ public class TimeseriesLifecycleType implements LifecycleType { Phase phase = phases.get(phaseName); if (phase != null) { Map actions = phase.getActions(); - if (actions.containsKey(UnfollowAction.NAME) == false - && 
(actions.containsKey(RolloverAction.NAME) || actions.containsKey(ShrinkAction.NAME))) { + if (actions.containsKey(UnfollowAction.NAME) == false && + (actions.containsKey(RolloverAction.NAME) || actions.containsKey(ShrinkAction.NAME) || + actions.containsKey(SearchableSnapshotAction.NAME))) { Map actionMap = new HashMap<>(phase.getActions()); actionMap.put(UnfollowAction.NAME, new UnfollowAction()); phase = new Phase(phase.getName(), phase.getMinimumAge(), actionMap); diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java index fd02a69999c..2984581cfe9 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStep.java @@ -7,42 +7,58 @@ package org.elasticsearch.xpack.core.ilm; import com.carrotsearch.hppc.cursors.ObjectCursor; +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; +import org.elasticsearch.cluster.metadata.IndexMetadata; import org.elasticsearch.cluster.routing.IndexRoutingTable; import org.elasticsearch.cluster.routing.IndexShardRoutingTable; -import org.elasticsearch.cluster.routing.RoutingTable; import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.xcontent.ToXContentObject; import org.elasticsearch.common.xcontent.XContentBuilder; import org.elasticsearch.index.Index; import java.io.IOException; +import java.util.Locale; import java.util.Objects; /** - * Wait Step for index based on color + * Wait Step for index based on color. Optionally derives the index name using the provided prefix (if any). */ - class WaitForIndexColorStep extends ClusterStateWaitStep { static final String NAME = "wait-for-index-color"; + private static final Logger logger = LogManager.getLogger(WaitForIndexColorStep.class); + private final ClusterHealthStatus color; + @Nullable + private final String indexNamePrefix; WaitForIndexColorStep(StepKey key, StepKey nextStepKey, ClusterHealthStatus color) { + this(key, nextStepKey, color, null); + } + + WaitForIndexColorStep(StepKey key, StepKey nextStepKey, ClusterHealthStatus color, @Nullable String indexNamePrefix) { super(key, nextStepKey); this.color = color; + this.indexNamePrefix = indexNamePrefix; } public ClusterHealthStatus getColor() { return this.color; } + public String getIndexNamePrefix() { + return indexNamePrefix; + } + @Override public int hashCode() { - return Objects.hash(super.hashCode(), this.color); + return Objects.hash(super.hashCode(), this.color, this.indexNamePrefix); } @Override @@ -54,13 +70,23 @@ class WaitForIndexColorStep extends ClusterStateWaitStep { return false; } WaitForIndexColorStep other = (WaitForIndexColorStep) obj; - return super.equals(obj) && Objects.equals(this.color, other.color); + return super.equals(obj) && Objects.equals(this.color, other.color) && Objects.equals(this.indexNamePrefix, other.indexNamePrefix); } @Override public Result isConditionMet(Index index, ClusterState clusterState) { - RoutingTable routingTable = clusterState.routingTable(); - IndexRoutingTable indexRoutingTable = routingTable.index(index); + String indexName = indexNamePrefix != null ? 
indexNamePrefix + index.getName() : index.getName();
+        IndexMetadata indexMetadata = clusterState.metadata().index(index);
+
+        if (indexMetadata == null) {
+            String errorMessage = String.format(Locale.ROOT, "[%s] lifecycle action for index [%s] executed but index no longer exists",
+                getKey().getAction(), indexName);
+            // Index must have since been deleted
+            logger.debug(errorMessage);
+            return new Result(false, new Info(errorMessage));
+        }
+
+        IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(indexMetadata.getIndex());
         Result result;
         switch (this.color) {
             case GREEN:
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotAction.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotAction.java
new file mode 100644
index 00000000000..fe31ff5d69d
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotAction.java
@@ -0,0 +1,20 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.core.searchablesnapshots;
+
+import org.elasticsearch.action.ActionType;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+
+public class MountSearchableSnapshotAction extends ActionType<RestoreSnapshotResponse> {
+
+    public static final MountSearchableSnapshotAction INSTANCE = new MountSearchableSnapshotAction();
+    public static final String NAME = "cluster:admin/snapshot/mount";
+
+    private MountSearchableSnapshotAction() {
+        super(NAME, RestoreSnapshotResponse::new);
+    }
+}
diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java
new file mode 100644
index 00000000000..a559a913a0d
--- /dev/null
+++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/MountSearchableSnapshotRequest.java
@@ -0,0 +1,189 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.core.searchablesnapshots; + +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.common.ParseField; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.xcontent.ConstructingObjectParser; +import org.elasticsearch.common.xcontent.ObjectParser; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.rest.RestRequest; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Objects; +import java.util.stream.Collectors; + +import static org.elasticsearch.common.settings.Settings.readSettingsFromStream; +import static org.elasticsearch.common.settings.Settings.writeSettingsToStream; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.constructorArg; +import static org.elasticsearch.common.xcontent.ConstructingObjectParser.optionalConstructorArg; + +public class MountSearchableSnapshotRequest extends MasterNodeRequest { + + public static final ConstructingObjectParser PARSER = new ConstructingObjectParser<>( + "mount_searchable_snapshot", true, + (a, request) -> new MountSearchableSnapshotRequest( + (String) ((a[1] != null) ? a[1] : Objects.requireNonNull(a[0])), + request.param("repository"), + request.param("snapshot"), + (String) a[0], + (Settings) ((a[2] != null) ? a[2] : Settings.EMPTY), + (String[]) ((a[3] != null) ? a[3] : Strings.EMPTY_ARRAY), + request.paramAsBoolean("wait_for_completion", false))); + + private static final ParseField INDEX_FIELD = new ParseField("index"); + private static final ParseField RENAMED_INDEX_FIELD = new ParseField("renamed_index"); + private static final ParseField INDEX_SETTINGS_FIELD = new ParseField("index_settings"); + private static final ParseField IGNORE_INDEX_SETTINGS_FIELD = new ParseField("ignore_index_settings"); + + static { + PARSER.declareField(constructorArg(), XContentParser::text, INDEX_FIELD, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), XContentParser::text, RENAMED_INDEX_FIELD, ObjectParser.ValueType.STRING); + PARSER.declareField(optionalConstructorArg(), Settings::fromXContent, INDEX_SETTINGS_FIELD, ObjectParser.ValueType.OBJECT); + PARSER.declareField(optionalConstructorArg(), + p -> p.list().stream().map(s -> (String) s).collect(Collectors.toList()).toArray(Strings.EMPTY_ARRAY), + IGNORE_INDEX_SETTINGS_FIELD, ObjectParser.ValueType.STRING_ARRAY); + } + + private final String mountedIndexName; + private final String repositoryName; + private final String snapshotName; + private final String snapshotIndexName; + private final Settings indexSettings; + private final String[] ignoredIndexSettings; + private final boolean waitForCompletion; + + /** + * Constructs a new mount searchable snapshot request, restoring an index with the settings needed to make it a searchable snapshot. 
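+ * A minimal usage sketch, with illustrative names only: {@code new MountSearchableSnapshotRequest("mounted-index", "my-repo",
+ * "my-snap", "source-index", Settings.EMPTY, Strings.EMPTY_ARRAY, false)} mounts the index {@code source-index} from snapshot
+ * {@code my-snap} in repository {@code my-repo} under the new name {@code mounted-index}, without waiting for the restore to
+ * complete.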
+ */ + public MountSearchableSnapshotRequest(String mountedIndexName, String repositoryName, String snapshotName, String snapshotIndexName, + Settings indexSettings, String[] ignoredIndexSettings, boolean waitForCompletion) { + this.mountedIndexName = Objects.requireNonNull(mountedIndexName); + this.repositoryName = Objects.requireNonNull(repositoryName); + this.snapshotName = Objects.requireNonNull(snapshotName); + this.snapshotIndexName = Objects.requireNonNull(snapshotIndexName); + this.indexSettings = Objects.requireNonNull(indexSettings); + this.ignoredIndexSettings = Objects.requireNonNull(ignoredIndexSettings); + this.waitForCompletion = waitForCompletion; + } + + public MountSearchableSnapshotRequest(StreamInput in) throws IOException { + super(in); + this.mountedIndexName = in.readString(); + this.repositoryName = in.readString(); + this.snapshotName = in.readString(); + this.snapshotIndexName = in.readString(); + this.indexSettings = readSettingsFromStream(in); + this.ignoredIndexSettings = in.readStringArray(); + this.waitForCompletion = in.readBoolean(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeString(mountedIndexName); + out.writeString(repositoryName); + out.writeString(snapshotName); + out.writeString(snapshotIndexName); + writeSettingsToStream(indexSettings, out); + out.writeStringArray(ignoredIndexSettings); + out.writeBoolean(waitForCompletion); + } + + @Override + public ActionRequestValidationException validate() { + return null; + } + + /** + * @return the name of the index that will be created + */ + public String mountedIndexName() { + return mountedIndexName; + } + + /** + * @return the name of the repository + */ + public String repositoryName() { + return this.repositoryName; + } + + /** + * @return the name of the snapshot. 
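+ * When the request is built by ILM's {@code MountSnapshotStep}, this is the name that {@code GenerateSnapshotNameStep} recorded
+ * in the lifecycle execution state: the date-math-resolved name plus a lowercased random UUID suffix.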
+ */ + public String snapshotName() { + return this.snapshotName; + } + + /** + * @return the name of the index contained in the snapshot + */ + public String snapshotIndexName() { + return snapshotIndexName; + } + + /** + * @return true if the operation will wait for completion + */ + public boolean waitForCompletion() { + return waitForCompletion; + } + + /** + * @return settings that should be added to the index when it is mounted + */ + public Settings indexSettings() { + return this.indexSettings; + } + + /** + * @return the names of settings that should be removed from the index when it is mounted + */ + public String[] ignoreIndexSettings() { + return ignoredIndexSettings; + } + + @Override + public String getDescription() { + return "mount snapshot [" + repositoryName + ":" + snapshotName + ":" + snapshotIndexName + "] as [" + mountedIndexName + "]"; + } + + @Override + public boolean equals(Object o) { + if (this == o) return true; + if (o == null || getClass() != o.getClass()) return false; + MountSearchableSnapshotRequest that = (MountSearchableSnapshotRequest) o; + return waitForCompletion == that.waitForCompletion && + Objects.equals(mountedIndexName, that.mountedIndexName) && + Objects.equals(repositoryName, that.repositoryName) && + Objects.equals(snapshotName, that.snapshotName) && + Objects.equals(snapshotIndexName, that.snapshotIndexName) && + Objects.equals(indexSettings, that.indexSettings) && + Arrays.equals(ignoredIndexSettings, that.ignoredIndexSettings) && + Objects.equals(masterNodeTimeout, that.masterNodeTimeout); + } + + @Override + public int hashCode() { + int result = Objects.hash(mountedIndexName, repositoryName, snapshotName, snapshotIndexName, indexSettings, waitForCompletion, + masterNodeTimeout); + result = 31 * result + Arrays.hashCode(ignoredIndexSettings); + return result; + } + + @Override + public String toString() { + return getDescription(); + } +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotShardStats.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotShardStats.java new file mode 100644 index 00000000000..36ea9ba4c73 --- /dev/null +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/searchablesnapshots/SearchableSnapshotShardStats.java @@ -0,0 +1,452 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.searchablesnapshots; + +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.common.xcontent.ToXContentObject; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.SnapshotId; + +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; + +import static java.util.Collections.unmodifiableList; +import static java.util.stream.Collectors.toList; + +public class SearchableSnapshotShardStats implements Writeable, ToXContentObject { + + private final List inputStats; + private final ShardRouting shardRouting; + private final SnapshotId snapshotId; + private final IndexId indexId; + + public SearchableSnapshotShardStats(ShardRouting shardRouting, SnapshotId snapshotId, IndexId indexId, + List stats) { + this.shardRouting = Objects.requireNonNull(shardRouting); + this.snapshotId = Objects.requireNonNull(snapshotId); + this.indexId = Objects.requireNonNull(indexId); + this.inputStats = unmodifiableList(Objects.requireNonNull(stats)); + } + + public SearchableSnapshotShardStats(StreamInput in) throws IOException { + this.shardRouting = new ShardRouting(in); + this.snapshotId = new SnapshotId(in); + this.indexId = new IndexId(in); + this.inputStats = in.readList(CacheIndexInputStats::new); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + shardRouting.writeTo(out); + snapshotId.writeTo(out); + indexId.writeTo(out); + out.writeList(inputStats); + } + + public ShardRouting getShardRouting() { + return shardRouting; + } + + public SnapshotId getSnapshotId() { + return snapshotId; + } + + public IndexId getIndexId() { + return indexId; + } + + public List getStats() { + return inputStats; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("snapshot_uuid", getSnapshotId().getUUID()); + builder.field("index_uuid", getIndexId().getId()); + builder.startObject("shard"); + { + builder.field("state", shardRouting.state()); + builder.field("primary", shardRouting.primary()); + builder.field("node", shardRouting.currentNodeId()); + if (shardRouting.relocatingNodeId() != null) { + builder.field("relocating_node", shardRouting.relocatingNodeId()); + } + } + builder.endObject(); + builder.startArray("files"); + { + List stats = inputStats.stream() + .sorted(Comparator.comparing(CacheIndexInputStats::getFileName)).collect(toList()); + for (CacheIndexInputStats stat : stats) { + stat.toXContent(builder, params); + } + } + builder.endArray(); + } + return builder.endObject(); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + SearchableSnapshotShardStats that = (SearchableSnapshotShardStats) other; + return Objects.equals(shardRouting, that.shardRouting) + && Objects.equals(snapshotId, that.snapshotId) + && Objects.equals(indexId, that.indexId) + && Objects.equals(inputStats, that.inputStats); + } + + @Override + public int hashCode() { + return Objects.hash(shardRouting, snapshotId, indexId, inputStats); + } + + public static class 
CacheIndexInputStats implements Writeable, ToXContentObject { + + private final String fileName; + private final long fileLength; + + private final long openCount; + private final long closeCount; + + private final Counter forwardSmallSeeks; + private final Counter backwardSmallSeeks; + private final Counter forwardLargeSeeks; + private final Counter backwardLargeSeeks; + private final Counter contiguousReads; + private final Counter nonContiguousReads; + private final Counter cachedBytesRead; + private final TimedCounter cachedBytesWritten; + private final TimedCounter directBytesRead; + private final TimedCounter optimizedBytesRead; + + public CacheIndexInputStats(String fileName, long fileLength, long openCount, long closeCount, + Counter forwardSmallSeeks, Counter backwardSmallSeeks, + Counter forwardLargeSeeks, Counter backwardLargeSeeks, + Counter contiguousReads, Counter nonContiguousReads, + Counter cachedBytesRead, TimedCounter cachedBytesWritten, + TimedCounter directBytesRead, TimedCounter optimizedBytesRead) { + this.fileName = fileName; + this.fileLength = fileLength; + this.openCount = openCount; + this.closeCount = closeCount; + this.forwardSmallSeeks = forwardSmallSeeks; + this.backwardSmallSeeks = backwardSmallSeeks; + this.forwardLargeSeeks = forwardLargeSeeks; + this.backwardLargeSeeks = backwardLargeSeeks; + this.contiguousReads = contiguousReads; + this.nonContiguousReads = nonContiguousReads; + this.cachedBytesRead = cachedBytesRead; + this.cachedBytesWritten = cachedBytesWritten; + this.directBytesRead = directBytesRead; + this.optimizedBytesRead = optimizedBytesRead; + } + + CacheIndexInputStats(final StreamInput in) throws IOException { + this.fileName = in.readString(); + this.fileLength = in.readVLong(); + this.openCount = in.readVLong(); + this.closeCount = in.readVLong(); + this.forwardSmallSeeks = new Counter(in); + this.backwardSmallSeeks = new Counter(in); + this.forwardLargeSeeks = new Counter(in); + this.backwardLargeSeeks = new Counter(in); + this.contiguousReads = new Counter(in); + this.nonContiguousReads = new Counter(in); + this.cachedBytesRead = new Counter(in); + this.cachedBytesWritten = new TimedCounter(in); + this.directBytesRead = new TimedCounter(in); + this.optimizedBytesRead = new TimedCounter(in); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + out.writeString(fileName); + out.writeVLong(fileLength); + out.writeVLong(openCount); + out.writeVLong(closeCount); + + forwardSmallSeeks.writeTo(out); + backwardSmallSeeks.writeTo(out); + forwardLargeSeeks.writeTo(out); + backwardLargeSeeks.writeTo(out); + contiguousReads.writeTo(out); + nonContiguousReads.writeTo(out); + cachedBytesRead.writeTo(out); + cachedBytesWritten.writeTo(out); + directBytesRead.writeTo(out); + optimizedBytesRead.writeTo(out); + } + + public String getFileName() { + return fileName; + } + + public long getFileLength() { + return fileLength; + } + + public long getOpenCount() { + return openCount; + } + + public long getCloseCount() { + return closeCount; + } + + public Counter getForwardSmallSeeks() { + return forwardSmallSeeks; + } + + public Counter getBackwardSmallSeeks() { + return backwardSmallSeeks; + } + + public Counter getForwardLargeSeeks() { + return forwardLargeSeeks; + } + + public Counter getBackwardLargeSeeks() { + return backwardLargeSeeks; + } + + public Counter getContiguousReads() { + return contiguousReads; + } + + public Counter getNonContiguousReads() { + return nonContiguousReads; + } + + public Counter 
getCachedBytesRead() { + return cachedBytesRead; + } + + public TimedCounter getCachedBytesWritten() { + return cachedBytesWritten; + } + + public TimedCounter getDirectBytesRead() { + return directBytesRead; + } + + public TimedCounter getOptimizedBytesRead() { + return optimizedBytesRead; + } + + @Override + public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { + builder.startObject(); + { + builder.field("name", getFileName()); + builder.field("length", getFileLength()); + builder.field("open_count", getOpenCount()); + builder.field("close_count", getCloseCount()); + builder.field("contiguous_bytes_read", getContiguousReads()); + builder.field("non_contiguous_bytes_read", getNonContiguousReads()); + builder.field("cached_bytes_read", getCachedBytesRead()); + builder.field("cached_bytes_written", getCachedBytesWritten()); + builder.field("direct_bytes_read", getDirectBytesRead()); + builder.field("optimized_bytes_read", getOptimizedBytesRead()); + { + builder.startObject("forward_seeks"); + builder.field("small", getForwardSmallSeeks()); + builder.field("large", getForwardLargeSeeks()); + builder.endObject(); + } + { + builder.startObject("backward_seeks"); + builder.field("small", getBackwardSmallSeeks()); + builder.field("large", getBackwardLargeSeeks()); + builder.endObject(); + } + } + return builder.endObject(); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + CacheIndexInputStats stats = (CacheIndexInputStats) other; + return fileLength == stats.fileLength + && openCount == stats.openCount + && closeCount == stats.closeCount + && Objects.equals(fileName, stats.fileName) + && Objects.equals(forwardSmallSeeks, stats.forwardSmallSeeks) + && Objects.equals(backwardSmallSeeks, stats.backwardSmallSeeks) + && Objects.equals(forwardLargeSeeks, stats.forwardLargeSeeks) + && Objects.equals(backwardLargeSeeks, stats.backwardLargeSeeks) + && Objects.equals(contiguousReads, stats.contiguousReads) + && Objects.equals(nonContiguousReads, stats.nonContiguousReads) + && Objects.equals(cachedBytesRead, stats.cachedBytesRead) + && Objects.equals(cachedBytesWritten, stats.cachedBytesWritten) + && Objects.equals(directBytesRead, stats.directBytesRead) + && Objects.equals(optimizedBytesRead, stats.optimizedBytesRead); + } + + @Override + public int hashCode() { + return Objects.hash(fileName, fileLength, openCount, closeCount, + forwardSmallSeeks, backwardSmallSeeks, + forwardLargeSeeks, backwardLargeSeeks, + contiguousReads, nonContiguousReads, + cachedBytesRead, cachedBytesWritten, + directBytesRead, optimizedBytesRead); + } + } + + public static class Counter implements Writeable, ToXContentObject { + + private final long count; + private final long total; + private final long min; + private final long max; + + public Counter(final long count, final long total, final long min, final long max) { + this.count = count; + this.total = total; + this.min = min; + this.max = max; + } + + Counter(final StreamInput in) throws IOException { + this.count = in.readZLong(); + this.total = in.readZLong(); + this.min = in.readZLong(); + this.max = in.readZLong(); + } + + @Override + public void writeTo(final StreamOutput out) throws IOException { + out.writeZLong(count); + out.writeZLong(total); + out.writeZLong(min); + out.writeZLong(max); + } + + @Override + public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws 
IOException { + builder.startObject(); + { + builder.field("count", count); + builder.field("sum", total); + builder.field("min", min); + builder.field("max", max); + innerToXContent(builder, params); + } + builder.endObject(); + return builder; + } + + void innerToXContent(XContentBuilder builder, Params params) throws IOException { + } + + public long getCount() { + return count; + } + + public long getTotal() { + return total; + } + + public long getMin() { + return min; + } + + public long getMax() { + return max; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + Counter that = (Counter) other; + return count == that.count + && total == that.total + && min == that.min + && max == that.max; + } + + @Override + public int hashCode() { + return Objects.hash(count, total, min, max); + } + } + + public static class TimedCounter extends Counter { + + private final long totalNanoseconds; + + public TimedCounter(long count, long total, long min, long max, long totalNanoseconds) { + super(count, total, min, max); + this.totalNanoseconds = totalNanoseconds; + } + + TimedCounter(StreamInput in) throws IOException { + super(in); + totalNanoseconds = in.readZLong(); + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeZLong(totalNanoseconds); + } + + @Override + void innerToXContent(XContentBuilder builder, Params params) throws IOException { + if (builder.humanReadable()) { + builder.field("time", TimeValue.timeValueNanos(totalNanoseconds).toString()); + } + builder.field("time_in_nanos", totalNanoseconds); + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + if (other == null || getClass() != other.getClass()) { + return false; + } + if (super.equals(other) == false) { + return false; + } + TimedCounter that = (TimedCounter) other; + return totalNanoseconds == that.totalNanoseconds; + } + + @Override + public int hashCode() { + return Objects.hash(super.hashCode(), totalNanoseconds); + } + } + +} diff --git a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java index 72c74de5cee..bc32be04234 100644 --- a/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java +++ b/x-pack/plugin/core/src/main/java/org/elasticsearch/xpack/core/slm/SnapshotLifecyclePolicy.java @@ -10,16 +10,11 @@ import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionRequestValidationException; import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; -import org.elasticsearch.action.support.IndicesOptions; import org.elasticsearch.cluster.AbstractDiffable; -import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.Diffable; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; -import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver.Context; import org.elasticsearch.common.Nullable; import org.elasticsearch.common.ParseField; import org.elasticsearch.common.Strings; -import org.elasticsearch.common.UUIDs; import org.elasticsearch.common.io.stream.StreamInput; import org.elasticsearch.common.io.stream.StreamOutput; import org.elasticsearch.common.io.stream.Writeable; @@ -31,14 +26,13 
@@ import org.elasticsearch.xpack.core.scheduler.Cron; import java.io.IOException; import java.nio.charset.StandardCharsets; -import java.util.Collections; import java.util.HashMap; -import java.util.List; -import java.util.Locale; import java.util.Map; import java.util.Objects; import static org.elasticsearch.cluster.metadata.MetadataCreateIndexService.MAX_INDEX_NAME_BYTES; +import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.generateSnapshotName; +import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.validateGeneratedSnapshotName; /** * A {@code SnapshotLifecyclePolicy} is a policy for the cluster including a schedule of when a @@ -62,8 +56,6 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable<SnapshotLifecyclePolicy> - public String generateSnapshotName(Context context) { - List<String> candidates = DATE_MATH_RESOLVER.resolve(context, Collections.singletonList(this.name)); - if (candidates.size() != 1) { - throw new IllegalStateException("resolving snapshot name " + this.name + " generated more than one candidate: " + candidates); - } - // TODO: we are breaking the rules of UUIDs by lowercasing this here, find an alternative (snapshot names must be lowercase) - return candidates.get(0) + "-" + UUIDs.randomBase64UUID().toLowerCase(Locale.ROOT); - } - /** * Generate a new create snapshot request from this policy. The name of the snapshot is * generated at this time based on any date math expressions in the "name" field. */ public CreateSnapshotRequest toRequest() { - CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(new ResolverContext())); + CreateSnapshotRequest req = new CreateSnapshotRequest(repository, generateSnapshotName(this.name)); Map<String, Object> mergedConfiguration = configuration == null ? new HashMap<>() : new HashMap<>(configuration); @SuppressWarnings("unchecked") Map<String, Object> metadata = (Map<String, Object>) mergedConfiguration.get("metadata"); @@ -324,28 +290,4 @@ public class SnapshotLifecyclePolicy extends AbstractDiffable<SnapshotLifecyclePolicy> diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AsyncActionBranchingStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/AsyncActionBranchingStepTests.java new file mode 100644 +public class AsyncActionBranchingStepTests extends AbstractStepTestCase<AsyncActionBranchingStep> { + + @Override + protected AsyncActionBranchingStep createRandomInstance() { + return new AsyncActionBranchingStep(new UpdateSettingsStep(randomStepKey(), randomStepKey(), client, Settings.EMPTY), + randomStepKey(), client); + } + + @Override + protected AsyncActionBranchingStep mutateInstance(AsyncActionBranchingStep instance) { + AsyncActionStep wrappedStep = instance.getStepToExecute(); + Step.StepKey nextKeyOnIncompleteResponse = instance.getNextKeyOnIncompleteResponse(); + + switch (between(0, 1)) { + case 0: + wrappedStep = new UpdateSettingsStep(randomStepKey(), randomStepKey(), client, Settings.EMPTY); + break; + case 1: + nextKeyOnIncompleteResponse = randomStepKey(); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new AsyncActionBranchingStep(wrappedStep, nextKeyOnIncompleteResponse, client); + } + + @Override + protected AsyncActionBranchingStep copyInstance(AsyncActionBranchingStep instance) { + return new AsyncActionBranchingStep(instance.getStepToExecute(), instance.getNextKeyOnIncompleteResponse(), instance.getClient()); + } + + protected IndexMetadata getIndexMetadata() { + return IndexMetadata.builder(randomAlphaOfLength(10)).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)).build(); + } + + public void testBranchStepKeyIsTheWrappedStepKey() { + AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) { + @Override + public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver 
observer, + Listener listener) { + } + }; + + AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, randomStepKey(), client); + assertThat(asyncActionBranchingStep.getKey(), is(stepToExecute.getKey())); + } + + public void testBranchStepNextKeyOnCompleteResponse() { + AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) { + @Override + public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer, + Listener listener) { + listener.onResponse(true); + } + }; + + AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, randomStepKey(), client); + + asyncActionBranchingStep.performAction(getIndexMetadata(), emptyClusterState(), null, new AsyncActionStep.Listener() { + + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(true)); + } + + @Override + public void onFailure(Exception e) { + fail("not expecting a failure as the wrapped step was successful"); + } + }); + assertThat(asyncActionBranchingStep.getNextStepKey(), is(stepToExecute.getNextStepKey())); + } + + public void testBranchStepNextKeyOnInCompleteResponse() { + AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) { + @Override + public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer, + Listener listener) { + listener.onResponse(false); + } + }; + + Step.StepKey nextKeyOnIncompleteResponse = randomStepKey(); + AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, nextKeyOnIncompleteResponse, + client); + + asyncActionBranchingStep.performAction(getIndexMetadata(), emptyClusterState(), null, new AsyncActionStep.Listener() { + + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(false)); + } + + @Override + public void onFailure(Exception e) { + fail("not expecting a failure as the wrapped step was successful"); + } + }); + assertThat(asyncActionBranchingStep.getNextStepKey(), is(nextKeyOnIncompleteResponse)); + } + + public void testBranchStepPropagatesFailure() { + NullPointerException failException = new NullPointerException("fail"); + AsyncActionStep stepToExecute = new AsyncActionStep(randomStepKey(), randomStepKey(), client) { + @Override + public void performAction(IndexMetadata indexMetadata, ClusterState currentClusterState, ClusterStateObserver observer, + Listener listener) { + listener.onFailure(failException); + } + }; + + AsyncActionBranchingStep asyncActionBranchingStep = new AsyncActionBranchingStep(stepToExecute, randomStepKey(), client); + + asyncActionBranchingStep.performAction(getIndexMetadata(), emptyClusterState(), null, new AsyncActionStep.Listener() { + + @Override + public void onResponse(boolean complete) { + fail("expecting a failure as the wrapped step failed"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, is(failException)); + } + }); + expectThrows(IllegalStateException.class, () -> asyncActionBranchingStep.getNextStepKey()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java new file mode 100644 index 00000000000..cc92d478073 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CleanupSnapshotStepTests.java 
@@ -0,0 +1,156 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.ilm.Step.StepKey; + +import java.util.Map; + +import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class CleanupSnapshotStepTests extends AbstractStepTestCase { + + @Override + public CleanupSnapshotStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + return new CleanupSnapshotStep(stepKey, nextStepKey, client); + } + + @Override + protected CleanupSnapshotStep copyInstance(CleanupSnapshotStep instance) { + return new CleanupSnapshotStep(instance.getKey(), instance.getNextStepKey(), instance.getClient()); + } + + @Override + public CleanupSnapshotStep mutateInstance(CleanupSnapshotStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new CleanupSnapshotStep(key, nextKey, instance.getClient()); + } + + public void testPerformActionDoesntFailIfSnapshotInfoIsMissing() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + + { + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + CleanupSnapshotStep cleanupSnapshotStep = createRandomInstance(); + cleanupSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(true)); + } + + @Override + public void onFailure(Exception e) { + fail("expecting the step to report success if repository information is missing from the ILM execution state as there" + + " is no snapshot to delete"); + } + }); + } + + { + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + 
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + Map ilmCustom = org.elasticsearch.common.collect.Map.of("snapshot_repository", "repository_name"); + indexMetadataBuilder.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom); + + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + CleanupSnapshotStep cleanupSnapshotStep = createRandomInstance(); + cleanupSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(true)); + } + + @Override + public void onFailure(Exception e) { + fail("expecting the step to report success if the snapshot name is missing from the ILM execution state as there is " + + "no snapshot to delete"); + } + }); + } + } + + public void testPerformAction() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + String snapshotName = indexName + "-" + policyName; + Map ilmCustom = org.elasticsearch.common.collect.Map.of("snapshot_name", snapshotName); + + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + try (NoOpClient client = getDeleteSnapshotRequestAssertingClient(snapshotName)) { + CleanupSnapshotStep step = new CleanupSnapshotStep(randomStepKey(), randomStepKey(), client); + step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + } + + @Override + public void onFailure(Exception e) { + } + }); + } + } + + private NoOpClient getDeleteSnapshotRequestAssertingClient(String expectedSnapshotName) { + return new NoOpClient(getTestName()) { + @Override + protected void doExecute(ActionType action, + Request request, + ActionListener listener) { + assertThat(action.name(), is(DeleteSnapshotAction.NAME)); + assertTrue(request instanceof DeleteSnapshotRequest); + assertThat(((DeleteSnapshotRequest) request).snapshot(), equalTo(expectedSnapshotName)); + } + }; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStepTests.java index 615ad0156b6..4d97d02e9e9 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopyExecutionStateStepTests.java @@ -25,14 +25,16 @@ public class CopyExecutionStateStepTests extends AbstractStepTestCase step.performAction(originalIndexMetadata.getIndex(), originalClusterState)); assertThat(e.getMessage(), equalTo("unable to copy execution state from [" + - indexName + "] to [" + step.getShrunkIndexPrefix() + indexName + "] as target index does not exist")); + indexName + "] to [" + step.getTargetIndexPrefix() + indexName + "] as target index does not 
exist")); } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopySettingsStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopySettingsStepTests.java new file mode 100644 index 00000000000..e11c4a59ab3 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CopySettingsStepTests.java @@ -0,0 +1,80 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.Version; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; + +import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState; +import static org.hamcrest.Matchers.is; + +public class CopySettingsStepTests extends AbstractStepTestCase { + + @Override + protected CopySettingsStep createRandomInstance() { + return new CopySettingsStep(randomStepKey(), randomStepKey(), randomAlphaOfLengthBetween(1, 10), + IndexMetadata.SETTING_NUMBER_OF_SHARDS); + } + + @Override + protected CopySettingsStep mutateInstance(CopySettingsStep instance) { + Step.StepKey key = instance.getKey(); + Step.StepKey nextKey = instance.getNextStepKey(); + String indexPrefix = instance.getIndexPrefix(); + String[] settingsKeys = instance.getSettingsKeys(); + + switch (between(0, 3)) { + case 0: + key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + indexPrefix = randomValueOtherThan(indexPrefix, () -> randomAlphaOfLengthBetween(1, 10)); + break; + case 3: + settingsKeys = new String[]{randomAlphaOfLengthBetween(1, 10)}; + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new CopySettingsStep(key, nextKey, indexPrefix, settingsKeys); + } + + @Override + protected CopySettingsStep copyInstance(CopySettingsStep instance) { + return new CopySettingsStep(instance.getKey(), instance.getNextStepKey(), instance.getIndexPrefix(), instance.getSettingsKeys()); + } + + public void testPerformAction() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + IndexMetadata.Builder sourceIndexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + + String indexPrefix = "test-prefix-"; + String targetIndex = indexPrefix + indexName; + + IndexMetadata.Builder targetIndexMetadataBuilder = IndexMetadata.builder(targetIndex).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + + IndexMetadata sourceIndexMetadata = sourceIndexMetadataBuilder.build(); + ClusterState clusterState = ClusterState.builder(emptyClusterState()).metadata( + Metadata.builder().put(sourceIndexMetadata, true).put(targetIndexMetadataBuilder).build() + ).build(); + + CopySettingsStep copySettingsStep = new CopySettingsStep(randomStepKey(), randomStepKey(), indexPrefix, + LifecycleSettings.LIFECYCLE_NAME); + + ClusterState newClusterState = 
copySettingsStep.performAction(sourceIndexMetadata.getIndex(), clusterState); + IndexMetadata newTargetIndexMetadata = newClusterState.metadata().index(targetIndex); + assertThat(newTargetIndexMetadata.getSettings().get(LifecycleSettings.LIFECYCLE_NAME), is(policyName)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java new file mode 100644 index 00000000000..8b70bdb7caf --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/CreateSnapshotStepTests.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotAction; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRequest; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.ilm.Step.StepKey; + +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.is; + +public class CreateSnapshotStepTests extends AbstractStepTestCase { + + @Override + public CreateSnapshotStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + return new CreateSnapshotStep(stepKey, nextStepKey, client); + } + + @Override + protected CreateSnapshotStep copyInstance(CreateSnapshotStep instance) { + return new CreateSnapshotStep(instance.getKey(), instance.getNextStepKey(), instance.getClient()); + } + + @Override + public CreateSnapshotStep mutateInstance(CreateSnapshotStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + switch (between(0, 1)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new CreateSnapshotStep(key, nextKey, instance.getClient()); + } + + public void testPerformActionFailure() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + + { + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + Map ilmCustom = new HashMap<>(); + String repository = "repository"; + ilmCustom.put("snapshot_repository", repository); + indexMetadataBuilder.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom); + + IndexMetadata 
indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + CreateSnapshotStep createSnapshotStep = createRandomInstance(); + createSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + fail("expecting a failure as the index doesn't have any snapshot name in its ILM execution state"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, instanceOf(IllegalStateException.class)); + assertThat(e.getMessage(), + is("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]")); + } + }); + } + + { + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + CreateSnapshotStep createSnapshotStep = createRandomInstance(); + createSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + fail("expecting a failure as the index doesn't have any repository name in its ILM execution state"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, instanceOf(IllegalStateException.class)); + assertThat(e.getMessage(), + is("snapshot repository is not present for policy [" + policyName + "] and index [" + indexName + "]")); + } + }); + } + } + + public void testPerformAction() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + Map<String, String> ilmCustom = new HashMap<>(); + String snapshotName = indexName + "-" + policyName; + ilmCustom.put("snapshot_name", snapshotName); + String repository = "repository"; + ilmCustom.put("snapshot_repository", repository); + + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + try (NoOpClient client = getCreateSnapshotRequestAssertingClient(repository, snapshotName, indexName)) { + CreateSnapshotStep step = new CreateSnapshotStep(randomStepKey(), randomStepKey(), client); + step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + } + + @Override + public void onFailure(Exception e) { + } + }); + } + } + + private NoOpClient getCreateSnapshotRequestAssertingClient(String expectedRepoName, String expectedSnapshotName, String indexName) { + return new NoOpClient(getTestName()) { + @Override + protected <Request extends ActionRequest, Response extends ActionResponse> void doExecute(ActionType<Response> action, + Request request, + ActionListener<Response> listener) { + assertThat(action.name(), is(CreateSnapshotAction.NAME)); + assertTrue(request instanceof 
CreateSnapshotRequest); + CreateSnapshotRequest createSnapshotRequest = (CreateSnapshotRequest) request; + assertThat(createSnapshotRequest.indices().length, is(1)); + assertThat(createSnapshotRequest.indices()[0], is(indexName)); + assertThat(createSnapshotRequest.repository(), is(expectedRepoName)); + assertThat(createSnapshotRequest.snapshot(), is(expectedSnapshotName)); + assertThat(CreateSnapshotStep.NAME + " waits for the create snapshot request to complete", + createSnapshotRequest.waitForCompletion(), is(true)); + assertThat("ILM generated snapshots should not include global state", createSnapshotRequest.includeGlobalState(), + is(false)); + } + }; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteActionTests.java index 09db90ce014..bfb5cf8f9a5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/DeleteActionTests.java @@ -30,20 +30,39 @@ public class DeleteActionTests extends AbstractActionTestCase { } public void testToSteps() { - DeleteAction action = createTestInstance(); String phase = randomAlphaOfLengthBetween(1, 10); StepKey nextStepKey = new StepKey(randomAlphaOfLengthBetween(1, 10), randomAlphaOfLengthBetween(1, 10), - randomAlphaOfLengthBetween(1, 10)); - List steps = action.toSteps(null, phase, nextStepKey); - assertNotNull(steps); - assertEquals(2, steps.size()); - StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, WaitForNoFollowersStep.NAME); - StepKey expectedSecondStepKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME); - WaitForNoFollowersStep firstStep = (WaitForNoFollowersStep) steps.get(0); - DeleteStep secondStep = (DeleteStep) steps.get(1); - assertEquals(expectedFirstStepKey, firstStep.getKey()); - assertEquals(expectedSecondStepKey, firstStep.getNextStepKey()); - assertEquals(expectedSecondStepKey, secondStep.getKey()); - assertEquals(nextStepKey, secondStep.getNextStepKey()); + randomAlphaOfLengthBetween(1, 10)); + { + DeleteAction action = new DeleteAction(true); + List steps = action.toSteps(null, phase, nextStepKey); + assertNotNull(steps); + assertEquals(3, steps.size()); + StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, WaitForNoFollowersStep.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, DeleteAction.NAME, CleanupSnapshotStep.NAME); + StepKey expectedThirdKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME); + WaitForNoFollowersStep firstStep = (WaitForNoFollowersStep) steps.get(0); + CleanupSnapshotStep secondStep = (CleanupSnapshotStep) steps.get(1); + DeleteStep thirdStep = (DeleteStep) steps.get(2); + assertEquals(expectedFirstStepKey, firstStep.getKey()); + assertEquals(expectedSecondStepKey, firstStep.getNextStepKey()); + assertEquals(expectedSecondStepKey, secondStep.getKey()); + assertEquals(expectedThirdKey, thirdStep.getKey()); + assertEquals(nextStepKey, thirdStep.getNextStepKey()); + } + + { + DeleteAction actionKeepsSnapshot = new DeleteAction(false); + List steps = actionKeepsSnapshot.toSteps(null, phase, nextStepKey); + StepKey expectedFirstStepKey = new StepKey(phase, DeleteAction.NAME, WaitForNoFollowersStep.NAME); + StepKey expectedSecondStepKey = new StepKey(phase, DeleteAction.NAME, DeleteStep.NAME); + assertEquals(2, steps.size()); + assertNotNull(steps); + WaitForNoFollowersStep firstStep = 
(WaitForNoFollowersStep) steps.get(0); + DeleteStep secondStep = (DeleteStep) steps.get(1); + assertEquals(expectedFirstStepKey, firstStep.getKey()); + assertEquals(expectedSecondStepKey, firstStep.getNextStepKey()); + assertEquals(nextStepKey, secondStep.getNextStepKey()); + } } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java new file mode 100644 index 00000000000..261e3e169e1 --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/GenerateSnapshotNameStepTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionRequestValidationException; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.common.Strings; + +import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState; +import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.generateSnapshotName; +import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.validateGeneratedSnapshotName; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.hamcrest.Matchers.containsStringIgnoringCase; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.startsWith; + +public class GenerateSnapshotNameStepTests extends AbstractStepTestCase { + + @Override + protected GenerateSnapshotNameStep createRandomInstance() { + return new GenerateSnapshotNameStep(randomStepKey(), randomStepKey(), randomAlphaOfLengthBetween(5, 10)); + } + + @Override + protected GenerateSnapshotNameStep mutateInstance(GenerateSnapshotNameStep instance) { + Step.StepKey key = instance.getKey(); + Step.StepKey nextKey = instance.getNextStepKey(); + String snapshotRepository = instance.getSnapshotRepository(); + + switch (between(0, 2)) { + case 0: + key = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new Step.StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + snapshotRepository = randomValueOtherThan(snapshotRepository, () -> randomAlphaOfLengthBetween(5, 10)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new GenerateSnapshotNameStep(key, nextKey, snapshotRepository); + } + + @Override + protected GenerateSnapshotNameStep copyInstance(GenerateSnapshotNameStep instance) { + return new GenerateSnapshotNameStep(instance.getKey(), instance.getNextStepKey(), instance.getSnapshotRepository()); + } + + public void testPerformAction() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + 
.numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + + IndexMetadata indexMetadata = indexMetadataBuilder.build(); + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetadata, true).build()).build(); + + GenerateSnapshotNameStep generateSnapshotNameStep = createRandomInstance(); + ClusterState newClusterState = generateSnapshotNameStep.performAction(indexMetadata.getIndex(), clusterState); + + LifecycleExecutionState executionState = LifecycleExecutionState.fromIndexMetadata(newClusterState.metadata().index(indexName)); + assertThat("the " + GenerateSnapshotNameStep.NAME + " step must generate a snapshot name", executionState.getSnapshotName(), + notNullValue()); + assertThat(executionState.getSnapshotRepository(), is(generateSnapshotNameStep.getSnapshotRepository())); + assertThat(executionState.getSnapshotName(), containsStringIgnoringCase(indexName)); + assertThat(executionState.getSnapshotName(), containsStringIgnoringCase(policyName)); + } + + public void testNameGeneration() { + long time = 1552684146542L; // Fri Mar 15 2019 21:09:06 UTC + assertThat(generateSnapshotName("name"), startsWith("name-")); + assertThat(generateSnapshotName("name").length(), greaterThan("name-".length())); + + GenerateSnapshotNameStep.ResolverContext resolverContext = new GenerateSnapshotNameStep.ResolverContext(time); + assertThat(generateSnapshotName("<name-{now}>", resolverContext), startsWith("name-2019.03.15-")); + assertThat(generateSnapshotName("<name-{now}>", resolverContext).length(), greaterThan("name-2019.03.15-".length())); + + assertThat(generateSnapshotName("<name-{now/M}>", resolverContext), startsWith("name-2019.03.01-")); + + assertThat(generateSnapshotName("<name-{now/m{yyyy-MM-dd.HH:mm:ss}}>", resolverContext), startsWith("name-2019-03-15.21:09:00-")); + } + + public void testNameValidation() { + assertThat(validateGeneratedSnapshotName("name-", generateSnapshotName("name-")), nullValue()); + assertThat(validateGeneratedSnapshotName("<name-{now}>", generateSnapshotName("<name-{now}>")), nullValue()); + + { + ActionRequestValidationException validationException = validateGeneratedSnapshotName("", generateSnapshotName("")); + assertThat(validationException, notNullValue()); + assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name []: cannot be empty")); + } + { + ActionRequestValidationException validationException = validateGeneratedSnapshotName("#start", generateSnapshotName("#start")); + assertThat(validationException, notNullValue()); + assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [#start]: must not contain '#'")); + } + { + ActionRequestValidationException validationException = validateGeneratedSnapshotName("_start", generateSnapshotName("_start")); + assertThat(validationException, notNullValue()); + assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [_start]: must not start with " + + "'_'")); + } + { + ActionRequestValidationException validationException = validateGeneratedSnapshotName("aBcD", generateSnapshotName("aBcD")); + assertThat(validationException, notNullValue()); + assertThat(validationException.validationErrors(), containsInAnyOrder("invalid snapshot name [aBcD]: must be lowercase")); + } + { + ActionRequestValidationException validationException = validateGeneratedSnapshotName("na>me", generateSnapshotName("na>me")); + assertThat(validationException, notNullValue()); + assertThat(validationException.validationErrors(), containsInAnyOrder("invalid 
snapshot name [na>me]: must not contain " + "the following characters " + Strings.INVALID_FILENAME_CHARS)); + } + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java index 862c408e6d6..c2692731f47 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/IndexLifecycleExplainResponseTests.java @@ -57,6 +57,8 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC stepNull ? null : randomNonNegativeLong(), stepNull ? null : randomNonNegativeLong(), stepNull ? null : randomNonNegativeLong(), + stepNull ? null : randomAlphaOfLength(10), + stepNull ? null : randomAlphaOfLength(10), randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), randomBoolean() ? null : PhaseExecutionInfoTests.randomPhaseExecutionInfo("")); } @@ -76,6 +78,8 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC randomBoolean() ? null : randomNonNegativeLong(), randomBoolean() ? null : randomNonNegativeLong(), randomBoolean() ? null : randomNonNegativeLong(), + randomBoolean() ? null : randomAlphaOfLength(10), + randomBoolean() ? null : randomAlphaOfLength(10), randomBoolean() ? null : new BytesArray(new RandomStepInfo(() -> randomAlphaOfLength(10)).toString()), randomBoolean() ? null : PhaseExecutionInfoTests.randomPhaseExecutionInfo(""))); assertThat(exception.getMessage(), startsWith("managed index response must have complete step details")); @@ -116,11 +120,13 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC Long phaseTime = instance.getPhaseTime(); Long actionTime = instance.getActionTime(); Long stepTime = instance.getStepTime(); + String repositoryName = instance.getRepositoryName(); + String snapshotName = instance.getSnapshotName(); boolean managed = instance.managedByILM(); BytesReference stepInfo = instance.getStepInfo(); PhaseExecutionInfo phaseExecutionInfo = instance.getPhaseExecutionInfo(); if (managed) { - switch (between(0, 11)) { + switch (between(0, 13)) { case 0: index = index + randomAlphaOfLengthBetween(1, 5); break; @@ -172,11 +178,18 @@ public class IndexLifecycleExplainResponseTests extends AbstractSerializingTestC isAutoRetryableError = true; failedStepRetryCount = randomValueOtherThan(failedStepRetryCount, () -> randomInt(10)); break; + case 12: + repositoryName = randomValueOtherThan(repositoryName, () -> randomAlphaOfLengthBetween(5, 10)); + break; + case 13: + snapshotName = randomValueOtherThan(snapshotName, () -> randomAlphaOfLengthBetween(5, 10)); + break; default: throw new AssertionError("Illegal randomisation branch"); } return IndexLifecycleExplainResponse.newManagedIndexResponse(index, policy, policyTime, phase, action, step, failedStep, - isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, stepInfo, phaseExecutionInfo); + isAutoRetryableError, failedStepRetryCount, phaseTime, actionTime, stepTime, repositoryName, snapshotName, stepInfo, + phaseExecutionInfo); } else { switch (between(0, 1)) { case 0: diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateTests.java 
b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateTests.java index 7a7782fb389..a7729f453b7 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecycleExecutionStateTests.java @@ -186,12 +186,14 @@ public class LifecycleExecutionStateTests extends ESTestCase { } static Map createCustomMetadata() { - String phase = randomAlphaOfLengthBetween(5,20); - String action = randomAlphaOfLengthBetween(5,20); - String step = randomAlphaOfLengthBetween(5,20); - String failedStep = randomAlphaOfLengthBetween(5,20); - String stepInfo = randomAlphaOfLengthBetween(15,50); - String phaseDefinition = randomAlphaOfLengthBetween(15,50); + String phase = randomAlphaOfLengthBetween(5, 20); + String action = randomAlphaOfLengthBetween(5, 20); + String step = randomAlphaOfLengthBetween(5, 20); + String failedStep = randomAlphaOfLengthBetween(5, 20); + String stepInfo = randomAlphaOfLengthBetween(15, 50); + String phaseDefinition = randomAlphaOfLengthBetween(15, 50); + String repositoryName = randomAlphaOfLengthBetween(10, 20); + String snapshotName = randomAlphaOfLengthBetween(10, 20); long indexCreationDate = randomLong(); long phaseTime = randomLong(); long actionTime = randomLong(); @@ -208,6 +210,8 @@ public class LifecycleExecutionStateTests extends ESTestCase { customMetadata.put("phase_time", String.valueOf(phaseTime)); customMetadata.put("action_time", String.valueOf(actionTime)); customMetadata.put("step_time", String.valueOf(stepTime)); + customMetadata.put("snapshot_repository", repositoryName); + customMetadata.put("snapshot_name", snapshotName); return customMetadata; } } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java index 9b50616a128..684b32a19a2 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/LifecyclePolicyMetadataTests.java @@ -40,6 +40,7 @@ public class LifecyclePolicyMetadataTests extends AbstractSerializingTestCase
TimeseriesLifecycleType.INSTANCE), new NamedWriteableRegistry.Entry(LifecycleAction.class, AllocateAction.NAME, AllocateAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, WaitForSnapshotAction.NAME, WaitForSnapshotAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, DeleteAction.NAME, DeleteAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, ForceMergeAction.NAME, ForceMergeAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, ReadOnlyAction.NAME, ReadOnlyAction::new), @@ -60,6 +61,8 @@ public class LifecyclePolicyMetadataTests extends AbstractSerializingTestCase
diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/MountSnapshotStepTests.java new file mode 100644 +public class MountSnapshotStepTests extends AbstractStepTestCase<MountSnapshotStep> { + + private static final String RESTORED_INDEX_PREFIX = "restored-"; + + @Override + public MountSnapshotStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + String restoredIndexPrefix = randomAlphaOfLength(10); + return new MountSnapshotStep(stepKey, nextStepKey, client, restoredIndexPrefix); + } + + @Override + protected MountSnapshotStep copyInstance(MountSnapshotStep instance) { + return new MountSnapshotStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), instance.getRestoredIndexPrefix()); + } + + @Override + public MountSnapshotStep mutateInstance(MountSnapshotStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + String restoredIndexPrefix = instance.getRestoredIndexPrefix(); + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + restoredIndexPrefix = randomValueOtherThan(restoredIndexPrefix, () -> randomAlphaOfLengthBetween(1, 10)); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new MountSnapshotStep(key, nextKey, instance.getClient(), restoredIndexPrefix); + } + + public void testPerformActionFailure() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + + { + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + MountSnapshotStep mountSnapshotStep = createRandomInstance(); + mountSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + fail("expecting a failure as the index doesn't have any repository name in its ILM execution state"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, instanceOf(IllegalStateException.class)); + assertThat(e.getMessage(), + is("snapshot repository is not present for policy [" + policyName + "] and index [" + indexName + "]")); + } + }); + } + + { + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + Map<String, String> ilmCustom = new HashMap<>(); + String repository = "repository"; + ilmCustom.put("snapshot_repository", repository); + indexMetadataBuilder.putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + MountSnapshotStep mountSnapshotStep = createRandomInstance(); + mountSnapshotStep.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + fail("expecting a failure as the index doesn't have any snapshot name in its 
ILM execution state"); + } + + @Override + public void onFailure(Exception e) { + assertThat(e, instanceOf(IllegalStateException.class)); + assertThat(e.getMessage(), + is("snapshot name was not generated for policy [" + policyName + "] and index [" + indexName + "]")); + } + }); + } + } + + public void testPerformAction() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + Map ilmCustom = new HashMap<>(); + String snapshotName = indexName + "-" + policyName; + ilmCustom.put("snapshot_name", snapshotName); + String repository = "repository"; + ilmCustom.put("snapshot_repository", repository); + + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + try (NoOpClient client = getRestoreSnapshotRequestAssertingClient(repository, snapshotName, indexName, RESTORED_INDEX_PREFIX)) { + MountSnapshotStep step = new MountSnapshotStep(randomStepKey(), randomStepKey(), client, RESTORED_INDEX_PREFIX); + step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(true)); + } + + @Override + public void onFailure(Exception e) { + fail("expecting successful response but got: [" + e.getMessage() + "]"); + } + }); + } + } + + public void testResponseStatusHandling() { + String indexName = randomAlphaOfLength(10); + String policyName = "test-ilm-policy"; + Map ilmCustom = new HashMap<>(); + String snapshotName = indexName + "-" + policyName; + ilmCustom.put("snapshot_name", snapshotName); + String repository = "repository"; + ilmCustom.put("snapshot_repository", repository); + + IndexMetadata.Builder indexMetadataBuilder = + IndexMetadata.builder(indexName).settings(settings(Version.CURRENT).put(LifecycleSettings.LIFECYCLE_NAME, policyName)) + .putCustom(LifecycleExecutionState.ILM_CUSTOM_METADATA_KEY, ilmCustom) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + IndexMetadata indexMetaData = indexMetadataBuilder.build(); + + ClusterState clusterState = + ClusterState.builder(emptyClusterState()).metadata(Metadata.builder().put(indexMetaData, true).build()).build(); + + { + RestoreSnapshotResponse responseWithOKStatus = new RestoreSnapshotResponse(new RestoreInfo("test", List.of(), 1, 1)); + try (NoOpClient clientPropagatingOKResponse = getClientTriggeringResponse(responseWithOKStatus)) { + MountSnapshotStep step = new MountSnapshotStep(randomStepKey(), randomStepKey(), clientPropagatingOKResponse, + RESTORED_INDEX_PREFIX); + step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(true)); + } + + @Override + public void onFailure(Exception e) { + fail("expecting successful response but got: [" + e.getMessage() + "]"); + } + }); + } + } + + { + RestoreSnapshotResponse responseWithACCEPTEDStatus = new RestoreSnapshotResponse((RestoreInfo) null); + try (NoOpClient clientPropagatingACCEPTEDResponse = 
getClientTriggeringResponse(responseWithACCEPTEDStatus)) { + MountSnapshotStep step = new MountSnapshotStep(randomStepKey(), randomStepKey(), clientPropagatingACCEPTEDResponse, + RESTORED_INDEX_PREFIX); + step.performAction(indexMetaData, clusterState, null, new AsyncActionStep.Listener() { + @Override + public void onResponse(boolean complete) { + assertThat(complete, is(true)); + } + + @Override + public void onFailure(Exception e) { + fail("expecting successful response but got: [" + e.getMessage() + "]"); + } + }); + } + } + } + + @SuppressWarnings("unchecked") + private NoOpClient getClientTriggeringResponse(RestoreSnapshotResponse response) { + return new NoOpClient(getTestName()) { + @Override + protected void doExecute(ActionType action, + Request request, + ActionListener listener) { + listener.onResponse((Response) response); + } + }; + } + + @SuppressWarnings("unchecked") + private NoOpClient getRestoreSnapshotRequestAssertingClient(String expectedRepoName, String expectedSnapshotName, String indexName, + String restoredIndexPrefix) { + return new NoOpClient(getTestName()) { + @Override + protected void doExecute(ActionType action, + Request request, + ActionListener listener) { + assertThat(action.name(), is(MountSearchableSnapshotAction.NAME)); + assertTrue(request instanceof MountSearchableSnapshotRequest); + MountSearchableSnapshotRequest mountSearchableSnapshotRequest = (MountSearchableSnapshotRequest) request; + assertThat(mountSearchableSnapshotRequest.repositoryName(), is(expectedRepoName)); + assertThat(mountSearchableSnapshotRequest.snapshotName(), is(expectedSnapshotName)); + assertThat("another ILM step will wait for the restore to complete. the " + MountSnapshotStep.NAME + " step should not", + mountSearchableSnapshotRequest.waitForCompletion(), is(false)); + assertThat(mountSearchableSnapshotRequest.ignoreIndexSettings(), is(notNullValue())); + assertThat(mountSearchableSnapshotRequest.ignoreIndexSettings()[0], is(LifecycleSettings.LIFECYCLE_NAME)); + assertThat(mountSearchableSnapshotRequest.mountedIndexName(), is(restoredIndexPrefix + indexName)); + } + }; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java new file mode 100644 index 00000000000..1c48d3cca4b --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SearchableSnapshotActionTests.java @@ -0,0 +1,76 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.xcontent.XContentParser; +import org.elasticsearch.xpack.core.ilm.Step.StepKey; + +import java.io.IOException; +import java.util.List; + +import static org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction.NAME; +import static org.hamcrest.Matchers.is; + +public class SearchableSnapshotActionTests extends AbstractActionTestCase { + + @Override + public void testToSteps() { + String phase = randomAlphaOfLengthBetween(1, 10); + StepKey expectedFirstStep = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME); + StepKey expectedSecondStep = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME); + StepKey expectedThirdStep = new StepKey(phase, NAME, CleanupSnapshotStep.NAME); + StepKey expectedFourthStep = new StepKey(phase, NAME, CreateSnapshotStep.NAME); + StepKey expectedFifthStep = new StepKey(phase, NAME, MountSnapshotStep.NAME); + StepKey expectedSixthStep = new StepKey(phase, NAME, WaitForIndexColorStep.NAME); + StepKey expectedSeventhStep = new StepKey(phase, NAME, CopyExecutionStateStep.NAME); + StepKey expectedEighthStep = new StepKey(phase, NAME, CopySettingsStep.NAME); + StepKey expectedNinthStep = new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME); + + SearchableSnapshotAction action = createTestInstance(); + StepKey nextStepKey = new StepKey(phase, randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(1, 5)); + + List steps = action.toSteps(null, phase, nextStepKey); + assertThat(steps.size(), is(9)); + + assertThat(steps.get(0).getKey(), is(expectedFirstStep)); + assertThat(steps.get(1).getKey(), is(expectedSecondStep)); + assertThat(steps.get(2).getKey(), is(expectedThirdStep)); + assertThat(steps.get(3).getKey(), is(expectedFourthStep)); + assertThat(steps.get(4).getKey(), is(expectedFifthStep)); + assertThat(steps.get(5).getKey(), is(expectedSixthStep)); + assertThat(steps.get(6).getKey(), is(expectedSeventhStep)); + assertThat(steps.get(7).getKey(), is(expectedEighthStep)); + assertThat(steps.get(8).getKey(), is(expectedNinthStep)); + + AsyncActionBranchingStep branchStep = (AsyncActionBranchingStep) steps.get(3); + assertThat(branchStep.getNextKeyOnIncompleteResponse(), is(expectedThirdStep)); + } + + @Override + protected SearchableSnapshotAction doParseInstance(XContentParser parser) throws IOException { + return SearchableSnapshotAction.parse(parser); + } + + @Override + protected SearchableSnapshotAction createTestInstance() { + return randomInstance(); + } + + @Override + protected Writeable.Reader instanceReader() { + return SearchableSnapshotAction::new; + } + + @Override + protected SearchableSnapshotAction mutateInstance(SearchableSnapshotAction instance) throws IOException { + return randomInstance(); + } + + static SearchableSnapshotAction randomInstance() { + return new SearchableSnapshotAction(randomAlphaOfLengthBetween(5, 10)); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java index b2a0dcfcc35..0e575f8482c 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/ShrinkActionTests.java @@ -174,7 +174,7 @@ public class ShrinkActionTests extends AbstractActionTestCase { assertTrue(steps.get(7) instanceof CopyExecutionStateStep); 
assertThat(steps.get(7).getKey(), equalTo(expectedEighthKey)); assertThat(steps.get(7).getNextStepKey(), equalTo(expectedNinthKey)); - assertThat(((CopyExecutionStateStep) steps.get(7)).getShrunkIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); + assertThat(((CopyExecutionStateStep) steps.get(7)).getTargetIndexPrefix(), equalTo(ShrinkAction.SHRUNKEN_INDEX_PREFIX)); assertTrue(steps.get(8) instanceof ShrinkSetAliasStep); assertThat(steps.get(8).getKey(), equalTo(expectedNinthKey)); diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java new file mode 100644 index 00000000000..0fc6489c22f --- /dev/null +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/SwapAliasesAndDeleteSourceIndexStepTests.java @@ -0,0 +1,135 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.core.ilm; + +import org.elasticsearch.Version; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.action.ActionType; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest; +import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.AliasMetadata; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.Metadata; +import org.elasticsearch.test.client.NoOpClient; +import org.elasticsearch.xpack.core.ilm.AsyncActionStep.Listener; +import org.elasticsearch.xpack.core.ilm.Step.StepKey; + +import java.util.Arrays; +import java.util.List; + +import static org.elasticsearch.xpack.core.ilm.AbstractStepMasterTimeoutTestCase.emptyClusterState; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; + +public class SwapAliasesAndDeleteSourceIndexStepTests extends AbstractStepTestCase { + + @Override + public SwapAliasesAndDeleteSourceIndexStep createRandomInstance() { + StepKey stepKey = randomStepKey(); + StepKey nextStepKey = randomStepKey(); + String restoredIndexPrefix = randomAlphaOfLength(10); + return new SwapAliasesAndDeleteSourceIndexStep(stepKey, nextStepKey, client, restoredIndexPrefix); + } + + @Override + protected SwapAliasesAndDeleteSourceIndexStep copyInstance(SwapAliasesAndDeleteSourceIndexStep instance) { + return new SwapAliasesAndDeleteSourceIndexStep(instance.getKey(), instance.getNextStepKey(), instance.getClient(), + instance.getTargetIndexPrefix()); + } + + @Override + public SwapAliasesAndDeleteSourceIndexStep mutateInstance(SwapAliasesAndDeleteSourceIndexStep instance) { + StepKey key = instance.getKey(); + StepKey nextKey = instance.getNextStepKey(); + String restoredIndexPrefix = instance.getTargetIndexPrefix(); + switch (between(0, 2)) { + case 0: + key = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 1: + nextKey = new StepKey(key.getPhase(), key.getAction(), key.getName() + randomAlphaOfLength(5)); + break; + case 2: + 
restoredIndexPrefix += randomAlphaOfLength(5); + break; + default: + throw new AssertionError("Illegal randomisation branch"); + } + return new SwapAliasesAndDeleteSourceIndexStep(key, nextKey, instance.getClient(), restoredIndexPrefix); + } + + public void testPerformAction() { + String sourceIndexName = randomAlphaOfLength(10); + IndexMetadata.Builder sourceIndexMetadataBuilder = IndexMetadata.builder(sourceIndexName).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + AliasMetadata.Builder aliasBuilder = AliasMetadata.builder(randomAlphaOfLengthBetween(3, 10)); + if (randomBoolean()) { + aliasBuilder.routing(randomAlphaOfLengthBetween(1, 10)); + } + if (randomBoolean()) { + aliasBuilder.searchRouting(randomAlphaOfLengthBetween(1, 10)); + } + if (randomBoolean()) { + aliasBuilder.indexRouting(randomAlphaOfLengthBetween(1, 10)); + } + aliasBuilder.writeIndex(randomBoolean()); + AliasMetadata aliasMetaData = aliasBuilder.build(); + IndexMetadata sourceIndexMetaData = sourceIndexMetadataBuilder.putAlias(aliasMetaData).build(); + + String targetIndexPrefix = "index_prefix"; + String targetIndexName = targetIndexPrefix + sourceIndexName; + + List expectedAliasActions = Arrays.asList( + AliasActions.removeIndex().index(sourceIndexName), + AliasActions.add().index(targetIndexName).alias(sourceIndexName), + AliasActions.add().index(targetIndexName).alias(aliasMetaData.alias()) + .searchRouting(aliasMetaData.searchRouting()).indexRouting(aliasMetaData.indexRouting()) + .writeIndex(null)); + + try (NoOpClient client = getIndicesAliasAssertingClient(expectedAliasActions)) { + SwapAliasesAndDeleteSourceIndexStep step = new SwapAliasesAndDeleteSourceIndexStep(randomStepKey(), randomStepKey(), + client, targetIndexPrefix); + + IndexMetadata.Builder targetIndexMetaDataBuilder = IndexMetadata.builder(targetIndexName).settings(settings(Version.CURRENT)) + .numberOfShards(randomIntBetween(1, 5)).numberOfReplicas(randomIntBetween(0, 5)); + + ClusterState clusterState = ClusterState.builder(emptyClusterState()) + .metadata( + Metadata.builder() + .put(sourceIndexMetaData, true) + .put(targetIndexMetaDataBuilder) + .build() + ).build(); + + step.performAction(sourceIndexMetaData, clusterState, null, new Listener() { + @Override + public void onResponse(boolean complete) { + } + + @Override + public void onFailure(Exception e) { + } + }); + } + } + + private NoOpClient getIndicesAliasAssertingClient(List expectedAliasActions) { + return new NoOpClient(getTestName()) { + @Override + protected void doExecute(ActionType action, + Request request, + ActionListener listener) { + assertThat(action.name(), is(IndicesAliasesAction.NAME)); + assertTrue(request instanceof IndicesAliasesRequest); + assertThat(((IndicesAliasesRequest) request).getAliasActions(), equalTo(expectedAliasActions)); + } + }; + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java index a9de66469a4..cfd11bafeae 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/TimeseriesLifecycleTypeTests.java @@ -45,6 +45,7 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase { private static final FreezeAction TEST_FREEZE_ACTION = new FreezeAction(); private static final 
SetPriorityAction TEST_PRIORITY_ACTION = new SetPriorityAction(0); private static final UnfollowAction TEST_UNFOLLOW_ACTION = new UnfollowAction(); + private static final SearchableSnapshotAction TEST_SEARCHABLE_SNAPSHOT_ACTION = new SearchableSnapshotAction("repo"); public void testValidatePhases() { boolean invalid = randomBoolean(); @@ -595,6 +596,8 @@ public class TimeseriesLifecycleTypeTests extends ESTestCase { return TEST_PRIORITY_ACTION; case UnfollowAction.NAME: return TEST_UNFOLLOW_ACTION; + case SearchableSnapshotAction.NAME: + return TEST_SEARCHABLE_SNAPSHOT_ACTION; default: throw new IllegalArgumentException("unsupported timeseries phase action [" + actionName + "]"); } diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java index 7461918a5df..dcbbd56e3a1 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/ilm/WaitForIndexColorStepTests.java @@ -37,7 +37,8 @@ public class WaitForIndexColorStepTests extends AbstractStepTestCase { + + @Override + protected Writeable.Reader instanceReader() { + return SearchableSnapshotShardStats::new; + } + + @Override + protected SearchableSnapshotShardStats createTestInstance() { + SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5)); + IndexId indexId = new IndexId(randomAlphaOfLength(5), randomAlphaOfLength(5)); + ShardRouting shardRouting = TestShardRouting.newShardRouting(randomAlphaOfLength(5), randomInt(10), randomAlphaOfLength(5), + randomBoolean(), ShardRoutingState.STARTED); + + final List inputStats = new ArrayList<>(); + for (int j = 0; j < randomInt(20); j++) { + inputStats.add(randomCacheIndexInputStats()); + } + return new SearchableSnapshotShardStats(shardRouting, snapshotId, indexId, inputStats); + } + + private CacheIndexInputStats randomCacheIndexInputStats() { + return new CacheIndexInputStats(randomAlphaOfLength(10), randomNonNegativeLong(), + randomNonNegativeLong(), randomNonNegativeLong(), + randomCounter(), randomCounter(), + randomCounter(), randomCounter(), + randomCounter(), randomCounter(), + randomCounter(), randomTimedCounter(), + randomTimedCounter(), randomTimedCounter()); + } + + private Counter randomCounter() { + return new Counter(randomLong(), randomLong(), randomLong(), randomLong()); + } + + private TimedCounter randomTimedCounter() { + return new TimedCounter(randomLong(), randomLong(), randomLong(), randomLong(), randomLong()); + } +} diff --git a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java index cf1a238236e..878b2ce56e5 100644 --- a/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java +++ b/x-pack/plugin/core/src/test/java/org/elasticsearch/xpack/core/slm/history/SnapshotHistoryStoreTests.java @@ -40,6 +40,7 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.awaitLatch; +import static org.elasticsearch.xpack.core.ilm.GenerateSnapshotNameStep.generateSnapshotName; import static org.elasticsearch.xpack.core.ilm.LifecycleSettings.SLM_HISTORY_INDEX_ENABLED_SETTING; import 
static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.SLM_HISTORY_ALIAS; import static org.elasticsearch.xpack.core.slm.history.SnapshotHistoryStore.SLM_HISTORY_INDEX_PREFIX; @@ -75,8 +76,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { String policyId = randomAlphaOfLength(5); SnapshotLifecyclePolicy policy = randomSnapshotLifecyclePolicy(policyId); final long timestamp = randomNonNegativeLong(); - SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(timestamp); - String snapshotId = policy.generateSnapshotName(context); + String snapshotId = generateSnapshotName(policy.getName()); SnapshotHistoryItem record = SnapshotHistoryItem.creationSuccessRecord(timestamp, policy, snapshotId); client.setVerifier((a, r, l) -> { @@ -91,8 +91,7 @@ public class SnapshotHistoryStoreTests extends ESTestCase { String policyId = randomAlphaOfLength(5); SnapshotLifecyclePolicy policy = randomSnapshotLifecyclePolicy(policyId); final long timestamp = randomNonNegativeLong(); - SnapshotLifecyclePolicy.ResolverContext context = new SnapshotLifecyclePolicy.ResolverContext(timestamp); - String snapshotId = policy.generateSnapshotName(context); + String snapshotId = generateSnapshotName(policy.getName()); { SnapshotHistoryItem record = SnapshotHistoryItem.creationSuccessRecord(timestamp, policy, snapshotId); diff --git a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java index 1df915681e6..8cc22a36d7a 100644 --- a/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java +++ b/x-pack/plugin/ilm/qa/multi-node/src/test/java/org/elasticsearch/xpack/ilm/TimeSeriesLifecycleActionsIT.java @@ -8,6 +8,7 @@ package org.elasticsearch.xpack.ilm; import org.apache.http.entity.ContentType; import org.apache.http.entity.StringEntity; +import org.apache.http.util.EntityUtils; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import org.elasticsearch.client.Request; @@ -25,6 +26,7 @@ import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.common.xcontent.XContentType; import org.elasticsearch.common.xcontent.json.JsonXContent; import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.snapshots.SnapshotState; import org.elasticsearch.test.rest.ESRestTestCase; import org.elasticsearch.xpack.core.ilm.AllocateAction; import org.elasticsearch.xpack.core.ilm.DeleteAction; @@ -40,6 +42,7 @@ import org.elasticsearch.xpack.core.ilm.Phase; import org.elasticsearch.xpack.core.ilm.PhaseCompleteStep; import org.elasticsearch.xpack.core.ilm.ReadOnlyAction; import org.elasticsearch.xpack.core.ilm.RolloverAction; +import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction; import org.elasticsearch.xpack.core.ilm.SetPriorityAction; import org.elasticsearch.xpack.core.ilm.SetSingleNodeAllocateStep; import org.elasticsearch.xpack.core.ilm.ShrinkAction; @@ -55,6 +58,7 @@ import org.junit.Before; import java.io.IOException; import java.io.InputStream; +import java.util.ArrayList; import java.util.Collections; import java.util.HashMap; import java.util.List; @@ -1561,6 +1565,156 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase { assertBusy(() -> assertFalse("expected " + index + " to be deleted by ILM", indexExists(index))); } + public void testSearchableSnapshotAction() 
throws Exception { + String snapshotRepo = createSnapshotRepo(); + createNewSingletonPolicy("cold", new SearchableSnapshotAction(snapshotRepo)); + + createIndexWithSettings(index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy), + randomBoolean()); + + String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + this.index; + assertTrue(waitUntil(() -> { + try { + return indexExists(restoredIndexName); + } catch (IOException e) { + return false; + } + }, 30, TimeUnit.SECONDS)); + + assertBusy(() -> assertThat(explainIndex(restoredIndexName).get("step"), is(PhaseCompleteStep.NAME)), 30, TimeUnit.SECONDS); + } + + @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/54433") + public void testDeleteActionDeletesSearchableSnapshot() throws Exception { + String snapshotRepo = createSnapshotRepo(); + + // create policy with cold and delete phases + Map coldActions = + org.elasticsearch.common.collect.Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo)); + Map phases = new HashMap<>(); + phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); + phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, + new DeleteAction(true)))); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); + // PUT policy + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request createPolicyRequest = new Request("PUT", "_ilm/policy/" + policy); + createPolicyRequest.setEntity(entity); + assertOK(client().performRequest(createPolicyRequest)); + + createIndexWithSettings(index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy), + randomBoolean()); + + String[] snapshotName = new String[1]; + String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + this.index; + assertTrue(waitUntil(() -> { + try { + Map explainIndex = explainIndex(index); + if (explainIndex == null) { + // in case we missed the original index and it was deleted + explainIndex = explainIndex(restoredIndexName); + } + snapshotName[0] = (String) explainIndex.get("snapshot_name"); + return snapshotName[0] != null; + } catch (IOException e) { + return false; + } + }, 30, TimeUnit.SECONDS)); + assertBusy(() -> assertFalse(indexExists(restoredIndexName))); + + assertTrue("the snapshot we generate in the cold phase should be deleted by the delete phase", waitUntil(() -> { + try { + Request getSnapshotsRequest = new Request("GET", "_snapshot/" + snapshotRepo + "/" + snapshotName[0]); + Response getSnapshotsResponse = client().performRequest(getSnapshotsRequest); + return EntityUtils.toString(getSnapshotsResponse.getEntity()).contains("snapshot_missing_exception"); + } catch (IOException e) { + return false; + } + }, 30, TimeUnit.SECONDS)); + } + + @SuppressWarnings("unchecked") + public void testDeleteActionDoesntDeleteSearchableSnapshot() throws Exception { + String snapshotRepo = createSnapshotRepo(); + + // create policy with cold and delete phases + Map coldActions = + org.elasticsearch.common.collect.Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo)); + Map phases =
new HashMap<>(); + phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions)); + phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME, + new DeleteAction(false)))); + LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases); + // PUT policy + XContentBuilder builder = jsonBuilder(); + lifecyclePolicy.toXContent(builder, null); + final StringEntity entity = new StringEntity( + "{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON); + Request createPolicyRequest = new Request("PUT", "_ilm/policy/" + policy); + createPolicyRequest.setEntity(entity); + assertOK(client().performRequest(createPolicyRequest)); + + createIndexWithSettings(index, + Settings.builder() + .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1) + .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0) + .put(LifecycleSettings.LIFECYCLE_NAME, policy), + randomBoolean()); + + String[] snapshotName = new String[1]; + String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + this.index; + assertTrue(waitUntil(() -> { + try { + Map explainIndex = explainIndex(index); + if (explainIndex == null) { + // in case we missed the original index and it was deleted + explainIndex = explainIndex(restoredIndexName); + } + snapshotName[0] = (String) explainIndex.get("snapshot_name"); + return snapshotName[0] != null; + } catch (IOException e) { + return false; + } + }, 30, TimeUnit.SECONDS)); + assertBusy(() -> assertFalse(indexExists(restoredIndexName))); + + assertTrue("the snapshot we generate in the cold phase should not be deleted by the delete phase", waitUntil(() -> { + try { + Request getSnapshotsRequest = new Request("GET", "_snapshot/" + snapshotRepo + "/" + snapshotName[0]); + Response getSnapshotsResponse = client().performRequest(getSnapshotsRequest); + Map snapshotsResponseMap; + try (InputStream is = getSnapshotsResponse.getEntity().getContent()) { + snapshotsResponseMap = XContentHelper.convertToMap(XContentType.JSON.xContent(), is, true); + } + if (snapshotsResponseMap.get("snapshots") != null) { + ArrayList snapshots = (ArrayList) snapshotsResponseMap.get("snapshots"); + for (Object snapshot : snapshots) { + Map snapshotInfoMap = (Map) snapshot; + if (snapshotInfoMap.get("snapshot").equals(snapshotName[0]) && + // wait for the snapshot to be completed (successfully or not) otherwise the teardown might fail + SnapshotState.valueOf((String) snapshotInfoMap.get("state")).completed()) { + return true; + } + } + } + return false; + } catch (IOException e) { + return false; + } + }, 30, TimeUnit.SECONDS)); + } + // This method should be called inside an assertBusy, it has no retry logic of its own private void assertHistoryIsPresent(String policyName, String indexName, boolean success, String stepName) throws IOException { assertHistoryIsPresent(policyName, indexName, success, null, null, stepName); diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java index 2ea3c9a3434..02c2008566d 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/IndexLifecycle.java @@ -48,6 +48,7 @@ import org.elasticsearch.xpack.core.ilm.LifecycleSettings; import org.elasticsearch.xpack.core.ilm.LifecycleType; import org.elasticsearch.xpack.core.ilm.ReadOnlyAction; import org.elasticsearch.xpack.core.ilm.RolloverAction;
+import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction; import org.elasticsearch.xpack.core.ilm.SetPriorityAction; import org.elasticsearch.xpack.core.ilm.ShrinkAction; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; @@ -248,7 +249,10 @@ public class IndexLifecycle extends Plugin implements ActionPlugin { new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(FreezeAction.NAME), FreezeAction::parse), new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SetPriorityAction.NAME), SetPriorityAction::parse), new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(WaitForSnapshotAction.NAME), WaitForSnapshotAction::parse) + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(WaitForSnapshotAction.NAME), + WaitForSnapshotAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SearchableSnapshotAction.NAME), + SearchableSnapshotAction::parse) ); } diff --git a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java index 9b9035bb859..571ff9c4962 100644 --- a/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java +++ b/x-pack/plugin/ilm/src/main/java/org/elasticsearch/xpack/ilm/action/TransportExplainLifecycleAction.java @@ -121,6 +121,8 @@ public class TransportExplainLifecycleAction lifecycleState.getPhaseTime(), lifecycleState.getActionTime(), lifecycleState.getStepTime(), + lifecycleState.getSnapshotRepository(), + lifecycleState.getSnapshotName(), stepInfoBytes, phaseExecutionInfo); } else { diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java index 8b9ad139305..6c942b4d4ba 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/ilm/IndexLifecycleMetadataTests.java @@ -31,6 +31,7 @@ import org.elasticsearch.xpack.core.ilm.OperationMode; import org.elasticsearch.xpack.core.ilm.Phase; import org.elasticsearch.xpack.core.ilm.ReadOnlyAction; import org.elasticsearch.xpack.core.ilm.RolloverAction; +import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction; import org.elasticsearch.xpack.core.ilm.SetPriorityAction; import org.elasticsearch.xpack.core.ilm.ShrinkAction; import org.elasticsearch.xpack.core.ilm.TimeseriesLifecycleType; @@ -89,7 +90,8 @@ public class IndexLifecycleMetadataTests extends AbstractDiffableSerializationTe new NamedWriteableRegistry.Entry(LifecycleAction.class, ShrinkAction.NAME, ShrinkAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, FreezeAction.NAME, FreezeAction::new), new NamedWriteableRegistry.Entry(LifecycleAction.class, SetPriorityAction.NAME, SetPriorityAction::new), - new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new) + new NamedWriteableRegistry.Entry(LifecycleAction.class, UnfollowAction.NAME, UnfollowAction::new), + new NamedWriteableRegistry.Entry(LifecycleAction.class, SearchableSnapshotAction.NAME, SearchableSnapshotAction::new) )); } @@ -109,7 +111,9 @@ public class IndexLifecycleMetadataTests 
extends AbstractDiffableSerializationTe new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(ShrinkAction.NAME), ShrinkAction::parse), new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(FreezeAction.NAME), FreezeAction::parse), new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SetPriorityAction.NAME), SetPriorityAction::parse), - new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse) + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(UnfollowAction.NAME), UnfollowAction::parse), + new NamedXContentRegistry.Entry(LifecycleAction.class, new ParseField(SearchableSnapshotAction.NAME), + SearchableSnapshotAction::parse) )); return new NamedXContentRegistry(entries); } diff --git a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java index b3149cc177e..5b3e814d637 100644 --- a/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java +++ b/x-pack/plugin/ilm/src/test/java/org/elasticsearch/xpack/slm/SnapshotLifecyclePolicyTests.java @@ -24,8 +24,6 @@ import static org.elasticsearch.xpack.core.slm.SnapshotLifecyclePolicyMetadataTe import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.greaterThan; -import static org.hamcrest.Matchers.startsWith; public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase { @@ -42,29 +40,6 @@ public class SnapshotLifecyclePolicyTests extends AbstractSerializingTestCase", "1 * * * * ?", "repo", Collections.emptyMap(), - SnapshotRetentionConfiguration.EMPTY); - assertThat(p.generateSnapshotName(context), startsWith("name-2019.03.15-")); - assertThat(p.generateSnapshotName(context).length(), greaterThan("name-2019.03.15-".length())); - - p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap(), - SnapshotRetentionConfiguration.EMPTY); - assertThat(p.generateSnapshotName(context), startsWith("name-2019.03.01-")); - - p = new SnapshotLifecyclePolicy("id", "", "1 * * * * ?", "repo", Collections.emptyMap(), - SnapshotRetentionConfiguration.EMPTY); - assertThat(p.generateSnapshotName(context), startsWith("name-2019-03-15.21:09:00-")); - } - public void testNextExecutionTime() { SnapshotLifecyclePolicy p = new SnapshotLifecyclePolicy("id", "name", "0 1 2 3 4 ? 
2099", "repo", Collections.emptyMap(), SnapshotRetentionConfiguration.EMPTY); diff --git a/x-pack/plugin/searchable-snapshots/build.gradle b/x-pack/plugin/searchable-snapshots/build.gradle new file mode 100644 index 00000000000..0ed4e994af2 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/build.gradle @@ -0,0 +1,42 @@ +evaluationDependsOn(xpackModule('core')) + +apply plugin: 'elasticsearch.esplugin' +esplugin { + name 'searchable-snapshots' + description 'A plugin for the searchable snapshots functionality' + classname 'org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots' + extendedPlugins = ['x-pack-core'] +} +archivesBaseName = 'x-pack-searchable-snapshots' + +dependencies { + compileOnly project(path: xpackModule('core'), configuration: 'default') + testCompile project(path: xpackModule('core'), configuration: 'testArtifacts') +} + +// xpack modules are installed in real clusters as the meta plugin, so +// installing them as individual plugins for integ tests doesn't make sense, +// so we disable integ tests +integTest.enabled = false + +// add all sub-projects of the qa sub-project +gradle.projectsEvaluated { + project.subprojects + .find { it.path == project.path + ":qa" } + .subprojects + .findAll { it.path.startsWith(project.path + ":qa") } + .each { check.dependsOn it.check } +} + +configurations { + testArtifacts.extendsFrom testRuntime +} + +task testJar(type: Jar) { + appendix 'test' + from sourceSets.test.output +} + +artifacts { + testArtifacts testJar +} diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle new file mode 100644 index 00000000000..ad5007a7cbc --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/azure/build.gradle @@ -0,0 +1,100 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ +import org.elasticsearch.gradle.info.BuildParams +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +final Project fixture = project(':test:fixtures:azure-fixture') +final Project repositoryPlugin = project(':plugins:repository-azure') + +dependencies { + testCompile project(path: xpackModule('searchable-snapshots'), configuration: 'testArtifacts') + testCompile repositoryPlugin +} + +restResources { + restApi { + includeCore 'indices', 'search', 'bulk', 'snapshot', 'nodes', '_common' + includeXpack 'searchable_snapshots' + } +} + +boolean useFixture = false +String azureAccount = System.getenv("azure_storage_account") +String azureKey = System.getenv("azure_storage_key") +String azureContainer = System.getenv("azure_storage_container") +String azureBasePath = System.getenv("azure_storage_base_path") +String azureSasToken = System.getenv("azure_storage_sas_token") + +if (!azureAccount && !azureKey && !azureContainer && !azureBasePath && !azureSasToken) { + azureAccount = 'azure_integration_test_account' + azureKey = 'YXp1cmVfaW50ZWdyYXRpb25fdGVzdF9rZXk=' // The key is "azure_integration_test_key" encoded using base64 + azureContainer = 'container' + azureBasePath = '' + azureSasToken = '' + useFixture = true + +} else if (!azureAccount || !azureKey || !azureContainer || !azureBasePath || !azureSasToken) { + throw new IllegalArgumentException("not all options specified to run against external Azure service are present") +} + +if (useFixture) { + apply plugin: 'elasticsearch.test.fixtures' + testFixtures.useFixture(fixture.path, 'azure-fixture-other') +} + +integTest { + dependsOn repositoryPlugin.bundlePlugin + runner { + systemProperty 'test.azure.container', azureContainer + systemProperty 'test.azure.base_path', azureBasePath + "/searchable_snapshots_tests" + } +} + +testClusters.integTest { + testDistribution = 'DEFAULT' + plugin file(repositoryPlugin.bundlePlugin.archiveFile) + + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.searchable_snapshots_feature_enabled', 'true' + } + + keystore 'azure.client.searchable_snapshots.account', azureAccount + keystore 'azure.client.searchable_snapshots.key', azureKey + keystore 'azure.client.searchable_snapshots.sas_token', azureSasToken + + setting 'xpack.license.self_generated.type', 'trial' + + if (useFixture) { + def fixtureAddress = { fixtureName -> + assert useFixture: 'closure should not be used without a fixture' + int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.8091" + assert ephemeralPort > 0 + '127.0.0.1:' + ephemeralPort + } + setting 'azure.client.searchable_snapshots.endpoint_suffix', + { "ignored;DefaultEndpointsProtocol=http;BlobEndpoint=http://${-> fixtureAddress('azure-fixture-other')}" }, IGNORE_VALUE + + } else { + println "Using an external service to test " + project.name + } +} + diff --git a/x-pack/plugin/searchable-snapshots/qa/azure/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/azure/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java new file mode 100644 index 00000000000..1f5822cfff9 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/azure/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AzureSearchableSnapshotsIT.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.searchablesnapshots; + +import org.elasticsearch.common.settings.Settings; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; + +public class AzureSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + + @Override + protected String repositoryType() { + return "azure"; + } + + @Override + protected Settings repositorySettings() { + final String container = System.getProperty("test.azure.container"); + assertThat(container, not(blankOrNullString())); + + final String basePath = System.getProperty("test.azure.base_path"); + assertThat(basePath, not(blankOrNullString())); + + return Settings.builder().put("client", "searchable_snapshots").put("container", container).put("base_path", basePath).build(); + } +} diff --git a/x-pack/plugin/searchable-snapshots/qa/build.gradle b/x-pack/plugin/searchable-snapshots/qa/build.gradle new file mode 100644 index 00000000000..e1c6fb4f95b --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/build.gradle @@ -0,0 +1,6 @@ +apply plugin: 'elasticsearch.build' +test.enabled = false + +dependencies { + compile project(':test:framework') +} diff --git a/x-pack/plugin/searchable-snapshots/qa/rest/build.gradle b/x-pack/plugin/searchable-snapshots/qa/rest/build.gradle new file mode 100644 index 00000000000..71bf5e8d2e3 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/rest/build.gradle @@ -0,0 +1,31 @@ +import org.elasticsearch.gradle.info.BuildParams + +apply plugin: 'elasticsearch.testclusters' +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +dependencies { + testCompile project(path: xpackModule('searchable-snapshots'), configuration: 'testArtifacts') +} + +final File repoDir = file("$buildDir/testclusters/repo") + +integTest.runner { + systemProperty 'tests.path.repo', repoDir +} + +testClusters.integTest { + testDistribution = 'DEFAULT' + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.searchable_snapshots_feature_enabled', 'true' + } + setting 'path.repo', repoDir.absolutePath + setting 'xpack.license.self_generated.type', 'trial' +} + +restResources { + restApi { + includeCore 'indices', 'search', 'bulk', 'snapshot', 'nodes', '_common' + includeXpack 'searchable_snapshots' + } +} diff --git a/x-pack/plugin/searchable-snapshots/qa/rest/src/test/java/org/elasticsearch/xpack/searchablesnapshots/rest/FsSearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/java/org/elasticsearch/xpack/searchablesnapshots/rest/FsSearchableSnapshotsIT.java new file mode 100644 index 00000000000..a3efec0c8f2 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/java/org/elasticsearch/xpack/searchablesnapshots/rest/FsSearchableSnapshotsIT.java @@ -0,0 +1,32 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.searchablesnapshots.rest; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.xpack.searchablesnapshots.AbstractSearchableSnapshotsRestTestCase; + +public class FsSearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + + @Override + protected String repositoryType() { + return FsRepository.TYPE; + } + + @Override + protected Settings repositorySettings() { + final Settings.Builder settings = Settings.builder(); + settings.put("location", System.getProperty("tests.path.repo")); + if (randomBoolean()) { + settings.put("compress", randomBoolean()); + } + if (randomBoolean()) { + settings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + } + return settings.build(); + } +} diff --git a/x-pack/plugin/searchable-snapshots/qa/rest/src/test/java/org/elasticsearch/xpack/searchablesnapshots/rest/SearchableSnapshotsClientYamlTestSuiteIT.java b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/java/org/elasticsearch/xpack/searchablesnapshots/rest/SearchableSnapshotsClientYamlTestSuiteIT.java new file mode 100644 index 00000000000..04e420bfba5 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/java/org/elasticsearch/xpack/searchablesnapshots/rest/SearchableSnapshotsClientYamlTestSuiteIT.java @@ -0,0 +1,22 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.rest; + +import com.carrotsearch.randomizedtesting.annotations.ParametersFactory; +import org.elasticsearch.test.rest.yaml.ClientYamlTestCandidate; +import org.elasticsearch.test.rest.yaml.ESClientYamlSuiteTestCase; + +public class SearchableSnapshotsClientYamlTestSuiteIT extends ESClientYamlSuiteTestCase { + + public SearchableSnapshotsClientYamlTestSuiteIT(final ClientYamlTestCandidate testCandidate) { + super(testCandidate); + } + + @ParametersFactory + public static Iterable parameters() throws Exception { + return ESClientYamlSuiteTestCase.createParameters(); + } +} diff --git a/x-pack/plugin/searchable-snapshots/qa/rest/src/test/resources/rest-api-spec/test/clear_cache.yml b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/resources/rest-api-spec/test/clear_cache.yml new file mode 100644 index 00000000000..d90aa987333 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/resources/rest-api-spec/test/clear_cache.yml @@ -0,0 +1,134 @@ +--- +setup: + + - do: + indices.create: + index: docs + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + bulk: + body: + - index: + _index: docs + _id: 1 + - field: doc + - index: + _index: docs + _id: 2 + - field: doc + - index: + _index: docs + _id: 3 + - field: other + + - do: + snapshot.create_repository: + repository: repository-fs + body: + type: fs + settings: + location: "repository-fs" + + # Remove the snapshot if a previous test failed to delete it. + # Useful for third party tests that run against a real external service.
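+ # (the `ignore: 404` parameter on the delete below turns a missing snapshot into a no-op rather than a failure)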
+ - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot + ignore: 404 + + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + + - do: + indices.delete: + index: docs + +--- +teardown: + + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot + ignore: 404 + + - do: + snapshot.delete_repository: + repository: repository-fs + +--- +"Clear searchable snapshots cache": + - skip: + version: " - 7.7.99" + reason: searchable snapshots introduced in 7.8.0 + + - do: + catch: missing + searchable_snapshots.clear_cache: {} + + - match: { error.root_cause.0.type: "resource_not_found_exception" } + - match: { error.root_cause.0.reason: "No searchable snapshots indices found" } + + - do: + catch: missing + searchable_snapshots.clear_cache: + index: _all + + - match: { error.root_cause.0.type: "resource_not_found_exception" } + - match: { error.root_cause.0.reason: "No searchable snapshots indices found" } + + - do: + catch: missing + searchable_snapshots.clear_cache: + index: "unknown" + + - do: + indices.create: + index: non_searchable_snapshot_index + + - do: + catch: missing + searchable_snapshots.clear_cache: + index: non_* + + - match: { error.root_cause.0.type: "resource_not_found_exception" } + - match: { error.root_cause.0.reason: "No searchable snapshots indices found" } + + - do: + searchable_snapshots.mount: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + body: + index: docs + + - match: { snapshot.snapshot: snapshot } + - match: { snapshot.shards.failed: 0 } + - match: { snapshot.shards.successful: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: docs + body: + query: + match: + field: "doc" + + - match: { hits.total: 2 } + + - do: + searchable_snapshots.clear_cache: + index: "docs" + + - match: { _shards.total: 1 } + - match: { _shards.failed: 0 } + diff --git a/x-pack/plugin/searchable-snapshots/qa/rest/src/test/resources/rest-api-spec/test/stats.yml b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/resources/rest-api-spec/test/stats.yml new file mode 100644 index 00000000000..6f3cd6271ee --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/rest/src/test/resources/rest-api-spec/test/stats.yml @@ -0,0 +1,212 @@ +--- +setup: + + - do: + indices.create: + index: docs + body: + settings: + number_of_shards: 1 + number_of_replicas: 0 + + - do: + bulk: + body: + - index: + _index: docs + _id: 1 + - field: foo + - index: + _index: docs + _id: 2 + - field: bar + - index: + _index: docs + _id: 3 + - field: baz + + - do: + snapshot.create_repository: + repository: repository-fs + body: + type: fs + settings: + location: "repository-fs" + + # Remove the snapshot if a previous test failed to delete it. + # Useful for third party tests that run against a real external service.
+ - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot + ignore: 404 + + - do: + snapshot.create: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + + - do: + indices.delete: + index: docs +--- +teardown: + + - do: + snapshot.delete: + repository: repository-fs + snapshot: snapshot + ignore: 404 + + - do: + snapshot.delete_repository: + repository: repository-fs + +--- +"Tests searchable snapshots stats": + - skip: + version: " - 7.7.99" + reason: searchable snapshots introduced in 7.8.0 + + - do: + catch: missing + searchable_snapshots.stats: {} + + - match: { error.root_cause.0.type: "resource_not_found_exception" } + - match: { error.root_cause.0.reason: "No searchable snapshots indices found" } + + - do: + catch: missing + searchable_snapshots.stats: + index: _all + + - match: { error.root_cause.0.type: "resource_not_found_exception" } + - match: { error.root_cause.0.reason: "No searchable snapshots indices found" } + + - do: + catch: missing + searchable_snapshots.stats: + index: "unknown" + + - do: + indices.create: + index: non_searchable_snapshot_index + + - do: + catch: missing + searchable_snapshots.stats: + index: non_* + + - match: { error.root_cause.0.type: "resource_not_found_exception" } + - match: { error.root_cause.0.reason: "No searchable snapshots indices found" } + + - do: + searchable_snapshots.mount: + repository: repository-fs + snapshot: snapshot + wait_for_completion: true + body: + index: docs + + - match: { snapshot.snapshot: snapshot } + - match: { snapshot.shards.failed: 0 } + - match: { snapshot.shards.successful: 1 } + + - do: + search: + rest_total_hits_as_int: true + index: docs + body: + query: + match_all: {} + + - match: { hits.total: 3 } + + - do: + nodes.info: {} + - set: + nodes._arbitrary_key_: node_id + + - do: + searchable_snapshots.stats: + index: "d*" + + - match: { _shards.total: 1 } + - match: { _shards.failed: 0 } + + - length: { indices: 1 } + - length: { indices.docs.shards: 1 } + - length: { indices.docs.shards.0: 1 } + - is_true: indices.docs.shards.0.0.snapshot_uuid + - is_true: indices.docs.shards.0.0.index_uuid + - match: { indices.docs.shards.0.0.shard.state: STARTED } + - match: { indices.docs.shards.0.0.shard.primary: true } + - match: { indices.docs.shards.0.0.shard.node: $node_id } + + - is_true: indices.docs.shards.0.0.files.0.name + - gt: { indices.docs.shards.0.0.files.0.length: 0 } + - gt: { indices.docs.shards.0.0.files.0.open_count: 0 } + - gt: { indices.docs.shards.0.0.files.0.close_count: 0 } + + - gte: { indices.docs.shards.0.0.files.0.contiguous_bytes_read.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.contiguous_bytes_read.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.contiguous_bytes_read.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.contiguous_bytes_read.max: 0 } + + - gte: { indices.docs.shards.0.0.files.0.non_contiguous_bytes_read.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.non_contiguous_bytes_read.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.non_contiguous_bytes_read.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.non_contiguous_bytes_read.max: 0 } + + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_read.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_read.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_read.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_read.max: 0 } + + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_written.count: 0 } + - gte: { 
indices.docs.shards.0.0.files.0.cached_bytes_written.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_written.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_written.max: 0 } + - gte: { indices.docs.shards.0.0.files.0.cached_bytes_written.time_in_nanos: 0 } + - is_false: indices.docs.shards.0.0.files.0.cached_bytes_written.time + + - gte: { indices.docs.shards.0.0.files.0.direct_bytes_read.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.direct_bytes_read.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.direct_bytes_read.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.direct_bytes_read.max: 0 } + - gte: { indices.docs.shards.0.0.files.0.direct_bytes_read.time_in_nanos: 0 } + - is_false: indices.docs.shards.0.0.files.0.direct_bytes_read.time + + - gte: { indices.docs.shards.0.0.files.0.optimized_bytes_read.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.optimized_bytes_read.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.optimized_bytes_read.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.optimized_bytes_read.max: 0 } + - gte: { indices.docs.shards.0.0.files.0.optimized_bytes_read.time_in_nanos: 0 } + - is_false: indices.docs.shards.0.0.files.0.optimized_bytes_read.time + + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.small.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.small.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.small.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.small.max: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.large.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.large.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.large.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.forward_seeks.large.max: 0 } + + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.small.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.small.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.small.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.small.max: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.large.count: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.large.sum: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.large.min: 0 } + - gte: { indices.docs.shards.0.0.files.0.backward_seeks.large.max: 0 } + + - do: + searchable_snapshots.stats: + index: "d*" + human: true + + - is_true: indices.docs.shards.0.0.files.0.cached_bytes_written.time + - is_true: indices.docs.shards.0.0.files.0.direct_bytes_read.time diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle new file mode 100644 index 00000000000..029aa8837d0 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/s3/build.gradle @@ -0,0 +1,79 @@ +import static org.elasticsearch.gradle.PropertyNormalization.IGNORE_VALUE +import org.elasticsearch.gradle.info.BuildParams + +apply plugin: 'elasticsearch.standalone-rest-test' +apply plugin: 'elasticsearch.rest-test' + +final Project fixture = project(':test:fixtures:s3-fixture') +final Project repositoryPlugin = project(':plugins:repository-s3') + +dependencies { + testCompile project(path: xpackModule('searchable-snapshots'), configuration: 'testArtifacts') + testCompile repositoryPlugin +} + +restResources { + restApi { + includeCore 'indices', 'search', 'bulk', 'snapshot', 'nodes', '_common' + includeXpack 'searchable_snapshots' + } +} + 
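+// These qa tests run against the in-repo S3 fixture with hard-coded credentials by default; setting all of the +// amazon_s3_* environment variables below switches them to an external S3 service (a partial configuration fails fast).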
+boolean useFixture = false +String s3AccessKey = System.getenv("amazon_s3_access_key") +String s3SecretKey = System.getenv("amazon_s3_secret_key") +String s3Bucket = System.getenv("amazon_s3_bucket") +String s3BasePath = System.getenv("amazon_s3_base_path") + +if (!s3AccessKey && !s3SecretKey && !s3Bucket && !s3BasePath) { + s3AccessKey = 'access_key' + s3SecretKey = 'secret_key' + s3Bucket = 'bucket' + s3BasePath = 'base_path' + useFixture = true + +} else if (!s3AccessKey || !s3SecretKey || !s3Bucket || !s3BasePath) { + throw new IllegalArgumentException("not all options specified to run against external S3 service are present") +} + +if (useFixture) { + apply plugin: 'elasticsearch.test.fixtures' + testFixtures.useFixture(fixture.path, 's3-fixture-other') +} + +integTest { + dependsOn repositoryPlugin.bundlePlugin + runner { + systemProperty 'test.s3.bucket', s3Bucket + systemProperty 'test.s3.base_path', s3BasePath + "/searchable_snapshots_tests" + } +} + +testClusters.integTest { + testDistribution = 'DEFAULT' + plugin file(repositoryPlugin.bundlePlugin.archiveFile) + + if (BuildParams.isSnapshotBuild() == false) { + systemProperty 'es.searchable_snapshots_feature_enabled', 'true' + } + + keystore 's3.client.searchable_snapshots.access_key', s3AccessKey + keystore 's3.client.searchable_snapshots.secret_key', s3SecretKey + setting 'xpack.license.self_generated.type', 'trial' + + if (useFixture) { + def fixtureAddress = { fixtureName -> + assert useFixture: 'closure should not be used without a fixture' + int ephemeralPort = fixture.postProcessFixture.ext."test.fixtures.${fixtureName}.tcp.80" + assert ephemeralPort > 0 + '127.0.0.1:' + ephemeralPort + } + + setting 's3.client.searchable_snapshots.protocol', 'http' + setting 's3.client.searchable_snapshots.endpoint', { "${-> fixtureAddress('s3-fixture-other')}" }, IGNORE_VALUE + + } else { + println "Using an external service to test " + project.name + } +} + diff --git a/x-pack/plugin/searchable-snapshots/qa/s3/src/test/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java b/x-pack/plugin/searchable-snapshots/qa/s3/src/test/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java new file mode 100644 index 00000000000..4bcf6bd6a5c --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/qa/s3/src/test/java/org/elasticsearch/xpack/searchablesnapshots/s3/S3SearchableSnapshotsIT.java @@ -0,0 +1,31 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.searchablesnapshots.s3; + +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.xpack.searchablesnapshots.AbstractSearchableSnapshotsRestTestCase; + +import static org.hamcrest.Matchers.blankOrNullString; +import static org.hamcrest.Matchers.not; + +public class S3SearchableSnapshotsIT extends AbstractSearchableSnapshotsRestTestCase { + + @Override + protected String repositoryType() { + return "s3"; + } + + @Override + protected Settings repositorySettings() { + final String bucket = System.getProperty("test.s3.bucket"); + assertThat(bucket, not(blankOrNullString())); + + final String basePath = System.getProperty("test.s3.base_path"); + assertThat(basePath, not(blankOrNullString())); + + return Settings.builder().put("client", "searchable_snapshots").put("bucket", bucket).put("base_path", basePath).build(); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/BaseSearchableSnapshotIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/BaseSearchableSnapshotIndexInput.java new file mode 100644 index 00000000000..280474fbb7a --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/BaseSearchableSnapshotIndexInput.java @@ -0,0 +1,178 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.store; + +import org.apache.lucene.store.BufferedIndexInput; +import org.apache.lucene.store.IOContext; +import org.elasticsearch.cluster.service.ClusterApplierService; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; +import org.elasticsearch.index.snapshots.blobstore.SlicedInputStream; +import org.elasticsearch.threadpool.ThreadPool; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; + +public abstract class BaseSearchableSnapshotIndexInput extends BufferedIndexInput { + + protected final BlobContainer blobContainer; + protected final FileInfo fileInfo; + protected final IOContext context; + protected final IndexInputStats stats; + protected final long offset; + protected final long length; + + // the following are only mutable so they can be adjusted after cloning/slicing + protected volatile boolean isClone; + private AtomicBoolean closed; + + public BaseSearchableSnapshotIndexInput( + String resourceDesc, + BlobContainer blobContainer, + FileInfo fileInfo, + IOContext context, + IndexInputStats stats, + long offset, + long length + ) { + super(resourceDesc, context); + this.blobContainer = Objects.requireNonNull(blobContainer); + this.fileInfo = Objects.requireNonNull(fileInfo); + this.context = Objects.requireNonNull(context); + assert fileInfo.metadata() + .hashEqualsContents() == false : "this method should only be used with blobs that are NOT stored in metadata's hash field " + + "(fileInfo: " + + fileInfo + + ')'; + this.stats = Objects.requireNonNull(stats); + this.offset = offset; + this.length = length; + this.closed = new AtomicBoolean(false); + this.isClone = false; + } + + public BaseSearchableSnapshotIndexInput( + String resourceDesc, + BlobContainer blobContainer, + FileInfo fileInfo, + 
IOContext context, + IndexInputStats stats, + long offset, + long length, + int bufferSize + ) { + this(resourceDesc, blobContainer, fileInfo, context, stats, offset, length); + setBufferSize(bufferSize); + } + + @Override + public final long length() { + return length; + } + + @Override + public BaseSearchableSnapshotIndexInput clone() { + final BaseSearchableSnapshotIndexInput clone = (BaseSearchableSnapshotIndexInput) super.clone(); + clone.closed = new AtomicBoolean(false); + clone.isClone = true; + return clone; + } + + protected void ensureOpen() throws IOException { + if (closed.get()) { + throw new IOException(toString() + " is closed"); + } + } + + @Override + public final void close() throws IOException { + if (closed.compareAndSet(false, true)) { + if (isClone == false) { + stats.incrementCloseCount(); + } + innerClose(); + } + } + + public abstract void innerClose() throws IOException; + + protected InputStream openInputStream(final long position, final long length) throws IOException { + assert assertCurrentThreadMayAccessBlobStore(); + if (fileInfo.numberOfParts() == 1L) { + assert position + length <= fileInfo.partBytes(0) : "cannot read [" + + position + + "-" + + (position + length) + + "] from [" + + fileInfo + + "]"; + return blobContainer.readBlob(fileInfo.partName(0L), position, length); + } else { + final long startPart = getPartNumberForPosition(position); + final long endPart = getPartNumberForPosition(position + length); + return new SlicedInputStream(endPart - startPart + 1L) { + @Override + protected InputStream openSlice(long slice) throws IOException { + final long currentPart = startPart + slice; + final long startInPart = (currentPart == startPart) ? getRelativePositionInPart(position) : 0L; + final long endInPart = (currentPart == endPart) + ? getRelativePositionInPart(position + length) + : getLengthOfPart(currentPart); + return blobContainer.readBlob(fileInfo.partName(currentPart), startInPart, endInPart - startInPart); + } + }; + } + } + + protected final boolean assertCurrentThreadMayAccessBlobStore() { + final String threadName = Thread.currentThread().getName(); + assert threadName.contains('[' + ThreadPool.Names.SNAPSHOT + ']') + || threadName.contains('[' + ThreadPool.Names.GENERIC + ']') + || threadName.contains('[' + ThreadPool.Names.SEARCH + ']') + || threadName.contains('[' + ThreadPool.Names.SEARCH_THROTTLED + ']') + + // Today processExistingRecoveries considers all shards and constructs a shard store snapshot on this thread, this needs + // addressing. TODO NORELEASE + || threadName.contains('[' + ThreadPool.Names.FETCH_SHARD_STORE + ']') + + // Today for as-yet-unknown reasons we sometimes try and compute the snapshot size on the cluster applier thread, which needs + // addressing. TODO NORELEASE + || threadName.contains('[' + ClusterApplierService.CLUSTER_UPDATE_THREAD_NAME + ']') + + // Unit tests access the blob store on the main test thread; simplest just to permit this rather than have them override this + // method somehow. 
+ || threadName.startsWith("TEST-") : "current thread [" + Thread.currentThread() + "] may not read " + fileInfo; + return true; + } + + private long getPartNumberForPosition(long position) { + ensureValidPosition(position); + final long part = position / fileInfo.partSize().getBytes(); + assert part <= fileInfo.numberOfParts() : "part number [" + part + "] exceeds number of parts: " + fileInfo.numberOfParts(); + assert part >= 0L : "part number [" + part + "] is negative"; + return part; + } + + private long getRelativePositionInPart(long position) { + ensureValidPosition(position); + final long pos = position % fileInfo.partSize().getBytes(); + assert pos < fileInfo.partBytes((int) getPartNumberForPosition(pos)) : "position in part [" + pos + "] exceeds part's length"; + assert pos >= 0L : "position in part [" + pos + "] is negative"; + return pos; + } + + private long getLengthOfPart(long part) { + return fileInfo.partBytes(Math.toIntExact(part)); + } + + private void ensureValidPosition(long position) { + if (position < 0L || position > fileInfo.length()) { + throw new IllegalArgumentException("Position [" + position + "] is invalid"); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/InMemoryNoOpCommitDirectory.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/InMemoryNoOpCommitDirectory.java new file mode 100644 index 00000000000..609e872ebdb --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/InMemoryNoOpCommitDirectory.java @@ -0,0 +1,125 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.store; + +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.NoLockFactory; +import org.elasticsearch.core.internal.io.IOUtils; + +import java.io.FileNotFoundException; +import java.io.IOException; +import java.nio.file.NoSuchFileException; +import java.util.Collection; +import java.util.Set; + +/** + * A {@link Directory} which wraps a read-only "real" directory with a wrapper that allows no-op (in-memory) commits, and peer recoveries + * of the same, so that we can start a shard on a completely readonly data set. 
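+ * Commit-related files (e.g. {@code segments_N}) are held in the in-memory {@link ByteBuffersDirectory} delegate, while reads + * of all other files fall through to the wrapped read-only directory.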
+ */ +public class InMemoryNoOpCommitDirectory extends FilterDirectory { + + private final Directory realDirectory; + + InMemoryNoOpCommitDirectory(Directory realDirectory) { + super(new ByteBuffersDirectory(NoLockFactory.INSTANCE)); + this.realDirectory = realDirectory; + } + + public Directory getRealDirectory() { + return realDirectory; + } + + @Override + public String[] listAll() throws IOException { + final String[] ephemeralFiles = in.listAll(); + final String[] realFiles = realDirectory.listAll(); + final String[] allFiles = new String[ephemeralFiles.length + realFiles.length]; + System.arraycopy(ephemeralFiles, 0, allFiles, 0, ephemeralFiles.length); + System.arraycopy(realFiles, 0, allFiles, ephemeralFiles.length, realFiles.length); + return allFiles; + } + + @Override + public void deleteFile(String name) throws IOException { + ensureMutable(name); + try { + in.deleteFile(name); + } catch (NoSuchFileException | FileNotFoundException e) { + // cannot delete the segments_N file in the read-only directory, but that's ok, just ignore this + } + } + + @Override + public long fileLength(String name) throws IOException { + try { + return in.fileLength(name); + } catch (NoSuchFileException | FileNotFoundException e) { + return realDirectory.fileLength(name); + } + } + + @Override + public void sync(Collection names) {} + + @Override + public void syncMetaData() {} + + @Override + public IndexOutput createOutput(String name, IOContext context) throws IOException { + ensureMutable(name); + return super.createOutput(name, context); + } + + @Override + public void rename(String source, String dest) throws IOException { + ensureMutable(source); + ensureMutable(dest); + super.rename(source, dest); + } + + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + throw new UnsupportedOperationException(); + } + + @Override + public IndexInput openInput(String name, IOContext context) throws IOException { + try { + return in.openInput(name, context); + } catch (NoSuchFileException | FileNotFoundException e) { + return realDirectory.openInput(name, context); + } + } + + @Override + public void close() throws IOException { + IOUtils.close(in, realDirectory); + } + + @Override + public Set getPendingDeletions() throws IOException { + return super.getPendingDeletions(); // read-only realDirectory has no pending deletions + } + + private static void ensureMutable(String name) { + if ((name.startsWith("segments_") + || name.startsWith("pending_segments_") + || name.matches("^recovery\\..*\\.segments_.*$")) == false) { + + throw new IllegalArgumentException("file [" + name + "] is not mutable"); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/IndexInputStats.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/IndexInputStats.java new file mode 100644 index 00000000000..6fa1604e98d --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/IndexInputStats.java @@ -0,0 +1,225 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store; + +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.store.cache.CachedBlobContainerIndexInput; + +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.atomic.LongAdder; +import java.util.function.LongConsumer; +import java.util.function.LongSupplier; + +/** + * {@link IndexInputStats} records stats for a given {@link CachedBlobContainerIndexInput}. + */ +public class IndexInputStats { + + /* A threshold beyond which an index input seeking is counted as "large" */ + static final ByteSizeValue SEEKING_THRESHOLD = new ByteSizeValue(8, ByteSizeUnit.MB); + + private final long fileLength; + private final long seekingThreshold; + private final LongSupplier currentTimeNanos; + + private final LongAdder opened = new LongAdder(); + private final LongAdder closed = new LongAdder(); + + private final Counter forwardSmallSeeks = new Counter(); + private final Counter backwardSmallSeeks = new Counter(); + + private final Counter forwardLargeSeeks = new Counter(); + private final Counter backwardLargeSeeks = new Counter(); + + private final Counter contiguousReads = new Counter(); + private final Counter nonContiguousReads = new Counter(); + + private final TimedCounter directBytesRead = new TimedCounter(); + private final TimedCounter optimizedBytesRead = new TimedCounter(); + + private final Counter cachedBytesRead = new Counter(); + private final TimedCounter cachedBytesWritten = new TimedCounter(); + + public IndexInputStats(long fileLength, LongSupplier currentTimeNanos) { + this(fileLength, SEEKING_THRESHOLD.getBytes(), currentTimeNanos); + } + + public IndexInputStats(long fileLength, long seekingThreshold, LongSupplier currentTimeNanos) { + this.fileLength = fileLength; + this.seekingThreshold = seekingThreshold; + this.currentTimeNanos = currentTimeNanos; + } + + /** + * @return the current time in nanoseconds that should be used to measure statistics. + */ + public long currentTimeNanos() { + return currentTimeNanos.getAsLong(); + } + + public void incrementOpenCount() { + opened.increment(); + } + + public void incrementCloseCount() { + closed.increment(); + } + + public void addCachedBytesRead(int bytesRead) { + cachedBytesRead.add(bytesRead); + } + + public void addCachedBytesWritten(int bytesWritten, long nanoseconds) { + cachedBytesWritten.add(bytesWritten, nanoseconds); + } + + public void addDirectBytesRead(int bytesRead, long nanoseconds) { + directBytesRead.add(bytesRead, nanoseconds); + } + + public void addOptimizedBytesRead(int bytesRead, long nanoseconds) { + optimizedBytesRead.add(bytesRead, nanoseconds); + } + + public void incrementBytesRead(long previousPosition, long currentPosition, int bytesRead) { + LongConsumer incBytesRead = (previousPosition == currentPosition) ? 
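+            // a read is contiguous when it starts exactly where the previous read ended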
contiguousReads::add : nonContiguousReads::add; + incBytesRead.accept(bytesRead); + } + + public void incrementSeeks(long currentPosition, long newPosition) { + final long delta = newPosition - currentPosition; + if (delta == 0L) { + return; + } + final boolean isLarge = isLargeSeek(delta); + if (delta > 0) { + if (isLarge) { + forwardLargeSeeks.add(delta); + } else { + forwardSmallSeeks.add(delta); + } + } else { + if (isLarge) { + backwardLargeSeeks.add(delta); + } else { + backwardSmallSeeks.add(delta); + } + } + } + + public long getFileLength() { + return fileLength; + } + + public LongAdder getOpened() { + return opened; + } + + public LongAdder getClosed() { + return closed; + } + + public Counter getForwardSmallSeeks() { + return forwardSmallSeeks; + } + + public Counter getBackwardSmallSeeks() { + return backwardSmallSeeks; + } + + public Counter getForwardLargeSeeks() { + return forwardLargeSeeks; + } + + public Counter getBackwardLargeSeeks() { + return backwardLargeSeeks; + } + + public Counter getContiguousReads() { + return contiguousReads; + } + + public Counter getNonContiguousReads() { + return nonContiguousReads; + } + + public TimedCounter getDirectBytesRead() { + return directBytesRead; + } + + public TimedCounter getOptimizedBytesRead() { + return optimizedBytesRead; + } + + public Counter getCachedBytesRead() { + return cachedBytesRead; + } + + public TimedCounter getCachedBytesWritten() { + return cachedBytesWritten; + } + + @SuppressForbidden(reason = "Handles Long.MIN_VALUE before using Math.abs()") + public boolean isLargeSeek(long delta) { + return delta != Long.MIN_VALUE && Math.abs(delta) > seekingThreshold; + } + + public static class Counter { + + private final LongAdder count = new LongAdder(); + private final LongAdder total = new LongAdder(); + private final AtomicLong min = new AtomicLong(Long.MAX_VALUE); + private final AtomicLong max = new AtomicLong(Long.MIN_VALUE); + + void add(final long value) { + count.increment(); + total.add(value); + min.updateAndGet(prev -> Math.min(prev, value)); + max.updateAndGet(prev -> Math.max(prev, value)); + } + + public long count() { + return count.sum(); + } + + public long total() { + return total.sum(); + } + + public long min() { + final long value = min.get(); + if (value == Long.MAX_VALUE) { + return 0L; + } + return value; + } + + public long max() { + final long value = max.get(); + if (value == Long.MIN_VALUE) { + return 0L; + } + return value; + } + } + + public static class TimedCounter extends Counter { + + private final LongAdder totalNanoseconds = new LongAdder(); + + void add(final long value, final long nanoseconds) { + super.add(value); + totalNanoseconds.add(nanoseconds); + } + + public long totalNanoseconds() { + return totalNanoseconds.sum(); + } + } + +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/SearchableSnapshotDirectory.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/SearchableSnapshotDirectory.java new file mode 100644 index 00000000000..9ffdd341921 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/SearchableSnapshotDirectory.java @@ -0,0 +1,393 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.index.store;
+
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.store.BaseDirectory;
+import org.apache.lucene.store.Directory;
+import org.apache.lucene.store.FilterDirectory;
+import org.apache.lucene.store.IOContext;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.store.IndexOutput;
+import org.apache.lucene.store.SingleInstanceLockFactory;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.lucene.store.ByteArrayIndexInput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.LazyInitializable;
+import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.shard.ShardPath;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot;
+import org.elasticsearch.index.store.cache.CacheFile;
+import org.elasticsearch.index.store.cache.CacheKey;
+import org.elasticsearch.index.store.cache.CachedBlobContainerIndexInput;
+import org.elasticsearch.index.store.direct.DirectBlobContainerIndexInput;
+import org.elasticsearch.repositories.IndexId;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.repositories.Repository;
+import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Objects;
+import java.util.Set;
+import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.LongSupplier;
+import java.util.function.Supplier;
+
+import static org.apache.lucene.store.BufferedIndexInput.bufferSize;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_REPOSITORY_SETTING;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING;
+
+/**
+ * Implementation of {@link Directory} that exposes files from a snapshot as a Lucene directory. Because snapshots are immutable, this
+ * implementation does not allow modification of the directory files and only supports the {@link #listAll()}, {@link #fileLength(String)}
+ * and {@link #openInput(String, IOContext)} methods.
+ *
+ * To create a {@link SearchableSnapshotDirectory} both the list of the snapshot files and a {@link BlobContainer} to read these files must
+ * be provided. The definition of the snapshot files is provided using a {@link BlobStoreIndexShardSnapshot} object which contains the name
+ * of the snapshot and all the files it contains along with their metadata. Because there is no one-to-one relationship between the
+ * original shard files and what is stored in the snapshot, the {@link BlobStoreIndexShardSnapshot} is used to map a physical file name,
+ * as expected by Lucene, to the corresponding blob (or blobs) in the snapshot.
+ */
+public class SearchableSnapshotDirectory extends BaseDirectory {
+
+    private final Supplier<BlobContainer> blobContainerSupplier;
+    private final Supplier<BlobStoreIndexShardSnapshot> snapshotSupplier;
+    private final SnapshotId snapshotId;
+    private final IndexId indexId;
+    private final ShardId shardId;
+    private final LongSupplier statsCurrentTimeNanosSupplier;
+    private final Map<String, IndexInputStats> stats;
+    private final CacheService cacheService;
+    private final boolean useCache;
+    private final Set<String> excludedFileTypes;
+    private final long uncachedChunkSize; // if negative use BlobContainer#readBlobPreferredLength, see #getUncachedChunkSize()
+    private final Path cacheDir;
+    private final AtomicBoolean closed;
+
+    // volatile fields are updated once under `this` lock, all together, iff loaded is not true.
+    private volatile BlobStoreIndexShardSnapshot snapshot;
+    private volatile BlobContainer blobContainer;
+    private volatile boolean loaded;
+
+    public SearchableSnapshotDirectory(
+        Supplier<BlobContainer> blobContainer,
+        Supplier<BlobStoreIndexShardSnapshot> snapshot,
+        SnapshotId snapshotId,
+        IndexId indexId,
+        ShardId shardId,
+        Settings indexSettings,
+        LongSupplier currentTimeNanosSupplier,
+        CacheService cacheService,
+        Path cacheDir
+    ) {
+        super(new SingleInstanceLockFactory());
+        this.snapshotSupplier = Objects.requireNonNull(snapshot);
+        this.blobContainerSupplier = Objects.requireNonNull(blobContainer);
+        this.snapshotId = Objects.requireNonNull(snapshotId);
+        this.indexId = Objects.requireNonNull(indexId);
+        this.shardId = Objects.requireNonNull(shardId);
+        this.stats = ConcurrentCollections.newConcurrentMapWithAggressiveConcurrency();
+        this.statsCurrentTimeNanosSupplier = Objects.requireNonNull(currentTimeNanosSupplier);
+        this.cacheService = Objects.requireNonNull(cacheService);
+        this.cacheDir = Objects.requireNonNull(cacheDir);
+        this.closed = new AtomicBoolean(false);
+        this.useCache = SNAPSHOT_CACHE_ENABLED_SETTING.get(indexSettings);
+        this.excludedFileTypes = new HashSet<>(SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.get(indexSettings));
+        this.uncachedChunkSize = SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING.get(indexSettings).getBytes();
+        this.loaded = false;
+        assert invariant();
+    }
+
+    private synchronized boolean invariant() {
+        assert loaded != (snapshot == null);
+        assert loaded != (blobContainer == null);
+        return true;
+    }
+
+    protected final boolean assertCurrentThreadMayLoadSnapshot() {
+        final String threadName = Thread.currentThread().getName();
+        assert threadName.contains('[' + ThreadPool.Names.GENERIC + ']')
+            // Unit tests access the blob store on the main test thread; simplest just to permit this rather than have them override this
+            // method somehow.
+            || threadName.startsWith("TEST-") : "current thread [" + Thread.currentThread() + "] may not load " + snapshotId;
+        return true;
+    }
+
+    /**
+     * Loads the snapshot if and only if the snapshot is not loaded yet.
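+     * <p>
+     * A hypothetical call site, for illustration:
+     * <pre>{@code
+     * if (directory.loadSnapshot()) {
+     *     // this caller performed the load; snapshot() and blobContainer() are now resolved
+     * }
+     * }</pre>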
+     *
+     * @return true if the snapshot was loaded by executing this method, false otherwise
+     */
+    public boolean loadSnapshot() {
+        boolean alreadyLoaded = this.loaded;
+        if (alreadyLoaded == false) {
+            synchronized (this) {
+                alreadyLoaded = this.loaded;
+                if (alreadyLoaded == false) {
+                    this.blobContainer = blobContainerSupplier.get();
+                    this.snapshot = snapshotSupplier.get();
+                    this.loaded = true;
+                }
+            }
+        }
+        assert assertCurrentThreadMayLoadSnapshot();
+        assert invariant();
+        return alreadyLoaded == false;
+    }
+
+    @Nullable
+    public BlobContainer blobContainer() {
+        final BlobContainer blobContainer = this.blobContainer;
+        assert blobContainer != null;
+        return blobContainer;
+    }
+
+    @Nullable
+    public BlobStoreIndexShardSnapshot snapshot() {
+        final BlobStoreIndexShardSnapshot snapshot = this.snapshot;
+        assert snapshot != null;
+        return snapshot;
+    }
+
+    private List<BlobStoreIndexShardSnapshot.FileInfo> files() {
+        if (loaded == false) {
+            return org.elasticsearch.common.collect.List.of();
+        }
+        final List<BlobStoreIndexShardSnapshot.FileInfo> files = snapshot().indexFiles();
+        assert files != null;
+        assert files.size() > 0;
+        return files;
+    }
+
+    public SnapshotId getSnapshotId() {
+        return snapshotId;
+    }
+
+    public IndexId getIndexId() {
+        return indexId;
+    }
+
+    public ShardId getShardId() {
+        return shardId;
+    }
+
+    public Map<String, IndexInputStats> getStats() {
+        return Collections.unmodifiableMap(stats);
+    }
+
+    @Nullable
+    public IndexInputStats getStats(String fileName) {
+        return stats.get(fileName);
+    }
+
+    private BlobStoreIndexShardSnapshot.FileInfo fileInfo(final String name) throws FileNotFoundException {
+        return files().stream()
+            .filter(fileInfo -> fileInfo.physicalName().equals(name))
+            .findFirst()
+            .orElseThrow(() -> new FileNotFoundException(name));
+    }
+
+    @Override
+    public final String[] listAll() {
+        ensureOpen();
+        return files().stream().map(BlobStoreIndexShardSnapshot.FileInfo::physicalName).sorted(String::compareTo).toArray(String[]::new);
+    }
+
+    @Override
+    public final long fileLength(final String name) throws IOException {
+        ensureOpen();
+        return fileInfo(name).length();
+    }
+
+    @Override
+    public Set<String> getPendingDeletions() {
+        throw unsupportedException();
+    }
+
+    @Override
+    public void sync(Collection<String> names) {
+        throw unsupportedException();
+    }
+
+    @Override
+    public void syncMetaData() {
+        throw unsupportedException();
+    }
+
+    @Override
+    public void deleteFile(String name) {
+        throw unsupportedException();
+    }
+
+    @Override
+    public IndexOutput createOutput(String name, IOContext context) {
+        throw unsupportedException();
+    }
+
+    @Override
+    public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) {
+        throw unsupportedException();
+    }
+
+    @Override
+    public void rename(String source, String dest) {
+        throw unsupportedException();
+    }
+
+    private static UnsupportedOperationException unsupportedException() {
+        assert false : "this operation is not supported and should not have been called";
+        return new UnsupportedOperationException("Searchable snapshot directory does not support this operation");
+    }
+
+    @Override
+    public final void close() {
+        if (closed.compareAndSet(false, true)) {
+            isOpen = false;
+            // Ideally we could let the cache evict/remove cached files by itself after the
+            // directory has been closed.
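+            // For now, clearing the cache eagerly also deletes this shard's cache files via eviction.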
+ clearCache(); + } + } + + public void clearCache() { + cacheService.removeFromCache(cacheKey -> cacheKey.belongsTo(snapshotId, indexId, shardId)); + } + + protected IndexInputStats createIndexInputStats(final long fileLength) { + return new IndexInputStats(fileLength, statsCurrentTimeNanosSupplier); + } + + public CacheKey createCacheKey(String fileName) { + return new CacheKey(snapshotId, indexId, shardId, fileName); + } + + public CacheFile getCacheFile(CacheKey cacheKey, long fileLength) throws Exception { + return cacheService.get(cacheKey, fileLength, cacheDir); + } + + @Override + public IndexInput openInput(final String name, final IOContext context) throws IOException { + ensureOpen(); + + final BlobStoreIndexShardSnapshot.FileInfo fileInfo = fileInfo(name); + if (fileInfo.metadata().hashEqualsContents()) { + final BytesRef content = fileInfo.metadata().hash(); + return new ByteArrayIndexInput("ByteArrayIndexInput(" + name + ')', content.bytes, content.offset, content.length); + } + + final IndexInputStats inputStats = stats.computeIfAbsent(name, n -> createIndexInputStats(fileInfo.length())); + if (useCache && isExcludedFromCache(name) == false) { + return new CachedBlobContainerIndexInput(this, fileInfo, context, inputStats); + } else { + return new DirectBlobContainerIndexInput( + blobContainer(), + fileInfo, + context, + inputStats, + getUncachedChunkSize(), + bufferSize(context) + ); + } + } + + private long getUncachedChunkSize() { + if (uncachedChunkSize < 0) { + return blobContainer().readBlobPreferredLength(); + } else { + return uncachedChunkSize; + } + } + + private boolean isExcludedFromCache(String name) { + final String ext = IndexFileNames.getExtension(name); + return ext != null && excludedFileTypes.contains(ext); + } + + @Override + public String toString() { + return this.getClass().getSimpleName() + "@snapshotId=" + snapshotId + " lockFactory=" + lockFactory; + } + + public static Directory create( + RepositoriesService repositories, + CacheService cache, + IndexSettings indexSettings, + ShardPath shardPath, + LongSupplier currentTimeNanosSupplier + ) throws IOException { + + final Repository repository = repositories.repository(SNAPSHOT_REPOSITORY_SETTING.get(indexSettings.getSettings())); + if (repository instanceof BlobStoreRepository == false) { + throw new IllegalArgumentException("Repository [" + repository + "] is not searchable"); + } + final BlobStoreRepository blobStoreRepository = (BlobStoreRepository) repository; + + final IndexId indexId = new IndexId(indexSettings.getIndex().getName(), SNAPSHOT_INDEX_ID_SETTING.get(indexSettings.getSettings())); + final SnapshotId snapshotId = new SnapshotId( + SNAPSHOT_SNAPSHOT_NAME_SETTING.get(indexSettings.getSettings()), + SNAPSHOT_SNAPSHOT_ID_SETTING.get(indexSettings.getSettings()) + ); + + final LazyInitializable lazyBlobContainer = new LazyInitializable<>( + () -> blobStoreRepository.shardContainer(indexId, shardPath.getShardId().id()) + ); + final LazyInitializable lazySnapshot = new LazyInitializable<>( + () -> blobStoreRepository.loadShardSnapshot(lazyBlobContainer.getOrCompute(), snapshotId) + ); + + final Path cacheDir = shardPath.getDataPath().resolve("snapshots").resolve(snapshotId.getUUID()); + Files.createDirectories(cacheDir); + + return new InMemoryNoOpCommitDirectory( + new SearchableSnapshotDirectory( + lazyBlobContainer::getOrCompute, + lazySnapshot::getOrCompute, + snapshotId, + indexId, + shardPath.getShardId(), + indexSettings.getSettings(), + currentTimeNanosSupplier, + cache, + cacheDir 
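+                // cacheDir was created above under the shard data path: snapshots/<snapshot uuid>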
+ ) + ); + } + + public static SearchableSnapshotDirectory unwrapDirectory(Directory dir) { + while (dir != null) { + if (dir instanceof SearchableSnapshotDirectory) { + return (SearchableSnapshotDirectory) dir; + } else if (dir instanceof InMemoryNoOpCommitDirectory) { + dir = ((InMemoryNoOpCommitDirectory) dir).getRealDirectory(); + } else if (dir instanceof FilterDirectory) { + dir = ((FilterDirectory) dir).getDelegate(); + } else { + dir = null; + } + } + return null; + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CacheFile.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CacheFile.java new file mode 100644 index 00000000000..a141804d7f9 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CacheFile.java @@ -0,0 +1,290 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.store.cache; + +import org.apache.lucene.store.AlreadyClosedException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedBiFunction; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.util.concurrent.AbstractRefCounted; +import org.elasticsearch.common.util.concurrent.ReleasableLock; + +import java.io.IOException; +import java.io.UncheckedIOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Objects; +import java.util.Set; +import java.util.concurrent.CompletableFuture; +import java.util.concurrent.locks.ReentrantReadWriteLock; + +public class CacheFile { + + @FunctionalInterface + public interface EvictionListener { + void onEviction(CacheFile evictedCacheFile); + } + + private static final StandardOpenOption[] OPEN_OPTIONS = new StandardOpenOption[] { + StandardOpenOption.READ, + StandardOpenOption.WRITE, + StandardOpenOption.CREATE, + StandardOpenOption.SPARSE }; + + private final AbstractRefCounted refCounter = new AbstractRefCounted("CacheFile") { + @Override + protected void closeInternal() { + CacheFile.this.finishEviction(); + } + }; + + private final ReleasableLock evictionLock; + private final ReleasableLock readLock; + + private final SparseFileTracker tracker; + private final int rangeSize; + private final String description; + private final Path file; + + private volatile Set listeners; + private volatile boolean evicted; + + @Nullable // if evicted, or there are no listeners + private volatile FileChannel channel; + + public CacheFile(String description, long length, Path file, int rangeSize) { + this.tracker = new SparseFileTracker(file.toString(), length); + this.description = Objects.requireNonNull(description); + this.file = Objects.requireNonNull(file); + this.listeners = new HashSet<>(); + this.rangeSize = rangeSize; + this.evicted = false; + + final ReentrantReadWriteLock cacheLock = new ReentrantReadWriteLock(); + this.evictionLock = new ReleasableLock(cacheLock.writeLock()); + this.readLock = new ReleasableLock(cacheLock.readLock()); + + assert invariant(); + } + + public long getLength() { + return 
tracker.getLength(); + } + + public Path getFile() { + return file; + } + + ReleasableLock fileLock() { + try (ReleasableLock ignored = evictionLock.acquire()) { + ensureOpen(); + // check if we have a channel under eviction lock + if (channel == null) { + throw new AlreadyClosedException("Cache file channel has been released and closed"); + } + // acquire next read lock while holding the eviction lock + // makes sure that channel won't be closed until this + // read lock is released + return readLock.acquire(); + } + } + + @Nullable + public FileChannel getChannel() { + return channel; + } + + public boolean acquire(final EvictionListener listener) throws IOException { + assert listener != null; + + ensureOpen(); + boolean success = false; + if (refCounter.tryIncRef()) { + try (ReleasableLock ignored = evictionLock.acquire()) { + try { + ensureOpen(); + final Set newListeners = new HashSet<>(listeners); + final boolean added = newListeners.add(listener); + assert added : "listener already exists " + listener; + maybeOpenFileChannel(newListeners); + listeners = Collections.unmodifiableSet(newListeners); + success = true; + } finally { + if (success == false) { + refCounter.decRef(); + } + } + } + } + assert invariant(); + return success; + } + + public boolean release(final EvictionListener listener) { + assert listener != null; + + boolean success = false; + try (ReleasableLock ignored = evictionLock.acquire()) { + try { + final Set newListeners = new HashSet<>(listeners); + final boolean removed = newListeners.remove(Objects.requireNonNull(listener)); + assert removed : "listener does not exist " + listener; + if (removed == false) { + throw new IllegalStateException("Cannot remove an unknown listener"); + } + maybeCloseFileChannel(newListeners); + listeners = Collections.unmodifiableSet(newListeners); + success = true; + } finally { + if (success) { + refCounter.decRef(); + } + } + } + assert invariant(); + return success; + } + + private void finishEviction() { + assert evictionLock.isHeldByCurrentThread(); + assert listeners.isEmpty(); + assert channel == null; + try { + Files.deleteIfExists(file); + } catch (IOException e) { + throw new UncheckedIOException(e); + } + } + + public void startEviction() { + if (evicted == false) { + final Set evictionListeners = new HashSet<>(); + try (ReleasableLock ignored = evictionLock.acquire()) { + if (evicted == false) { + evicted = true; + evictionListeners.addAll(listeners); + refCounter.decRef(); + } + } + evictionListeners.forEach(listener -> listener.onEviction(this)); + } + assert invariant(); + } + + private void maybeOpenFileChannel(Set listeners) throws IOException { + assert evictionLock.isHeldByCurrentThread(); + if (listeners.size() == 1) { + assert channel == null; + channel = FileChannel.open(file, OPEN_OPTIONS); + } + } + + private void maybeCloseFileChannel(Set listeners) { + assert evictionLock.isHeldByCurrentThread(); + if (listeners.size() == 0) { + assert channel != null; + try { + channel.close(); + } catch (IOException e) { + throw new UncheckedIOException("Exception when closing channel", e); + } finally { + channel = null; + } + } + } + + private boolean invariant() { + try (ReleasableLock ignored = readLock.acquire()) { + assert listeners != null; + if (listeners.isEmpty()) { + assert channel == null; + assert evicted == false || refCounter.refCount() != 0 || Files.notExists(file); + } else { + assert channel != null; + assert refCounter.refCount() > 0; + assert channel.isOpen(); + assert Files.exists(file); + } + } + 
return true; + } + + @Override + public String toString() { + return "CacheFile{" + + "desc='" + + description + + "', file=" + + file + + ", length=" + + tracker.getLength() + + ", channel=" + + (channel != null ? "yes" : "no") + + ", listeners=" + + listeners.size() + + ", evicted=" + + evicted + + ", tracker=" + + tracker + + '}'; + } + + private void ensureOpen() { + if (evicted) { + throw new AlreadyClosedException("Cache file is evicted"); + } + } + + CompletableFuture fetchRange( + long position, + CheckedBiFunction onRangeAvailable, + CheckedBiConsumer onRangeMissing + ) { + final CompletableFuture future = new CompletableFuture<>(); + try { + if (position < 0 || position > tracker.getLength()) { + throw new IllegalArgumentException("Wrong read position [" + position + "]"); + } + + ensureOpen(); + final long rangeStart = (position / rangeSize) * rangeSize; + final long rangeEnd = Math.min(rangeStart + rangeSize, tracker.getLength()); + + final List gaps = tracker.waitForRange( + rangeStart, + rangeEnd, + ActionListener.wrap( + rangeReady -> future.complete(onRangeAvailable.apply(rangeStart, rangeEnd)), + rangeFailure -> future.completeExceptionally(rangeFailure) + ) + ); + + if (gaps.size() > 0) { + final SparseFileTracker.Gap range = gaps.get(0); + assert gaps.size() == 1 : "expected 1 range to fetch but got " + gaps.size(); + assert range.start == rangeStart : "range/gap start mismatch (" + range.start + ',' + rangeStart + ')'; + assert range.end == rangeEnd : "range/gap end mismatch (" + range.end + ',' + rangeEnd + ')'; + + try { + ensureOpen(); + onRangeMissing.accept(rangeStart, rangeEnd); + range.onResponse(null); + } catch (Exception e) { + range.onFailure(e); + } + } + } catch (Exception e) { + future.completeExceptionally(e); + } + return future; + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CacheKey.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CacheKey.java new file mode 100644 index 00000000000..a84b28ca537 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CacheKey.java @@ -0,0 +1,74 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store.cache; + +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.SnapshotId; + +import java.util.Objects; + +public class CacheKey { + + private final SnapshotId snapshotId; + private final IndexId indexId; + private final ShardId shardId; + private final String fileName; + + public CacheKey(SnapshotId snapshotId, IndexId indexId, ShardId shardId, String fileName) { + this.snapshotId = Objects.requireNonNull(snapshotId); + this.indexId = Objects.requireNonNull(indexId); + this.shardId = Objects.requireNonNull(shardId); + this.fileName = Objects.requireNonNull(fileName); + } + + SnapshotId getSnapshotId() { + return snapshotId; + } + + IndexId getIndexId() { + return indexId; + } + + ShardId getShardId() { + return shardId; + } + + String getFileName() { + return fileName; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final CacheKey cacheKey = (CacheKey) o; + return Objects.equals(snapshotId, cacheKey.snapshotId) + && Objects.equals(indexId, cacheKey.indexId) + && Objects.equals(shardId, cacheKey.shardId) + && Objects.equals(fileName, cacheKey.fileName); + } + + @Override + public int hashCode() { + return Objects.hash(snapshotId, indexId, shardId, fileName); + } + + @Override + public String toString() { + return "[" + "snapshotId=" + snapshotId + ", indexId=" + indexId + ", shardId=" + shardId + ", fileName='" + fileName + "']"; + } + + public boolean belongsTo(SnapshotId snapshotId, IndexId indexId, ShardId shardId) { + return Objects.equals(this.snapshotId, snapshotId) + && Objects.equals(this.indexId, indexId) + && Objects.equals(this.shardId, shardId); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInput.java new file mode 100644 index 00000000000..5e43183b4e8 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInput.java @@ -0,0 +1,343 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.index.store.cache; + +import org.apache.logging.log4j.LogManager; +import org.apache.logging.log4j.Logger; +import org.apache.logging.log4j.message.ParameterizedMessage; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.SuppressForbidden; +import org.elasticsearch.common.io.Channels; +import org.elasticsearch.common.util.concurrent.ReleasableLock; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; +import org.elasticsearch.index.store.BaseSearchableSnapshotIndexInput; +import org.elasticsearch.index.store.IndexInputStats; +import org.elasticsearch.index.store.SearchableSnapshotDirectory; + +import java.io.EOFException; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.util.Locale; +import java.util.concurrent.atomic.AtomicReference; + +public class CachedBlobContainerIndexInput extends BaseSearchableSnapshotIndexInput { + + private static final Logger logger = LogManager.getLogger(CachedBlobContainerIndexInput.class); + private static final int COPY_BUFFER_SIZE = 8192; + + private final SearchableSnapshotDirectory directory; + private final CacheFileReference cacheFileReference; + + // last read position is kept around in order to detect (non)contiguous reads for stats + private long lastReadPosition; + // last seek position is kept around in order to detect forward/backward seeks for stats + private long lastSeekPosition; + + public CachedBlobContainerIndexInput( + SearchableSnapshotDirectory directory, + FileInfo fileInfo, + IOContext context, + IndexInputStats stats + ) { + this( + "CachedBlobContainerIndexInput(" + fileInfo.physicalName() + ")", + directory, + fileInfo, + context, + stats, + 0L, + fileInfo.length(), + new CacheFileReference(directory, fileInfo.physicalName(), fileInfo.length()) + ); + stats.incrementOpenCount(); + } + + private CachedBlobContainerIndexInput( + String resourceDesc, + SearchableSnapshotDirectory directory, + FileInfo fileInfo, + IOContext context, + IndexInputStats stats, + long offset, + long length, + CacheFileReference cacheFileReference + ) { + super(resourceDesc, directory.blobContainer(), fileInfo, context, stats, offset, length); + this.directory = directory; + this.cacheFileReference = cacheFileReference; + this.lastReadPosition = this.offset; + this.lastSeekPosition = this.offset; + } + + @Override + public void innerClose() { + if (isClone == false) { + cacheFileReference.releaseOnClose(); + } + } + + @Override + protected void readInternal(final byte[] buffer, final int offset, final int length) throws IOException { + final long position = getFilePointer() + this.offset; + + int totalBytesRead = 0; + while (totalBytesRead < length) { + final long pos = position + totalBytesRead; + final int off = offset + totalBytesRead; + final int len = length - totalBytesRead; + + int bytesRead = 0; + try { + final CacheFile cacheFile = cacheFileReference.get(); + if (cacheFile == null) { + throw new AlreadyClosedException("Failed to acquire a non-evicted cache file"); + } + + try (ReleasableLock ignored = cacheFile.fileLock()) { + bytesRead = cacheFile.fetchRange( + pos, + (start, end) -> readCacheFile(cacheFile.getChannel(), end, pos, buffer, off, len), + (start, end) -> writeCacheFile(cacheFile.getChannel(), start, end) + 
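+                        // fetchRange: the second callback first downloads and persists any missing part of the
+                        // range; the future then completes with the result of the first callback (bytes read
+                        // from the cache file)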
).get();
+                }
+            } catch (final Exception e) {
+                if (e instanceof AlreadyClosedException || (e.getCause() != null && e.getCause() instanceof AlreadyClosedException)) {
+                    try {
+                        // the cache file was evicted while fetching the range; read the bytes directly from the source
+                        bytesRead = readDirectly(pos, pos + len, buffer, off);
+                        continue;
+                    } catch (Exception inner) {
+                        e.addSuppressed(inner);
+                    }
+                }
+                throw new IOException("Failed to read data from cache", e);
+            } finally {
+                totalBytesRead += bytesRead;
+            }
+        }
+        assert totalBytesRead == length : "partial read operation, read [" + totalBytesRead + "] bytes of [" + length + "]";
+        stats.incrementBytesRead(lastReadPosition, position, totalBytesRead);
+        lastReadPosition = position + totalBytesRead;
+        lastSeekPosition = lastReadPosition;
+    }
+
+    private int readCacheFile(FileChannel fc, long end, long position, byte[] buffer, int offset, long length) throws IOException {
+        assert assertFileChannelOpen(fc);
+        int bytesRead = Channels.readFromFileChannel(fc, position, buffer, offset, Math.toIntExact(Math.min(length, end - position)));
+        stats.addCachedBytesRead(bytesRead);
+        return bytesRead;
+    }
+
+    @SuppressForbidden(reason = "Use positional writes on purpose")
+    private void writeCacheFile(FileChannel fc, long start, long end) throws IOException {
+        assert assertFileChannelOpen(fc);
+        final long length = end - start;
+        final byte[] copyBuffer = new byte[Math.toIntExact(Math.min(COPY_BUFFER_SIZE, length))];
+        logger.trace(() -> new ParameterizedMessage("writing range [{}-{}] to cache file [{}]", start, end, cacheFileReference));
+
+        int bytesCopied = 0;
+        final long startTimeNanos = stats.currentTimeNanos();
+        try (InputStream input = openInputStream(start, length)) {
+            long remaining = end - start;
+            while (remaining > 0) {
+                final int len = (remaining < copyBuffer.length) ?
Math.toIntExact(remaining) : copyBuffer.length; + int bytesRead = input.read(copyBuffer, 0, len); + if (bytesRead == -1) { + throw new EOFException( + String.format( + Locale.ROOT, + "unexpected EOF reading [%d-%d] ([%d] bytes remaining) from %s", + start, + end, + remaining, + cacheFileReference + ) + ); + } + fc.write(ByteBuffer.wrap(copyBuffer, 0, bytesRead), start + bytesCopied); + bytesCopied += bytesRead; + remaining -= bytesRead; + } + final long endTimeNanos = stats.currentTimeNanos(); + stats.addCachedBytesWritten(bytesCopied, endTimeNanos - startTimeNanos); + } + } + + @Override + protected void seekInternal(long pos) throws IOException { + if (pos > length()) { + throw new EOFException("Reading past end of file [position=" + pos + ", length=" + length() + "] for " + toString()); + } else if (pos < 0L) { + throw new IOException("Seeking to negative position [" + pos + "] for " + toString()); + } + final long position = pos + this.offset; + stats.incrementSeeks(lastSeekPosition, position); + lastSeekPosition = position; + } + + @Override + public CachedBlobContainerIndexInput clone() { + return (CachedBlobContainerIndexInput) super.clone(); + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) { + if (offset < 0 || length < 0 || offset + length > length()) { + throw new IllegalArgumentException( + "slice() " + + sliceDescription + + " out of bounds: offset=" + + offset + + ",length=" + + length + + ",fileLength=" + + length() + + ": " + + this + ); + } + final CachedBlobContainerIndexInput slice = new CachedBlobContainerIndexInput( + getFullSliceDescription(sliceDescription), + directory, + fileInfo, + context, + stats, + this.offset + offset, + length, + cacheFileReference + ); + slice.isClone = true; + return slice; + } + + @Override + public String toString() { + return "CachedBlobContainerIndexInput{" + + "cacheFileReference=" + + cacheFileReference + + ", offset=" + + offset + + ", length=" + + length() + + ", position=" + + getFilePointer() + + '}'; + } + + private int readDirectly(long start, long end, byte[] buffer, int offset) throws IOException { + final long length = end - start; + final byte[] copyBuffer = new byte[Math.toIntExact(Math.min(COPY_BUFFER_SIZE, length))]; + logger.trace(() -> new ParameterizedMessage("direct reading of range [{}-{}] for cache file [{}]", start, end, cacheFileReference)); + + int bytesCopied = 0; + final long startTimeNanos = stats.currentTimeNanos(); + try (InputStream input = openInputStream(start, length)) { + long remaining = end - start; + while (remaining > 0) { + final int len = (remaining < copyBuffer.length) ? 
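+                    // copy at most one buffer's worth of bytes per iteration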
(int) remaining : copyBuffer.length; + int bytesRead = input.read(copyBuffer, 0, len); + if (bytesRead == -1) { + throw new EOFException( + String.format( + Locale.ROOT, + "unexpected EOF reading [%d-%d] ([%d] bytes remaining) from %s", + start, + end, + remaining, + cacheFileReference + ) + ); + } + System.arraycopy(copyBuffer, 0, buffer, offset + bytesCopied, bytesRead); + bytesCopied += bytesRead; + remaining -= bytesRead; + } + final long endTimeNanos = stats.currentTimeNanos(); + stats.addDirectBytesRead(bytesCopied, endTimeNanos - startTimeNanos); + } + return bytesCopied; + } + + private static class CacheFileReference implements CacheFile.EvictionListener { + + private final long fileLength; + private final CacheKey cacheKey; + private final SearchableSnapshotDirectory directory; + private final AtomicReference cacheFile = new AtomicReference<>(); // null if evicted or not yet acquired + + private CacheFileReference(SearchableSnapshotDirectory directory, String fileName, long fileLength) { + this.cacheKey = directory.createCacheKey(fileName); + this.fileLength = fileLength; + this.directory = directory; + } + + @Nullable + CacheFile get() throws Exception { + CacheFile currentCacheFile = cacheFile.get(); + if (currentCacheFile != null) { + return currentCacheFile; + } + + final CacheFile newCacheFile = directory.getCacheFile(cacheKey, fileLength); + synchronized (this) { + currentCacheFile = cacheFile.get(); + if (currentCacheFile != null) { + return currentCacheFile; + } + if (newCacheFile.acquire(this)) { + final CacheFile previousCacheFile = cacheFile.getAndSet(newCacheFile); + assert previousCacheFile == null; + return newCacheFile; + } + } + return null; + } + + @Override + public void onEviction(final CacheFile evictedCacheFile) { + synchronized (this) { + if (cacheFile.compareAndSet(evictedCacheFile, null)) { + evictedCacheFile.release(this); + } + } + } + + void releaseOnClose() { + synchronized (this) { + final CacheFile currentCacheFile = cacheFile.getAndSet(null); + if (currentCacheFile != null) { + currentCacheFile.release(this); + } + } + } + + @Override + public String toString() { + return "CacheFileReference{" + + "cacheKey='" + + cacheKey + + '\'' + + ", fileLength=" + + fileLength + + ", acquired=" + + (cacheFile.get() != null) + + '}'; + } + } + + private static boolean assertFileChannelOpen(FileChannel fileChannel) { + assert fileChannel != null; + assert fileChannel.isOpen(); + return true; + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/SparseFileTracker.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/SparseFileTracker.java new file mode 100644 index 00000000000..7b04ff7e3b4 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/cache/SparseFileTracker.java @@ -0,0 +1,326 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store.cache; + +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.support.GroupedActionListener; +import org.elasticsearch.action.support.PlainListenableActionFuture; +import org.elasticsearch.common.Nullable; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; + +/** + * Keeps track of the contents of a file that may not be completely present. + */ +public class SparseFileTracker { + + /** + * The byte ranges of the file which are present or pending. These ranges are nonempty, disjoint (and in order) and the non-pending + * ranges are not contiguous (i.e. contiguous non-pending ranges are merged together). See {@link SparseFileTracker#invariant()} for + * details. + */ + private final TreeSet ranges = new TreeSet<>(Comparator.comparingLong(r -> r.start)); + + private final Object mutex = new Object(); + + private final String description; + + private final long length; + + public SparseFileTracker(String description, long length) { + this.description = description; + this.length = length; + if (length < 0) { + throw new IllegalArgumentException("Length [" + length + "] must be equal to or greater than 0 for [" + description + "]"); + } + } + + public long getLength() { + return length; + } + + /** + * @return the sum of the length of the ranges + */ + private long computeLengthOfRanges() { + assert Thread.holdsLock(mutex) : "sum of length of the ranges must be computed under mutex"; + return ranges.stream().mapToLong(range -> range.end - range.start).sum(); + } + + /** + * Called before reading a range from the file to ensure that this range is present. Returns a list of gaps for the caller to fill. 
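+     * <p>
+     * An illustrative sketch ({@code fill} is a hypothetical helper that downloads bytes): wait for the range
+     * {@code [100, 200)} and fill whatever is missing:
+     * <pre>{@code
+     * for (SparseFileTracker.Gap gap : tracker.waitForRange(100, 200, listener)) {
+     *     try {
+     *         fill(gap.start, gap.end); // fetch the missing bytes [gap.start, gap.end)
+     *         gap.onResponse(null);     // marks the gap filled; may complete the listener
+     *     } catch (Exception e) {
+     *         gap.onFailure(e);
+     *     }
+     * }
+     * }</pre>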
+ * + * @param start The (inclusive) start of the desired range + * @param end The (exclusive) end of the desired range + * @param listener Listener for when this range is fully available + * @return A collection of gaps that the client should fill in to satisfy this range + * @throws IllegalArgumentException if invalid range is requested + */ + public List waitForRange(final long start, final long end, final ActionListener listener) { + if (end < start || start < 0L || length < end) { + throw new IllegalArgumentException("invalid range [start=" + start + ", end=" + end + ", length=" + length + "]"); + } + + final List gaps = new ArrayList<>(); + synchronized (mutex) { + assert invariant(); + + final List pendingRanges = new ArrayList<>(); + + final Range targetRange = new Range(start, end, null); + final SortedSet earlierRanges = ranges.headSet(targetRange, false); // ranges with strictly earlier starts + if (earlierRanges.isEmpty() == false) { + final Range lastEarlierRange = earlierRanges.last(); + if (start < lastEarlierRange.end) { + if (lastEarlierRange.isPending()) { + pendingRanges.add(lastEarlierRange); + } + targetRange.start = Math.min(end, lastEarlierRange.end); + } + } + + while (targetRange.start < end) { + assert 0 <= targetRange.start : targetRange; + assert invariant(); + + final SortedSet existingRanges = ranges.tailSet(targetRange); + if (existingRanges.isEmpty()) { + final Range newPendingRange = new Range(targetRange.start, end, PlainListenableActionFuture.newListenableFuture()); + ranges.add(newPendingRange); + pendingRanges.add(newPendingRange); + gaps.add(new Gap(targetRange.start, end)); + targetRange.start = end; + } else { + final Range firstExistingRange = existingRanges.first(); + assert targetRange.start <= firstExistingRange.start : targetRange + " vs " + firstExistingRange; + + if (targetRange.start == firstExistingRange.start) { + if (firstExistingRange.isPending()) { + pendingRanges.add(firstExistingRange); + } + targetRange.start = Math.min(end, firstExistingRange.end); + } else { + final Range newPendingRange = new Range( + targetRange.start, + Math.min(end, firstExistingRange.start), + PlainListenableActionFuture.newListenableFuture() + ); + + ranges.add(newPendingRange); + pendingRanges.add(newPendingRange); + gaps.add(new Gap(targetRange.start, newPendingRange.end)); + targetRange.start = newPendingRange.end; + } + } + } + assert targetRange.start == targetRange.end : targetRange; + assert targetRange.start == end : targetRange; + assert invariant(); + + if (pendingRanges.isEmpty() == false) { + assert ranges.containsAll(pendingRanges) : ranges + " vs " + pendingRanges; + assert pendingRanges.stream().allMatch(Range::isPending) : pendingRanges; + + if (pendingRanges.size() == 1) { + assert gaps.size() <= 1 : gaps; + pendingRanges.get(0).completionListener.addListener(listener); + } else { + final GroupedActionListener groupedActionListener = new GroupedActionListener<>( + ActionListener.map(listener, ignored -> null), + pendingRanges.size() + ); + pendingRanges.forEach(pendingRange -> pendingRange.completionListener.addListener(groupedActionListener)); + } + + return Collections.unmodifiableList(gaps); + } + } + + assert gaps.isEmpty(); // or else pendingRanges.isEmpty() == false so we already returned + listener.onResponse(null); + return Collections.emptyList(); + } + + private void onGapSuccess(final long start, final long end) { + final PlainListenableActionFuture completionListener; + + synchronized (mutex) { + assert invariant(); + + final Range 
range = new Range(start, end, null); + final SortedSet existingRanges = ranges.tailSet(range); + assert existingRanges.isEmpty() == false; + + final Range existingRange = existingRanges.first(); + assert existingRange.start == start && existingRange.end == end && existingRange.isPending(); + completionListener = existingRange.completionListener; + ranges.remove(existingRange); + + final SortedSet prevRanges = ranges.headSet(existingRange); + final Range prevRange = prevRanges.isEmpty() ? null : prevRanges.last(); + assert prevRange == null || prevRange.end <= existingRange.start : prevRange + " vs " + existingRange; + final boolean mergeWithPrev = prevRange != null && prevRange.isPending() == false && prevRange.end == existingRange.start; + + final SortedSet nextRanges = ranges.tailSet(existingRange); + final Range nextRange = nextRanges.isEmpty() ? null : nextRanges.first(); + assert nextRange == null || existingRange.end <= nextRange.start : existingRange + " vs " + nextRange; + final boolean mergeWithNext = nextRange != null && nextRange.isPending() == false && existingRange.end == nextRange.start; + + if (mergeWithPrev && mergeWithNext) { + assert prevRange.isPending() == false : prevRange; + assert nextRange.isPending() == false : nextRange; + assert prevRange.end == existingRange.start : prevRange + " vs " + existingRange; + assert existingRange.end == nextRange.start : existingRange + " vs " + nextRange; + prevRange.end = nextRange.end; + ranges.remove(nextRange); + } else if (mergeWithPrev) { + assert prevRange.isPending() == false : prevRange; + assert prevRange.end == existingRange.start : prevRange + " vs " + existingRange; + prevRange.end = existingRange.end; + } else if (mergeWithNext) { + assert nextRange.isPending() == false : nextRange; + assert existingRange.end == nextRange.start : existingRange + " vs " + nextRange; + nextRange.start = existingRange.start; + } else { + ranges.add(new Range(start, end, null)); + } + + assert invariant(); + } + + completionListener.onResponse(null); + } + + private void onGapFailure(long start, long end, Exception e) { + final PlainListenableActionFuture completionListener; + + synchronized (mutex) { + assert invariant(); + + final Range range = new Range(start, end, null); + final SortedSet existingRanges = ranges.tailSet(range); + assert existingRanges.isEmpty() == false; + + final Range existingRange = existingRanges.first(); + assert existingRange.start == start && existingRange.end == end && existingRange.isPending(); + completionListener = existingRange.completionListener; + ranges.remove(existingRange); + + assert invariant(); + } + + completionListener.onFailure(e); + } + + private boolean invariant() { + long lengthOfRanges = 0L; + Range previousRange = null; + for (final Range range : ranges) { + if (previousRange != null) { + // ranges are nonempty + assert range.start < range.end : range; + + // ranges are disjoint + assert previousRange.end <= range.start : previousRange + " vs " + range; + + // contiguous, non-pending ranges are merged together + assert previousRange.isPending() || range.isPending() || previousRange.end < range.start : previousRange + " vs " + range; + + } + + // range never exceed maximum length + assert range.end <= length; + + lengthOfRanges += range.end - range.start; + previousRange = range; + } + + // sum of ranges lengths never exceed maximum length + assert computeLengthOfRanges() <= length; + + // computed length of ranges is equal to the sum of range lengths + assert computeLengthOfRanges() == 
lengthOfRanges; + + return true; + } + + @Override + public String toString() { + return "SparseFileTracker[" + description + ']'; + } + + /** + * Represents a gap in the file that a client should fill in. + */ + public class Gap implements ActionListener { + /** + * Inclusive start point of this range + */ + public final long start; + + /** + * Exclusive end point of this range + */ + public final long end; + + Gap(long start, long end) { + assert start < end : start + "-" + end; + this.start = start; + this.end = end; + } + + @Override + public void onResponse(Void aVoid) { + onGapSuccess(start, end); + } + + @Override + public void onFailure(Exception e) { + onGapFailure(start, end, e); + } + + @Override + public String toString() { + return SparseFileTracker.this.toString() + " [" + start + "-" + end + "]"; + } + } + + private static class Range { + /** + * Inclusive start point of this range + */ + long start; + + /** + * Exclusive end point of this range + */ + long end; + + @Nullable // if not pending + final PlainListenableActionFuture completionListener; + + Range(long start, long end, PlainListenableActionFuture completionListener) { + assert start <= end : start + "-" + end; + this.start = start; + this.end = end; + this.completionListener = completionListener; + } + + boolean isPending() { + return completionListener != null; + } + + @Override + public String toString() { + return "[" + start + "-" + end + (isPending() ? ", pending]" : "]"); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInput.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInput.java new file mode 100644 index 00000000000..5366e1bd1f9 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInput.java @@ -0,0 +1,399 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.store.direct; + +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.common.CheckedRunnable; +import org.elasticsearch.common.CheckedSupplier; +import org.elasticsearch.common.Nullable; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; +import org.elasticsearch.index.store.BaseSearchableSnapshotIndexInput; +import org.elasticsearch.index.store.IndexInputStats; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; +import java.util.concurrent.atomic.LongAdder; + +/** + * A {@link DirectBlobContainerIndexInput} instance corresponds to a single file from a Lucene directory that has been snapshotted. Because + * large Lucene file might be split into multiple parts during the snapshot, {@link DirectBlobContainerIndexInput} requires a + * {@link FileInfo} object at creation time. This object is used to retrieve the file name and length of the original Lucene file, as well + * as all the parts (stored as "blobs" in the repository) that composed the file in the snapshot. 
+ *
+ * For example, the following {@link FileInfo}:
+ * [name: __4vdpz_HFQ8CuKjCERX0o2A, numberOfParts: 2, partSize: 997b, partBytes: 997, metadata: name [_0_Asserting_0.pos], length [1413]]
+ *
+ * indicates that the Lucene file "_0_Asserting_0.pos" has a total length of 1413 bytes and is snapshotted into 2 parts:
+ * - __4vdpz_HFQ8CuKjCERX0o2A.part1 of size 997b
+ * - __4vdpz_HFQ8CuKjCERX0o2A.part2 of size 416b
+ *
+ * {@link DirectBlobContainerIndexInput} maintains a global position that indicates the current position in the Lucene file where the
+ * next read will occur. In the case of a Lucene file snapshotted into multiple parts, this position is used to identify which part must
+ * be read at which position (see {@link #readInternal(byte[], int, int)}). This position is also passed over to cloned and sliced inputs
+ * along with the {@link FileInfo} so that they can also track their reading position.
+ *
+ * The {@code sequentialReadSize} constructor parameter configures the {@link DirectBlobContainerIndexInput} to perform a larger read on the
+ * underlying {@link BlobContainer} than it needs in order to fill its internal buffer, on the assumption that the client is reading
+ * sequentially from this file and will consume the rest of this stream in due course. It keeps hold of the partially-consumed
+ * {@link InputStream} in {@code streamForSequentialReads}. Clones and slices, however, do not expect to be read sequentially and so make
+ * a new request to the {@link BlobContainer} each time their internal buffer needs refilling.
+ */
+public class DirectBlobContainerIndexInput extends BaseSearchableSnapshotIndexInput {
+
+ private long position;
+
+ @Nullable // if not currently reading sequentially
+ private StreamForSequentialReads streamForSequentialReads;
+ private long sequentialReadSize;
+ private static final long NO_SEQUENTIAL_READ_OPTIMIZATION = 0L;
+
+ public DirectBlobContainerIndexInput(
+ BlobContainer blobContainer,
+ FileInfo fileInfo,
+ IOContext context,
+ IndexInputStats stats,
+ long sequentialReadSize,
+ int bufferSize
+ ) {
+ this(
+ "DirectBlobContainerIndexInput(" + fileInfo.physicalName() + ")",
+ blobContainer,
+ fileInfo,
+ context,
+ stats,
+ 0L,
+ 0L,
+ fileInfo.length(),
+ sequentialReadSize,
+ bufferSize
+ );
+ stats.incrementOpenCount();
+ }
+
+ private DirectBlobContainerIndexInput(
+ String resourceDesc,
+ BlobContainer blobContainer,
+ FileInfo fileInfo,
+ IOContext context,
+ IndexInputStats stats,
+ long position,
+ long offset,
+ long length,
+ long sequentialReadSize,
+ int bufferSize
+ ) {
+ super(resourceDesc, blobContainer, fileInfo, context, stats, offset, length, bufferSize);
+ this.position = position;
+ assert sequentialReadSize >= 0;
+ this.sequentialReadSize = sequentialReadSize;
+ }
+
+ @Override
+ protected void readInternal(byte[] b, int offset, int length) throws IOException {
+ ensureOpen();
+ if (fileInfo.numberOfParts() == 1L) {
+ readInternalBytes(0, position, b, offset, length);
+ } else {
+ int len = length;
+ int off = offset;
+ while (len > 0) {
+ int currentPart = Math.toIntExact(position / fileInfo.partSize().getBytes());
+ int remainingBytesInPart;
+ if (currentPart < (fileInfo.numberOfParts() - 1)) {
+ remainingBytesInPart = Math.toIntExact(((currentPart + 1L) * fileInfo.partSize().getBytes()) - position);
+ } else {
+ remainingBytesInPart = Math.toIntExact(fileInfo.length() - position);
+ }
+ final int read = Math.min(len, remainingBytesInPart);
+ readInternalBytes(currentPart, position % fileInfo.partSize().getBytes(), b, off, read);
+ len -= read;
+ off += read;
+ }
+ }
+ }
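The sequential-read optimization implemented by `readOptimized` and `readFromNewSequentialStream` below reduces to one idea: keep the partially-consumed stream open and reuse it only when the next read starts exactly where the stream stopped; any jump closes it and falls back to a fresh request. A minimal sketch of that idea under simplified assumptions (a `ByteArrayInputStream` stands in for the remote blob; class and field names are illustrative):

[source,java]
--------------------------------------------------
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Sketch: reuse one open stream while reads remain strictly sequential.
public class SequentialReadSketch {
    private final byte[] blob;    // stands in for the remote blob
    private InputStream stream;   // partially-consumed stream, if any
    private long streamPos = -1;  // next position the open stream will serve

    SequentialReadSketch(byte[] blob) {
        this.blob = blob;
    }

    int read(long pos, byte[] b, int off, int len) throws IOException {
        if (stream == null || pos != streamPos) {
            // not sequential: discard the old stream and open a fresh one at pos
            if (stream != null) {
                stream.close();
            }
            stream = new ByteArrayInputStream(blob);
            stream.skip(pos); // ByteArrayInputStream skips fully in one call
            streamPos = pos;
        }
        final int read = stream.read(b, off, len);
        streamPos += read;
        return read;
    }

    public static void main(String[] args) throws IOException {
        final SequentialReadSketch in = new SequentialReadSketch("hello world".getBytes());
        final byte[] buf = new byte[5];
        in.read(0L, buf, 0, 5); // opens a stream
        in.read(5L, buf, 0, 5); // sequential: reuses the same stream
        in.read(2L, buf, 0, 5); // backwards jump: reopens
    }
}
--------------------------------------------------

The real implementation adds what the sketch omits: per-part streams, a bound on how much is read ahead ({@code sequentialReadSize}), and stats tracking via the `FilterInputStream` wrapper below.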
+
+ private void readInternalBytes(final int part, long pos, final byte[] b, int offset, int length) throws IOException {
+ int optimizedReadSize = readOptimized(part, pos, b, offset, length);
+ assert optimizedReadSize <= length;
+ position += optimizedReadSize;
+
+ if (optimizedReadSize < length) {
+ // we did not read everything in an optimized fashion, so read the remainder directly
+ final long startTimeNanos = stats.currentTimeNanos();
+ try (InputStream inputStream = openBlobStream(part, pos + optimizedReadSize, length - optimizedReadSize)) {
+ final int directReadSize = readFully(
+ inputStream,
+ b,
+ offset + optimizedReadSize,
+ length - optimizedReadSize,
+ () -> { throw new EOFException("Read past EOF at [" + position + "] with length [" + fileInfo.partBytes(part) + "]"); }
+ );
+ assert optimizedReadSize + directReadSize == length : optimizedReadSize + " and " + directReadSize + " vs " + length;
+ position += directReadSize;
+ final long endTimeNanos = stats.currentTimeNanos();
+ stats.addDirectBytesRead(directReadSize, endTimeNanos - startTimeNanos);
+ }
+ }
+ }
+
+ /**
+ * Attempt to satisfy this read in an optimized fashion using {@code streamForSequentialReads}.
+ * @return the number of bytes read
+ */
+ private int readOptimized(int part, long pos, byte[] b, int offset, int length) throws IOException {
+ if (sequentialReadSize == NO_SEQUENTIAL_READ_OPTIMIZATION) {
+ return 0;
+ }
+
+ int read = 0;
+ if (streamForSequentialReads == null) {
+ // starting a new sequential read
+ read = readFromNewSequentialStream(part, pos, b, offset, length);
+ } else if (streamForSequentialReads.canContinueSequentialRead(part, pos)) {
+ // continuing a sequential read that we started previously
+ read = streamForSequentialReads.read(b, offset, length);
+ if (streamForSequentialReads.isFullyRead()) {
+ // the current stream was exhausted by this read, so it should be closed
+ streamForSequentialReads.close();
+ streamForSequentialReads = null;
+ } else {
+ // the current stream contained enough data for this read and more besides, so we leave it in place
+ assert read == length : length + " remaining";
+ }
+
+ if (read < length) {
+ // the current stream didn't contain enough data for this read, so we must read more
+ read += readFromNewSequentialStream(part, pos + read, b, offset + read, length - read);
+ }
+ } else {
+ // not a sequential read, so stop optimizing for this usage pattern and fall through to the unoptimized behaviour
+ assert streamForSequentialReads.isFullyRead() == false;
+ sequentialReadSize = NO_SEQUENTIAL_READ_OPTIMIZATION;
+ closeStreamForSequentialReads();
+ }
+ return read;
+ }
+
+ private void closeStreamForSequentialReads() throws IOException {
+ try {
+ IOUtils.close(streamForSequentialReads);
+ } finally {
+ streamForSequentialReads = null;
+ }
+ }
+
+ /**
+ * If appropriate, open a new stream for sequential reading and satisfy the given read using it.
+ * @return the number of bytes read; if a new stream wasn't opened then nothing was read, so the caller should perform the read directly.
+ */ + private int readFromNewSequentialStream(int part, long pos, byte[] b, int offset, int length) throws IOException { + + assert streamForSequentialReads == null : "should only be called when a new stream is needed"; + assert sequentialReadSize > 0L : "should only be called if optimizing sequential reads"; + + final long streamLength = Math.min(sequentialReadSize, fileInfo.partBytes(part) - pos); + if (streamLength <= length) { + // streamLength <= length so this single read will consume the entire stream, so there is no need to keep hold of it, so we can + // tell the caller to read the data directly + return 0; + } + + // if we open a stream of length streamLength then it will not be completely consumed by this read, so it is worthwhile to open + // it and keep it open for future reads + final InputStream inputStream = openBlobStream(part, pos, streamLength); + streamForSequentialReads = new StreamForSequentialReads(new FilterInputStream(inputStream) { + private LongAdder bytesRead = new LongAdder(); + private LongAdder timeNanos = new LongAdder(); + + private int onOptimizedRead(CheckedSupplier read) throws IOException { + final long startTimeNanos = stats.currentTimeNanos(); + final int result = read.get(); + final long endTimeNanos = stats.currentTimeNanos(); + if (result != -1) { + bytesRead.add(result); + timeNanos.add(endTimeNanos - startTimeNanos); + } + return result; + } + + @Override + public int read() throws IOException { + return onOptimizedRead(super::read); + } + + @Override + public int read(byte[] b, int off, int len) throws IOException { + return onOptimizedRead(() -> super.read(b, off, len)); + } + + @Override + public void close() throws IOException { + super.close(); + stats.addOptimizedBytesRead(Math.toIntExact(bytesRead.sumThenReset()), timeNanos.sumThenReset()); + } + }, part, pos, streamLength); + + final int read = streamForSequentialReads.read(b, offset, length); + assert read == length : read + " vs " + length; + assert streamForSequentialReads.isFullyRead() == false; + return read; + } + + @Override + protected void seekInternal(long pos) throws IOException { + if (pos > length()) { + throw new EOFException("Reading past end of file [position=" + pos + ", length=" + length() + "] for " + toString()); + } else if (pos < 0L) { + throw new IOException("Seeking to negative position [" + pos + "] for " + toString()); + } + if (position != offset + pos) { + position = offset + pos; + closeStreamForSequentialReads(); + } + } + + @Override + public DirectBlobContainerIndexInput clone() { + final DirectBlobContainerIndexInput clone = new DirectBlobContainerIndexInput( + "clone(" + this + ")", + blobContainer, + fileInfo, + context, + stats, + position, + offset, + length, + // Clones might not be closed when they are no longer needed, but we must always close streamForSequentialReads. The simple + // solution: do not optimize sequential reads on clones. + NO_SEQUENTIAL_READ_OPTIMIZATION, + getBufferSize() + ); + clone.isClone = true; + return clone; + } + + @Override + public IndexInput slice(String sliceDescription, long offset, long length) throws IOException { + if ((offset >= 0L) && (length >= 0L) && (offset + length <= length())) { + final DirectBlobContainerIndexInput slice = new DirectBlobContainerIndexInput( + sliceDescription, + blobContainer, + fileInfo, + context, + stats, + position, + this.offset + offset, + length, + // Slices might not be closed when they are no longer needed, but we must always close streamForSequentialReads. 
The simple + // solution: do not optimize sequential reads on slices. + NO_SEQUENTIAL_READ_OPTIMIZATION, + getBufferSize() + ); + slice.isClone = true; + slice.seek(0L); + return slice; + } else { + throw new IllegalArgumentException( + "slice() " + + sliceDescription + + " out of bounds: offset=" + + offset + + ",length=" + + length + + ",fileLength=" + + length() + + ": " + + this + ); + } + } + + @Override + public void innerClose() throws IOException { + closeStreamForSequentialReads(); + } + + @Override + public String toString() { + return "DirectBlobContainerIndexInput{" + + "resourceDesc=" + + super.toString() + + ", fileInfo=" + + fileInfo + + ", offset=" + + offset + + ", length=" + + length() + + ", position=" + + position + + '}'; + } + + private InputStream openBlobStream(int part, long pos, long length) throws IOException { + assert assertCurrentThreadMayAccessBlobStore(); + return blobContainer.readBlob(fileInfo.partName(part), pos, length); + } + + /** + * Fully read up to {@code length} bytes from the given {@link InputStream} + */ + private static int readFully(InputStream inputStream, byte[] b, int offset, int length, CheckedRunnable onEOF) + throws IOException { + int totalRead = 0; + while (totalRead < length) { + final int read = inputStream.read(b, offset + totalRead, length - totalRead); + if (read == -1) { + onEOF.run(); + break; + } + totalRead += read; + } + return totalRead > 0 ? totalRead : -1; + } + + private static class StreamForSequentialReads implements Closeable { + private final InputStream inputStream; + private final int part; + private long pos; // position within this part + private final long maxPos; + + StreamForSequentialReads(InputStream inputStream, int part, long pos, long streamLength) { + this.inputStream = Objects.requireNonNull(inputStream); + this.part = part; + this.pos = pos; + this.maxPos = pos + streamLength; + } + + boolean canContinueSequentialRead(int part, long pos) { + return this.part == part && this.pos == pos; + } + + int read(byte[] b, int offset, int length) throws IOException { + assert this.pos < maxPos : "should not try and read from a fully-read stream"; + final int read = readFully(inputStream, b, offset, length, () -> {}); + assert read <= length : read + " vs " + length; + pos += read; + return read; + } + + boolean isFullyRead() { + assert this.pos <= maxPos; + return this.pos >= maxPos; + } + + @Override + public void close() throws IOException { + inputStream.close(); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotAllocator.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotAllocator.java new file mode 100644 index 00000000000..f274c44415f --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotAllocator.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */
+package org.elasticsearch.xpack.searchablesnapshots;
+
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.cluster.routing.RoutingNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.routing.allocation.AllocateUnassignedDecision;
+import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
+import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
+import org.elasticsearch.cluster.routing.allocation.FailedShard;
+import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+
+import java.util.ArrayList;
+import java.util.List;
+
+public class SearchableSnapshotAllocator implements ExistingShardsAllocator {
+
+ public static final String ALLOCATOR_NAME = "searchable_snapshot_allocator";
+
+ @Override
+ public void beforeAllocation(RoutingAllocation allocation) {}
+
+ @Override
+ public void afterPrimariesBeforeReplicas(RoutingAllocation allocation) {}
+
+ @Override
+ public void allocateUnassigned(
+ ShardRouting shardRouting,
+ RoutingAllocation allocation,
+ UnassignedAllocationHandler unassignedAllocationHandler
+ ) {
+ final AllocateUnassignedDecision allocateUnassignedDecision = decideAllocation(allocation, shardRouting);
+ assert allocateUnassignedDecision.isDecisionTaken();
+
+ if (allocateUnassignedDecision.getAllocationDecision() == AllocationDecision.YES) {
+ if (shardRouting.primary() && shardRouting.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE) {
+ // we don't care what the allocation ID is since we know that these shards cannot really be stale, so we can
+ // safely ignore the allocation ID with a forced-stale allocation
+ unassignedAllocationHandler.updateUnassigned(
+ shardRouting.unassignedInfo(),
+ RecoverySource.ExistingStoreRecoverySource.FORCE_STALE_PRIMARY_INSTANCE,
+ allocation.changes()
+ );
+ }
+ unassignedAllocationHandler.initialize(allocateUnassignedDecision.getTargetNode().getId(), null, 0L, allocation.changes());
+ } else {
+ unassignedAllocationHandler.removeAndIgnore(allocateUnassignedDecision.getAllocationStatus(), allocation.changes());
+ }
+ }
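`decideAllocation` below performs a greedy scan over all routing nodes, remembering the best decision seen so far with the preference YES over THROTTLE over NO. A condensed, standalone sketch of that selection rule with simplified types (not the actual decider API):

[source,java]
--------------------------------------------------
import java.util.Map;

// Sketch of the node-selection rule: YES beats THROTTLE, THROTTLE beats NO;
// the last node seen with the best decision wins.
public class BestNodeSketch {
    enum Decision { YES, THROTTLE, NO }

    static String pickNode(Map<String, Decision> decisionPerNode) {
        Decision best = Decision.NO;
        String bestNode = null;
        for (Map.Entry<String, Decision> e : decisionPerNode.entrySet()) {
            final Decision d = e.getValue();
            if (d == Decision.YES || (d == Decision.THROTTLE && best != Decision.YES)) {
                best = d;
                bestNode = e.getKey();
            }
        }
        return best == Decision.YES ? bestNode : null; // null: no node can accept right now
    }

    public static void main(String[] args) {
        System.out.println(pickNode(Map.of("node-a", Decision.NO, "node-b", Decision.YES)));
    }
}
--------------------------------------------------

In the real method a THROTTLE result is not a failure: it maps to `AllocateUnassignedDecision.throttle(...)`, deferring the allocation rather than rejecting it.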
+
+ private static AllocateUnassignedDecision decideAllocation(RoutingAllocation allocation, ShardRouting shardRouting) {
+ assert shardRouting.unassigned();
+ assert ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get(
+ allocation.metadata().getIndexSafe(shardRouting.index()).getSettings()
+ ).equals(ALLOCATOR_NAME);
+
+ Decision.Type bestDecision = Decision.Type.NO;
+ RoutingNode bestNode = null;
+ final List<NodeAllocationResult> nodeAllocationResults = allocation.debugDecision()
+ ? new ArrayList<>(allocation.routingNodes().size())
+ : null;
+
+ for (final RoutingNode routingNode : allocation.routingNodes()) {
+ final Decision decision = allocation.deciders().canAllocate(shardRouting, routingNode, allocation);
+ if (decision.type() == Decision.Type.YES || (decision.type() == Decision.Type.THROTTLE && bestDecision != Decision.Type.YES)) {
+ bestDecision = decision.type();
+ bestNode = routingNode;
+ }
+ if (nodeAllocationResults != null) {
+ nodeAllocationResults.add(new NodeAllocationResult(routingNode.node(), null, decision));
+ }
+ }
+
+ if (bestDecision == Decision.Type.YES) {
+ return AllocateUnassignedDecision.yes(bestNode.node(), null, nodeAllocationResults, false);
+ } else if (bestDecision == Decision.Type.THROTTLE) {
+ return AllocateUnassignedDecision.throttle(nodeAllocationResults);
+ } else {
+ return AllocateUnassignedDecision.no(UnassignedInfo.AllocationStatus.DECIDERS_NO, nodeAllocationResults);
+ }
+ }
+
+ @Override
+ public AllocateUnassignedDecision explainUnassignedShardAllocation(ShardRouting shardRouting, RoutingAllocation routingAllocation) {
+ assert shardRouting.unassigned();
+ assert routingAllocation.debugDecision();
+ return decideAllocation(routingAllocation, shardRouting);
+ }
+
+ @Override
+ public void cleanCaches() {}
+
+ @Override
+ public void applyStartedShards(List<ShardRouting> startedShards, RoutingAllocation allocation) {}
+
+ @Override
+ public void applyFailedShards(List<FailedShard> failedShards, RoutingAllocation allocation) {}
+
+ @Override
+ public int getNumberOfInFlightFetches() {
+ return 0;
+ }
+}
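An index opts into this allocator through the existing-shards-allocator index setting, which the mount action (later in this change) adds when building the mounted index's settings. A small hedged sketch of that wiring; the setting key is taken from `ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING` as used in the assertion above, and the surrounding class name is illustrative:

[source,java]
--------------------------------------------------
import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator;
import org.elasticsearch.common.settings.Settings;

// Sketch: the index-level setting that routes recovery of a mounted index's
// existing shards to SearchableSnapshotAllocator instead of the default
// gateway allocator.
public class AllocatorWiringSketch {
    public static void main(String[] args) {
        final Settings indexSettings = Settings.builder()
            .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), "searchable_snapshot_allocator")
            .build();
        // Resolves to "searchable_snapshot_allocator" for a mounted index
        System.out.println(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.get(indexSettings));
    }
}
--------------------------------------------------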
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotIndexEventListener.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotIndexEventListener.java
new file mode 100644
index 00000000000..887f942f235
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotIndexEventListener.java
@@ -0,0 +1,59 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.searchablesnapshots;
+
+import org.apache.lucene.index.SegmentInfos;
+import org.elasticsearch.cluster.routing.RecoverySource;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.seqno.SequenceNumbers;
+import org.elasticsearch.index.shard.IndexEventListener;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.SearchableSnapshotDirectory;
+import org.elasticsearch.index.translog.Translog;
+import org.elasticsearch.index.translog.TranslogException;
+import org.elasticsearch.threadpool.ThreadPool;
+
+import java.nio.file.Path;
+
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.isSearchableSnapshotStore;
+
+public class SearchableSnapshotIndexEventListener implements IndexEventListener {
+
+ @Override
+ public void beforeIndexShardRecovery(IndexShard indexShard, IndexSettings indexSettings) {
+ assert Thread.currentThread().getName().contains(ThreadPool.Names.GENERIC);
+ ensureSnapshotIsLoaded(indexShard);
+ associateNewEmptyTranslogWithIndex(indexShard);
+ }
+
+ private static void ensureSnapshotIsLoaded(IndexShard indexShard) {
+ final SearchableSnapshotDirectory directory = SearchableSnapshotDirectory.unwrapDirectory(indexShard.store().directory());
+ assert directory != null;
+
+ final boolean success = directory.loadSnapshot();
+ assert directory.listAll().length > 0 : "expecting directory listing to be non-empty";
+ assert success
+ || indexShard.routingEntry()
+ .recoverySource()
+ .getType() == RecoverySource.Type.PEER : "loading snapshot must not be called twice unless we are retrying a peer recovery";
+ }
+
+ private static void associateNewEmptyTranslogWithIndex(IndexShard indexShard) {
+ final ShardId shardId = indexShard.shardId();
+ assert isSearchableSnapshotStore(indexShard.indexSettings().getSettings()) : "Expected a searchable snapshot shard " + shardId;
+ try {
+ final SegmentInfos segmentInfos = indexShard.store().readLastCommittedSegmentsInfo();
+ final long localCheckpoint = Long.parseLong(segmentInfos.userData.get(SequenceNumbers.LOCAL_CHECKPOINT_KEY));
+ final long primaryTerm = indexShard.getPendingPrimaryTerm();
+ final String translogUUID = segmentInfos.userData.get(Translog.TRANSLOG_UUID_KEY);
+ final Path translogLocation = indexShard.shardPath().resolveTranslog();
+ Translog.createEmptyTranslog(translogLocation, shardId, localCheckpoint, primaryTerm, translogUUID, null);
+ } catch (Exception e) {
+ throw new TranslogException(shardId, "failed to associate a new translog", e);
+ }
+ }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
new file mode 100644
index 00000000000..35ea168dfd9
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshots.java
@@ -0,0 +1,276 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.searchablesnapshots; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.Build; +import org.elasticsearch.action.ActionRequest; +import org.elasticsearch.action.ActionResponse; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.node.DiscoveryNodes; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.NamedWriteableRegistry; +import org.elasticsearch.common.settings.ClusterSettings; +import org.elasticsearch.common.settings.IndexScopedSettings; +import org.elasticsearch.common.settings.Setting; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.settings.SettingsFilter; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.env.Environment; +import org.elasticsearch.env.NodeEnvironment; +import org.elasticsearch.index.IndexModule; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.engine.EngineFactory; +import org.elasticsearch.index.engine.ReadOnlyEngine; +import org.elasticsearch.index.store.SearchableSnapshotDirectory; +import org.elasticsearch.index.translog.TranslogStats; +import org.elasticsearch.license.License; +import org.elasticsearch.license.LicenseUtils; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.plugins.ActionPlugin; +import org.elasticsearch.plugins.ClusterPlugin; +import org.elasticsearch.plugins.EnginePlugin; +import org.elasticsearch.plugins.IndexStorePlugin; +import org.elasticsearch.plugins.Plugin; +import org.elasticsearch.plugins.RepositoryPlugin; +import org.elasticsearch.repositories.RepositoriesModule; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.rest.RestController; +import org.elasticsearch.rest.RestHandler; +import org.elasticsearch.script.ScriptService; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.watcher.ResourceWatcherService; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; +import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; +import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsAction; +import org.elasticsearch.xpack.searchablesnapshots.action.TransportClearSearchableSnapshotsCacheAction; +import org.elasticsearch.xpack.searchablesnapshots.action.TransportMountSearchableSnapshotAction; +import org.elasticsearch.xpack.searchablesnapshots.action.TransportSearchableSnapshotsStatsAction; +import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService; +import org.elasticsearch.xpack.searchablesnapshots.rest.RestClearSearchableSnapshotsCacheAction; +import org.elasticsearch.xpack.searchablesnapshots.rest.RestMountSearchableSnapshotAction; +import org.elasticsearch.xpack.searchablesnapshots.rest.RestSearchableSnapshotsStatsAction; + +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.Optional; +import java.util.function.Function; +import java.util.function.Supplier; + +import static java.util.Collections.emptyList; +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; + +/** + * 
Plugin for Searchable Snapshots feature + */ +public class SearchableSnapshots extends Plugin implements IndexStorePlugin, RepositoryPlugin, EnginePlugin, ActionPlugin, ClusterPlugin { + + private static final boolean SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED; + + static { + final String property = System.getProperty("es.searchable_snapshots_feature_enabled"); + if ("true".equals(property)) { + SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED = true; + } else if ("false".equals(property)) { + SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED = false; + } else if (property == null) { + SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED = Build.CURRENT.isSnapshot(); + } else { + throw new IllegalArgumentException( + "expected es.searchable_snapshots_feature_enabled to be unset or [true|false] but was [" + property + "]" + ); + } + } + + public static final Setting SNAPSHOT_REPOSITORY_SETTING = Setting.simpleString( + "index.store.snapshot.repository_name", + Setting.Property.IndexScope, + Setting.Property.PrivateIndex + ); + public static final Setting SNAPSHOT_SNAPSHOT_NAME_SETTING = Setting.simpleString( + "index.store.snapshot.snapshot_name", + Setting.Property.IndexScope, + Setting.Property.PrivateIndex + ); + public static final Setting SNAPSHOT_SNAPSHOT_ID_SETTING = Setting.simpleString( + "index.store.snapshot.snapshot_uuid", + Setting.Property.IndexScope, + Setting.Property.PrivateIndex + ); + public static final Setting SNAPSHOT_INDEX_ID_SETTING = Setting.simpleString( + "index.store.snapshot.index_uuid", + Setting.Property.IndexScope, + Setting.Property.PrivateIndex + ); + public static final Setting SNAPSHOT_CACHE_ENABLED_SETTING = Setting.boolSetting( + "index.store.snapshot.cache.enabled", + true, + Setting.Property.IndexScope + ); + // The file extensions that are excluded from the cache + public static final Setting> SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING = Setting.listSetting( + "index.store.snapshot.cache.excluded_file_types", + emptyList(), + Function.identity(), + Setting.Property.IndexScope, + Setting.Property.NodeScope + ); + public static final Setting SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING = Setting.byteSizeSetting( + "index.store.snapshot.uncached_chunk_size", + new ByteSizeValue(-1, ByteSizeUnit.BYTES), + Setting.Property.IndexScope, + Setting.Property.NodeScope + ); + + public static final String SNAPSHOT_DIRECTORY_FACTORY_KEY = "snapshot"; + + private final SetOnce repositoriesService; + private final SetOnce cacheService; + private final Settings settings; + + public SearchableSnapshots(final Settings settings) { + this.repositoriesService = new SetOnce<>(); + this.cacheService = new SetOnce<>(); + this.settings = settings; + } + + public static void ensureValidLicense(XPackLicenseState licenseState) { + if (licenseState.isAllowedByLicense(License.OperationMode.PLATINUM) == false) { + throw LicenseUtils.newComplianceException("searchable-snapshots"); + } + } + + @Override + public List> getSettings() { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED) { + return org.elasticsearch.common.collect.List.of( + SNAPSHOT_REPOSITORY_SETTING, + SNAPSHOT_SNAPSHOT_NAME_SETTING, + SNAPSHOT_SNAPSHOT_ID_SETTING, + SNAPSHOT_INDEX_ID_SETTING, + SNAPSHOT_CACHE_ENABLED_SETTING, + SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING, + SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING, + CacheService.SNAPSHOT_CACHE_SIZE_SETTING, + CacheService.SNAPSHOT_CACHE_RANGE_SIZE_SETTING + ); + } else { + return org.elasticsearch.common.collect.List.of(); + } + } + + @Override + public Collection createComponents( + final Client client, + final ClusterService 
clusterService, + final ThreadPool threadPool, + final ResourceWatcherService resourceWatcherService, + final ScriptService scriptService, + final NamedXContentRegistry xContentRegistry, + final Environment environment, + final NodeEnvironment nodeEnvironment, + final NamedWriteableRegistry registry, + final IndexNameExpressionResolver resolver + ) { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED) { + final CacheService cacheService = new CacheService(settings); + this.cacheService.set(cacheService); + return org.elasticsearch.common.collect.List.of(cacheService); + } else { + return org.elasticsearch.common.collect.List.of(); + } + } + + @Override + public void onRepositoriesModule(RepositoriesModule repositoriesModule) { + // TODO NORELEASE should we use some SPI mechanism? The only reason we are a RepositoriesPlugin is because of this :/ + repositoriesService.set(repositoriesModule.getRepositoryService()); + } + + @Override + public void onIndexModule(IndexModule indexModule) { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED && isSearchableSnapshotStore(indexModule.getSettings())) { + indexModule.addIndexEventListener(new SearchableSnapshotIndexEventListener()); + } + } + + @Override + public Map getDirectoryFactories() { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED) { + return org.elasticsearch.common.collect.Map.of(SNAPSHOT_DIRECTORY_FACTORY_KEY, (indexSettings, shardPath) -> { + final RepositoriesService repositories = repositoriesService.get(); + assert repositories != null; + final CacheService cache = cacheService.get(); + assert cache != null; + return SearchableSnapshotDirectory.create(repositories, cache, indexSettings, shardPath, System::nanoTime); + }); + } else { + return org.elasticsearch.common.collect.Map.of(); + } + } + + @Override + public Optional getEngineFactory(IndexSettings indexSettings) { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED + && isSearchableSnapshotStore(indexSettings.getSettings()) + && indexSettings.getSettings().getAsBoolean("index.frozen", false) == false) { + return Optional.of(engineConfig -> new ReadOnlyEngine(engineConfig, null, new TranslogStats(), false, Function.identity())); + } + return Optional.empty(); + } + + @Override + public List> getActions() { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED) { + return org.elasticsearch.common.collect.List.of( + new ActionHandler<>(SearchableSnapshotsStatsAction.INSTANCE, TransportSearchableSnapshotsStatsAction.class), + new ActionHandler<>(ClearSearchableSnapshotsCacheAction.INSTANCE, TransportClearSearchableSnapshotsCacheAction.class), + new ActionHandler<>(MountSearchableSnapshotAction.INSTANCE, TransportMountSearchableSnapshotAction.class) + ); + } else { + return org.elasticsearch.common.collect.List.of(); + } + } + + public List getRestHandlers( + Settings settings, + RestController restController, + ClusterSettings clusterSettings, + IndexScopedSettings indexScopedSettings, + SettingsFilter settingsFilter, + IndexNameExpressionResolver indexNameExpressionResolver, + Supplier nodesInCluster + ) { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED) { + return org.elasticsearch.common.collect.List.of( + new RestSearchableSnapshotsStatsAction(), + new RestClearSearchableSnapshotsCacheAction(), + new RestMountSearchableSnapshotAction() + ); + } else { + return org.elasticsearch.common.collect.List.of(); + } + } + + @Override + public Map getExistingShardsAllocators() { + if (SEARCHABLE_SNAPSHOTS_FEATURE_ENABLED) { + return Collections.singletonMap(SearchableSnapshotAllocator.ALLOCATOR_NAME, new 
SearchableSnapshotAllocator()); + } else { + return Collections.emptyMap(); + } + } + + static boolean isSearchableSnapshotStore(Settings indexSettings) { + return SNAPSHOT_DIRECTORY_FACTORY_KEY.equals(INDEX_STORE_TYPE_SETTING.get(indexSettings)); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java new file mode 100644 index 00000000000..4f53193c6e4 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/AbstractTransportSearchableSnapshotsAction.java @@ -0,0 +1,123 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.ResourceNotFoundException; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.block.ClusterBlockLevel; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.routing.ShardsIterator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.shard.IndexShard; +import org.elasticsearch.index.store.SearchableSnapshotDirectory; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; + +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; +import static org.elasticsearch.index.store.SearchableSnapshotDirectory.unwrapDirectory; +import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_DIRECTORY_FACTORY_KEY; + +public abstract class AbstractTransportSearchableSnapshotsAction< + Request extends BroadcastRequest, + Response extends BroadcastResponse, + ShardOperationResult extends Writeable> extends TransportBroadcastByNodeAction { + + private final IndicesService indicesService; + private final XPackLicenseState licenseState; + + AbstractTransportSearchableSnapshotsAction( + String actionName, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver resolver, + Writeable.Reader request, + String executor, + IndicesService indicesService, + XPackLicenseState licenseState + ) { + super(actionName, clusterService, transportService, actionFilters, resolver, request, executor); + this.indicesService = 
indicesService; + this.licenseState = Objects.requireNonNull(licenseState); + } + + AbstractTransportSearchableSnapshotsAction( + String actionName, + ClusterService clusterService, + TransportService transportService, + ActionFilters actionFilters, + IndexNameExpressionResolver resolver, + Writeable.Reader request, + String executor, + IndicesService indicesService, + XPackLicenseState licenseState, + boolean canTripCircuitBreaker + ) { + super(actionName, clusterService, transportService, actionFilters, resolver, request, executor, canTripCircuitBreaker); + this.indicesService = indicesService; + this.licenseState = Objects.requireNonNull(licenseState); + } + + @Override + protected ClusterBlockException checkGlobalBlock(ClusterState state, Request request) { + return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ); + } + + @Override + protected ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] indices) { + return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indices); + } + + @Override + protected ShardsIterator shards(ClusterState state, Request request, String[] concreteIndices) { + final List searchableSnapshotIndices = new ArrayList<>(); + for (String concreteIndex : concreteIndices) { + IndexMetadata indexMetaData = state.metadata().index(concreteIndex); + if (indexMetaData != null) { + Settings indexSettings = indexMetaData.getSettings(); + if (INDEX_STORE_TYPE_SETTING.get(indexSettings).equals(SNAPSHOT_DIRECTORY_FACTORY_KEY)) { + searchableSnapshotIndices.add(concreteIndex); + } + } + } + if (searchableSnapshotIndices.isEmpty()) { + throw new ResourceNotFoundException("No searchable snapshots indices found"); + } + return state.routingTable().allShards(searchableSnapshotIndices.toArray(new String[0])); + } + + @Override + protected ShardOperationResult shardOperation(Request request, ShardRouting shardRouting) throws IOException { + SearchableSnapshots.ensureValidLicense(licenseState); + final IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.index()).getShard(shardRouting.id()); + final SearchableSnapshotDirectory directory = unwrapDirectory(indexShard.store().directory()); + assert directory != null; + assert directory.getShardId().equals(shardRouting.shardId()); + return executeShardOperation(request, shardRouting, directory); + } + + protected abstract ShardOperationResult executeShardOperation( + Request request, + ShardRouting shardRouting, + SearchableSnapshotDirectory directory + ) throws IOException; +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java new file mode 100644 index 00000000000..4d8130227c5 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheAction.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.ActionType; + +public class ClearSearchableSnapshotsCacheAction extends ActionType { + + public static final ClearSearchableSnapshotsCacheAction INSTANCE = new ClearSearchableSnapshotsCacheAction(); + static final String NAME = "cluster:admin/xpack/searchable_snapshots/cache/clear"; + + private ClearSearchableSnapshotsCacheAction() { + super(NAME, ClearSearchableSnapshotsCacheResponse::new); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheRequest.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheRequest.java new file mode 100644 index 00000000000..1b86962e6cc --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheRequest.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class ClearSearchableSnapshotsCacheRequest extends BroadcastRequest { + + public ClearSearchableSnapshotsCacheRequest(StreamInput in) throws IOException { + super(in); + } + + public ClearSearchableSnapshotsCacheRequest(String... indices) { + super(indices); + } + + protected ClearSearchableSnapshotsCacheRequest(String[] indices, IndicesOptions indicesOptions) { + super(indices, indicesOptions); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java new file mode 100644 index 00000000000..9b9d8e5e37a --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/ClearSearchableSnapshotsCacheResponse.java @@ -0,0 +1,29 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; +import java.util.List; + +public class ClearSearchableSnapshotsCacheResponse extends BroadcastResponse { + + ClearSearchableSnapshotsCacheResponse(StreamInput in) throws IOException { + super(in); + } + + ClearSearchableSnapshotsCacheResponse( + int totalShards, + int successfulShards, + int failedShards, + List shardFailures + ) { + super(totalShards, successfulShards, failedShards, shardFailures); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsAction.java new file mode 100644 index 00000000000..f59e001cea2 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsAction.java @@ -0,0 +1,18 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.ActionType; + +public class SearchableSnapshotsStatsAction extends ActionType { + + public static final SearchableSnapshotsStatsAction INSTANCE = new SearchableSnapshotsStatsAction(); + static final String NAME = "cluster:monitor/xpack/searchable_snapshots/stats"; + + private SearchableSnapshotsStatsAction() { + super(NAME, SearchableSnapshotsStatsResponse::new); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsRequest.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsRequest.java new file mode 100644 index 00000000000..3659c9641d0 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsRequest.java @@ -0,0 +1,27 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.action.support.broadcast.BroadcastRequest; +import org.elasticsearch.common.io.stream.StreamInput; + +import java.io.IOException; + +public class SearchableSnapshotsStatsRequest extends BroadcastRequest { + + SearchableSnapshotsStatsRequest(StreamInput in) throws IOException { + super(in); + } + + public SearchableSnapshotsStatsRequest(String... 
indices) { + super(indices); + } + + public SearchableSnapshotsStatsRequest(String[] indices, IndicesOptions indicesOptions) { + super(indices, indicesOptions); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java new file mode 100644 index 00000000000..0f468bc2cc9 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponse.java @@ -0,0 +1,95 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.BroadcastResponse; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.io.stream.StreamOutput; +import org.elasticsearch.common.xcontent.XContentBuilder; +import org.elasticsearch.index.Index; +import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats; + +import java.io.IOException; +import java.util.Comparator; +import java.util.List; +import java.util.Objects; +import java.util.stream.Collectors; + +import static java.util.stream.Collectors.toList; + +public class SearchableSnapshotsStatsResponse extends BroadcastResponse { + + private List stats; + + SearchableSnapshotsStatsResponse(StreamInput in) throws IOException { + super(in); + this.stats = in.readList(SearchableSnapshotShardStats::new); + } + + SearchableSnapshotsStatsResponse( + List stats, + int totalShards, + int successfulShards, + int failedShards, + List shardFailures + ) { + super(totalShards, successfulShards, failedShards, shardFailures); + this.stats = Objects.requireNonNull(stats); + } + + public List getStats() { + return stats; + } + + @Override + public void writeTo(StreamOutput out) throws IOException { + super.writeTo(out); + out.writeList(stats); + } + + @Override + protected void addCustomXContentFields(XContentBuilder builder, Params params) throws IOException { + final List indices = getStats().stream() + .filter(stats -> stats.getStats().isEmpty() == false) + .map(SearchableSnapshotShardStats::getShardRouting) + .map(ShardRouting::index) + .sorted(Comparator.comparing(Index::getName)) + .distinct() + .collect(toList()); + + builder.startObject("indices"); + for (Index index : indices) { + builder.startObject(index.getName()); + { + builder.startObject("shards"); + { + List listOfStats = getStats().stream() + .filter(dirStats -> dirStats.getShardRouting().index().equals(index)) + .sorted(Comparator.comparingInt(dir -> dir.getShardRouting().getId())) + .collect(Collectors.toList()); + + int minShard = listOfStats.stream().map(stat -> stat.getShardRouting().getId()).min(Integer::compareTo).orElse(0); + int maxShard = listOfStats.stream().map(stat -> stat.getShardRouting().getId()).max(Integer::compareTo).orElse(0); + + for (int i = minShard; i <= maxShard; i++) { + builder.startArray(Integer.toString(i)); + for (SearchableSnapshotShardStats stat : listOfStats) { + if 
(stat.getShardRouting().getId() == i) { + stat.toXContent(builder, params); + } + } + builder.endArray(); + } + } + builder.endObject(); + } + builder.endObject(); + } + builder.endObject(); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java new file mode 100644 index 00000000000..c4264d5c248 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportClearSearchableSnapshotsCacheAction.java @@ -0,0 +1,86 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction.EmptyResult; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.ShardRouting; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.index.store.SearchableSnapshotDirectory; +import org.elasticsearch.indices.IndicesService; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; + +import java.io.IOException; +import java.util.List; + +public class TransportClearSearchableSnapshotsCacheAction extends AbstractTransportSearchableSnapshotsAction< + ClearSearchableSnapshotsCacheRequest, + ClearSearchableSnapshotsCacheResponse, + EmptyResult> { + + @Inject + public TransportClearSearchableSnapshotsCacheAction( + ClusterService clusterService, + TransportService transportService, + IndicesService indicesService, + ActionFilters actionFilters, + IndexNameExpressionResolver indexNameExpressionResolver, + XPackLicenseState licenseState + ) { + super( + ClearSearchableSnapshotsCacheAction.NAME, + clusterService, + transportService, + actionFilters, + indexNameExpressionResolver, + ClearSearchableSnapshotsCacheRequest::new, + ThreadPool.Names.MANAGEMENT, + indicesService, + licenseState, + false + ); + } + + @Override + protected EmptyResult readShardResult(StreamInput in) { + return EmptyResult.readEmptyResultFrom(in); + } + + @Override + protected ClearSearchableSnapshotsCacheResponse newResponse( + ClearSearchableSnapshotsCacheRequest request, + int totalShards, + int successfulShards, + int failedShards, + List responses, + List shardFailures, + ClusterState clusterState + ) { + return new ClearSearchableSnapshotsCacheResponse(totalShards, successfulShards, failedShards, shardFailures); + } + + @Override + protected ClearSearchableSnapshotsCacheRequest readRequestFrom(StreamInput in) throws IOException { + return new ClearSearchableSnapshotsCacheRequest(in); + } + + @Override + protected EmptyResult executeShardOperation( + ClearSearchableSnapshotsCacheRequest request, + ShardRouting shardRouting, + 
SearchableSnapshotDirectory directory + ) { + directory.clearCache(); + return EmptyResult.INSTANCE; + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java new file mode 100644 index 00000000000..2ed2258e3de --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportMountSearchableSnapshotAction.java @@ -0,0 +1,188 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.action.StepListener; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.support.ActionFilters; +import org.elasticsearch.action.support.master.TransportMasterNodeAction; +import org.elasticsearch.client.Client; +import org.elasticsearch.cluster.ClusterState; +import org.elasticsearch.cluster.block.ClusterBlockException; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver; +import org.elasticsearch.cluster.routing.allocation.ExistingShardsAllocator; +import org.elasticsearch.cluster.service.ClusterService; +import org.elasticsearch.common.inject.Inject; +import org.elasticsearch.common.io.stream.StreamInput; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexNotFoundException; +import org.elasticsearch.license.XPackLicenseState; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.RepositoriesService; +import org.elasticsearch.repositories.Repository; +import org.elasticsearch.repositories.RepositoryData; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.transport.TransportService; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; +import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshotAllocator; +import org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots; + +import java.io.IOException; +import java.util.Map; +import java.util.Objects; +import java.util.Optional; + +import static org.elasticsearch.index.IndexModule.INDEX_STORE_TYPE_SETTING; + +/** + * Action that mounts a snapshot as a searchable snapshot, by converting the mount request into a restore request with specific settings + * using {@link TransportMountSearchableSnapshotAction#buildIndexSettings(String, SnapshotId, IndexId)}. + * + * This action doesn't technically need to run on the master node, but it needs to get metadata from the repository and we only expect the + * repository to be accessible from data and master-eligible nodes so we can't run it everywhere. 
Given that we already have a way to run
+ * actions on the master and that we have to do the restore via the master, it's simplest to use {@link TransportMasterNodeAction}.
+ */
+public class TransportMountSearchableSnapshotAction extends TransportMasterNodeAction<
+    MountSearchableSnapshotRequest,
+    RestoreSnapshotResponse> {
+
+    private final Client client;
+    private final RepositoriesService repositoriesService;
+    private final XPackLicenseState licenseState;
+
+    @Inject
+    public TransportMountSearchableSnapshotAction(
+        TransportService transportService,
+        ClusterService clusterService,
+        Client client,
+        ThreadPool threadPool,
+        RepositoriesService repositoriesService,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        XPackLicenseState licenseState
+    ) {
+        super(
+            MountSearchableSnapshotAction.NAME,
+            transportService,
+            clusterService,
+            threadPool,
+            actionFilters,
+            MountSearchableSnapshotRequest::new,
+            indexNameExpressionResolver
+        );
+        this.client = client;
+        this.repositoriesService = repositoriesService;
+        this.licenseState = Objects.requireNonNull(licenseState);
+    }
+
+    @Override
+    protected String executor() {
+        // Avoid SNAPSHOT since snapshot threads may all be busy with long-running tasks which would block this action from responding with
+        // an error. Avoid SAME since getting the repository metadata may block on IO.
+        return ThreadPool.Names.GENERIC;
+    }
+
+    @Override
+    protected RestoreSnapshotResponse read(StreamInput in) throws IOException {
+        return new RestoreSnapshotResponse(in);
+    }
+
+    @Override
+    protected ClusterBlockException checkBlock(MountSearchableSnapshotRequest request, ClusterState state) {
+        // The restore action checks the cluster blocks.
+        return null;
+    }
+
+    /**
+     * Return the index settings required to make a snapshot searchable
+     */
+    private static Settings buildIndexSettings(String repoName, SnapshotId snapshotId, IndexId indexId) {
+        return Settings.builder()
+            .put(SearchableSnapshots.SNAPSHOT_REPOSITORY_SETTING.getKey(), repoName)
+            .put(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.getKey(), snapshotId.getName())
+            .put(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.getKey(), snapshotId.getUUID())
+            .put(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.getKey(), indexId.getId())
+            .put(INDEX_STORE_TYPE_SETTING.getKey(), SearchableSnapshots.SNAPSHOT_DIRECTORY_FACTORY_KEY)
+            .put(IndexMetadata.SETTING_BLOCKS_WRITE, true)
+            .put(ExistingShardsAllocator.EXISTING_SHARDS_ALLOCATOR_SETTING.getKey(), SearchableSnapshotAllocator.ALLOCATOR_NAME)
+            .build();
+    }
+
+    @Override
+    protected void masterOperation(
+        final MountSearchableSnapshotRequest request,
+        final ClusterState state,
+        final ActionListener<RestoreSnapshotResponse> listener
+    ) {
+        SearchableSnapshots.ensureValidLicense(licenseState);
+
+        final String repoName = request.repositoryName();
+        final String snapName = request.snapshotName();
+        final String indexName = request.snapshotIndexName();
+
+        // Retrieve IndexId and SnapshotId instances, which are then used to create a new restore
+        // request, which is then sent on to the actual snapshot restore mechanism
+        final Repository repository = repositoriesService.repository(repoName);
+        final StepListener<RepositoryData> repositoryDataListener = new StepListener<>();
+        repository.getRepositoryData(repositoryDataListener);
+        repositoryDataListener.whenComplete(repoData -> {
+            final Map<String, IndexId> indexIds = repoData.getIndices();
+            if (indexIds.containsKey(indexName) == false) {
+                throw new IndexNotFoundException("index [" + indexName + "] not found in repository [" + repoName + "]");
+            }
+            final IndexId indexId = indexIds.get(indexName);
+
+            final Optional<SnapshotId> matchingSnapshotId = repoData.getSnapshotIds()
+                .stream()
+                .filter(s -> snapName.equals(s.getName()))
+                .findFirst();
+            if (matchingSnapshotId.isPresent() == false) {
+                throw new ElasticsearchException("snapshot [" + snapName + "] not found in repository [" + repoName + "]");
+            }
+            final SnapshotId snapshotId = matchingSnapshotId.get();
+
+            // We must fail the restore if it obtains different IDs from the ones we just obtained (e.g. the target snapshot was replaced
+            // by one with the same name while we are restoring it) or else the index metadata might bear no relation to the snapshot we're
+            // searching. TODO NORELEASE validate IDs in the restore.
+
+            client.admin()
+                .cluster()
+                .restoreSnapshot(
+                    new RestoreSnapshotRequest(repoName, snapName)
+                        // Restore the single index specified
+                        .indices(indexName)
+                        // Always rename it to the desired mounted index name
+                        .renamePattern(".+")
+                        .renameReplacement(request.mountedIndexName())
+                        // Pass through index settings, adding the index-level settings required to use searchable snapshots
+                        .indexSettings(
+                            Settings.builder()
+                                .put(request.indexSettings())
+                                .put(buildIndexSettings(request.repositoryName(), snapshotId, indexId))
+                                .build()
+                        )
+                        // Pass through ignored index settings
+                        .ignoreIndexSettings(request.ignoreIndexSettings())
+                        // Don't include global state
+                        .includeGlobalState(false)
+                        // Don't include aliases
+                        .includeAliases(false)
+                        // Pass through the wait-for-completion flag
+                        .waitForCompletion(request.waitForCompletion())
+                        // Pass through the master-node timeout
+                        .masterNodeTimeout(request.masterNodeTimeout()),
+                    listener
+                );
+        }, listener::onFailure);
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportSearchableSnapshotsStatsAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportSearchableSnapshotsStatsAction.java
new file mode 100644
index 00000000000..a2e9ac0c1f4
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/action/TransportSearchableSnapshotsStatsAction.java
@@ -0,0 +1,124 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.searchablesnapshots.action;
+
+import org.elasticsearch.action.support.ActionFilters;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.index.store.SearchableSnapshotDirectory;
+import org.elasticsearch.indices.IndicesService;
+import org.elasticsearch.license.XPackLicenseState;
+import org.elasticsearch.threadpool.ThreadPool;
+import org.elasticsearch.transport.TransportService;
+import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats;
+import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats.CacheIndexInputStats;
+import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats.Counter;
+import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats.TimedCounter;
+import org.elasticsearch.index.store.IndexInputStats;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.stream.Collectors;
+
+public class TransportSearchableSnapshotsStatsAction extends AbstractTransportSearchableSnapshotsAction<
+    SearchableSnapshotsStatsRequest,
+    SearchableSnapshotsStatsResponse,
+    SearchableSnapshotShardStats> {
+    @Inject
+    public TransportSearchableSnapshotsStatsAction(
+        ClusterService clusterService,
+        TransportService transportService,
+        IndicesService indicesService,
+        ActionFilters actionFilters,
+        IndexNameExpressionResolver indexNameExpressionResolver,
+        XPackLicenseState licenseState
+    ) {
+        super(
+            SearchableSnapshotsStatsAction.NAME,
+            clusterService,
+            transportService,
+            actionFilters,
+            indexNameExpressionResolver,
+            SearchableSnapshotsStatsRequest::new,
+            ThreadPool.Names.MANAGEMENT,
+            indicesService,
+            licenseState
+        );
+    }
+
+    @Override
+    protected SearchableSnapshotShardStats readShardResult(StreamInput in) throws IOException {
+        return new SearchableSnapshotShardStats(in);
+    }
+
+    @Override
+    protected SearchableSnapshotsStatsResponse newResponse(
+        SearchableSnapshotsStatsRequest request,
+        int totalShards,
+        int successfulShards,
+        int failedShards,
+        List<SearchableSnapshotShardStats> shardsStats,
+        List<DefaultShardOperationFailedException> shardFailures,
+        ClusterState clusterState
+    ) {
+        return new SearchableSnapshotsStatsResponse(shardsStats, totalShards, successfulShards, failedShards, shardFailures);
+    }
+
+    @Override
+    protected SearchableSnapshotsStatsRequest readRequestFrom(StreamInput in) throws IOException {
+        return new SearchableSnapshotsStatsRequest(in);
+    }
+
+    @Override
+    protected SearchableSnapshotShardStats executeShardOperation(
+        SearchableSnapshotsStatsRequest request,
+        ShardRouting shardRouting,
+        SearchableSnapshotDirectory directory
+    ) {
+        return new SearchableSnapshotShardStats(
+            shardRouting,
+            directory.getSnapshotId(),
+            directory.getIndexId(),
+            directory.getStats()
+                .entrySet()
+                .stream()
+                .map(entry -> toCacheIndexInputStats(entry.getKey(), entry.getValue()))
+                .collect(Collectors.toList())
+        );
+    }
+
+    private static CacheIndexInputStats toCacheIndexInputStats(final String fileName, final IndexInputStats inputStats) {
+        return new CacheIndexInputStats(
+            fileName,
+            inputStats.getFileLength(),
+            inputStats.getOpened().sum(),
+            inputStats.getClosed().sum(),
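+            // the remaining constructor arguments are the per-file seek and read distributions, converted via toCounter/toTimedCounter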
+            toCounter(inputStats.getForwardSmallSeeks()),
+            toCounter(inputStats.getBackwardSmallSeeks()),
+            toCounter(inputStats.getForwardLargeSeeks()),
+            toCounter(inputStats.getBackwardLargeSeeks()),
+            toCounter(inputStats.getContiguousReads()),
+            toCounter(inputStats.getNonContiguousReads()),
+            toCounter(inputStats.getCachedBytesRead()),
+            toTimedCounter(inputStats.getCachedBytesWritten()),
+            toTimedCounter(inputStats.getDirectBytesRead()),
+            toTimedCounter(inputStats.getOptimizedBytesRead())
+        );
+    }
+
+    private static Counter toCounter(final IndexInputStats.Counter counter) {
+        return new Counter(counter.count(), counter.total(), counter.min(), counter.max());
+    }
+
+    private static TimedCounter toTimedCounter(final IndexInputStats.TimedCounter counter) {
+        return new TimedCounter(counter.count(), counter.total(), counter.min(), counter.max(), counter.totalNanoseconds());
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/CacheService.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/CacheService.java
new file mode 100644
index 00000000000..88a4a3c1181
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/cache/CacheService.java
@@ -0,0 +1,132 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.searchablesnapshots.cache;
+
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.cache.Cache;
+import org.elasticsearch.common.cache.CacheBuilder;
+import org.elasticsearch.common.component.AbstractLifecycleComponent;
+import org.elasticsearch.common.component.Lifecycle;
+import org.elasticsearch.common.settings.Setting;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.core.internal.io.IOUtils;
+import org.elasticsearch.index.store.cache.CacheFile;
+import org.elasticsearch.index.store.cache.CacheKey;
+
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.util.Objects;
+import java.util.function.Predicate;
+
+/**
+ * {@link CacheService} maintains a cache entry for all files read from searchable snapshot directories
+ * (see {@link org.elasticsearch.index.store.SearchableSnapshotDirectory})
+ */
+public class CacheService extends AbstractLifecycleComponent {
+
+    private static final String SETTINGS_PREFIX = "xpack.searchable.snapshot.cache.";
+
+    public static final Setting<ByteSizeValue> SNAPSHOT_CACHE_SIZE_SETTING = Setting.byteSizeSetting(
+        SETTINGS_PREFIX + "size",
+        new ByteSizeValue(1, ByteSizeUnit.GB), // TODO: size the default value according to disk space
+        new ByteSizeValue(0, ByteSizeUnit.BYTES), // min
+        new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), // max
+        Setting.Property.NodeScope
+    );
+
+    public static final Setting<ByteSizeValue> SNAPSHOT_CACHE_RANGE_SIZE_SETTING = Setting.byteSizeSetting(
+        SETTINGS_PREFIX + "range_size",
+        new ByteSizeValue(32, ByteSizeUnit.MB), // default
+        new ByteSizeValue(4, ByteSizeUnit.KB), // min
+        new ByteSizeValue(Long.MAX_VALUE, ByteSizeUnit.BYTES), // max
+        Setting.Property.NodeScope
+    );
+
+    private final Cache<CacheKey, CacheFile> cache;
+    private final ByteSizeValue cacheSize;
+    private final ByteSizeValue rangeSize;
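+
+    // For illustration only, both node-scope settings could be overridden in elasticsearch.yml,
+    // e.g. (hypothetical values, not the defaults above):
+    //
+    //   xpack.searchable.snapshot.cache.size: 10gb
+    //   xpack.searchable.snapshot.cache.range_size: 16mb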
+
+    public CacheService(final Settings settings) {
+        this(SNAPSHOT_CACHE_SIZE_SETTING.get(settings), SNAPSHOT_CACHE_RANGE_SIZE_SETTING.get(settings));
+    }
+
+    // overridable by tests
+    public CacheService(final ByteSizeValue cacheSize, final ByteSizeValue rangeSize) {
+        this.cacheSize = Objects.requireNonNull(cacheSize);
+        this.rangeSize = Objects.requireNonNull(rangeSize);
+        this.cache = CacheBuilder.<CacheKey, CacheFile>builder()
+            .setMaximumWeight(cacheSize.getBytes())
+            .weigher((key, entry) -> entry.getLength())
+            // NORELEASE This does not immediately free space on disk, as cache files are only deleted when all index inputs
+            // are done with reading/writing the cache file
+            .removalListener(notification -> IOUtils.closeWhileHandlingException(() -> notification.getValue().startEviction()))
+            .build();
+    }
+
+    @Override
+    protected void doStart() {
+        // NORELEASE TODO clean up (or rebuild) cache from disk as a node crash may leave cached files
+    }
+
+    @Override
+    protected void doStop() {
+        cache.invalidateAll();
+    }
+
+    @Override
+    protected void doClose() {}
+
+    private void ensureLifecycleStarted() {
+        final Lifecycle.State state = lifecycleState();
+        if (state != Lifecycle.State.STARTED) {
+            throw new IllegalStateException("Failed to read data from cache: cache service is not started [" + state + "]");
+        }
+    }
+
+    /**
+     * @return the cache size (in bytes)
+     */
+    public long getCacheSize() {
+        return cacheSize.getBytes();
+    }
+
+    /**
+     * @return the cache range size (in bytes)
+     */
+    public int getRangeSize() {
+        return Math.toIntExact(rangeSize.getBytes());
+    }
+
+    public CacheFile get(final CacheKey cacheKey, final long fileLength, final Path cacheDir) throws Exception {
+        ensureLifecycleStarted();
+        return cache.computeIfAbsent(cacheKey, key -> {
+            ensureLifecycleStarted();
+            // generate a random UUID for the name of the cache file on disk
+            final String uuid = UUIDs.randomBase64UUID();
+            // resolve the path of the cache file within the cache directory
+            final Path path = cacheDir.resolve(uuid);
+            assert Files.notExists(path) : "cache file already exists " + path;
+
+            return new CacheFile(key.toString(), fileLength, path, getRangeSize());
+        });
+    }
+
+    /**
+     * Invalidate cache entries with keys matching the given predicate
+     *
+     * @param predicate the predicate to evaluate
+     */
+    public void removeFromCache(final Predicate<CacheKey> predicate) {
+        for (CacheKey cacheKey : cache.keys()) {
+            if (predicate.test(cacheKey)) {
+                cache.invalidate(cacheKey);
+            }
+        }
+        cache.refresh();
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestClearSearchableSnapshotsCacheAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestClearSearchableSnapshotsCacheAction.java
new file mode 100644
index 00000000000..e7972ebfdd9
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestClearSearchableSnapshotsCacheAction.java
@@ -0,0 +1,46 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */ +package org.elasticsearch.xpack.searchablesnapshots.rest; + +import org.elasticsearch.action.support.IndicesOptions; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; +import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheRequest; + +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; + +public class RestClearSearchableSnapshotsCacheAction extends BaseRestHandler { + + @Override + public List routes() { + return unmodifiableList( + asList( + new Route(RestRequest.Method.POST, "/_searchable_snapshots/cache/clear"), + new Route(RestRequest.Method.POST, "/{index}/_searchable_snapshots/cache/clear") + ) + ); + } + + @Override + public String getName() { + return "clear_indices_searchable_snapshots_cache_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { + final ClearSearchableSnapshotsCacheRequest request = new ClearSearchableSnapshotsCacheRequest(); + request.indices(Strings.splitStringByCommaToArray(restRequest.param("index"))); + request.indicesOptions(IndicesOptions.fromRequest(restRequest, request.indicesOptions())); + return channel -> client.execute(ClearSearchableSnapshotsCacheAction.INSTANCE, request, new RestToXContentListener<>(channel)); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java new file mode 100644 index 00000000000..d4e5ddf96f0 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestMountSearchableSnapshotAction.java @@ -0,0 +1,46 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ + +package org.elasticsearch.xpack.searchablesnapshots.rest; + +import org.elasticsearch.action.support.master.MasterNodeRequest; +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; + +import static org.elasticsearch.rest.RestRequest.Method.POST; + +public class RestMountSearchableSnapshotAction extends BaseRestHandler { + @Override + public String getName() { + return "mount_snapshot_action"; + } + + @Override + public List routes() { + return Collections.singletonList(new Route(POST, "/_snapshot/{repository}/{snapshot}/_mount")); + } + + @Override + protected RestChannelConsumer prepareRequest(RestRequest request, NodeClient client) throws IOException { + MountSearchableSnapshotRequest mountSearchableSnapshotRequest = MountSearchableSnapshotRequest.PARSER.apply( + request.contentParser(), + request + ).masterNodeTimeout(request.paramAsTime("master_timeout", MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT)); + return channel -> client.execute( + MountSearchableSnapshotAction.INSTANCE, + mountSearchableSnapshotRequest, + new RestToXContentListener<>(channel) + ); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestSearchableSnapshotsStatsAction.java b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestSearchableSnapshotsStatsAction.java new file mode 100644 index 00000000000..cb44bd8be81 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/main/java/org/elasticsearch/xpack/searchablesnapshots/rest/RestSearchableSnapshotsStatsAction.java @@ -0,0 +1,47 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.xpack.searchablesnapshots.rest; + +import org.elasticsearch.client.node.NodeClient; +import org.elasticsearch.common.Strings; +import org.elasticsearch.rest.BaseRestHandler; +import org.elasticsearch.rest.RestRequest; +import org.elasticsearch.rest.action.RestToXContentListener; +import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsAction; +import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsRequest; + +import java.util.List; + +import static java.util.Arrays.asList; +import static java.util.Collections.unmodifiableList; + +public class RestSearchableSnapshotsStatsAction extends BaseRestHandler { + + @Override + public List routes() { + return unmodifiableList( + asList( + new Route(RestRequest.Method.GET, "/_searchable_snapshots/stats"), + new Route(RestRequest.Method.GET, "/{index}/_searchable_snapshots/stats") + ) + ); + } + + @Override + public String getName() { + return "searchable_snapshots_stats_action"; + } + + @Override + public RestChannelConsumer prepareRequest(final RestRequest restRequest, final NodeClient client) { + String[] indices = Strings.splitStringByCommaToArray(restRequest.param("index")); + return channel -> client.execute( + SearchableSnapshotsStatsAction.INSTANCE, + new SearchableSnapshotsStatsRequest(indices), + new RestToXContentListener<>(channel) + ); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/InMemoryNoOpCommitDirectoryTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/InMemoryNoOpCommitDirectoryTests.java new file mode 100644 index 00000000000..cbeb4edc201 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/InMemoryNoOpCommitDirectoryTests.java @@ -0,0 +1,212 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.TopDocs; +import org.apache.lucene.search.TotalHits; +import org.apache.lucene.store.AlreadyClosedException; +import org.apache.lucene.store.ByteBuffersDirectory; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.FilterDirectory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.store.IndexOutput; +import org.apache.lucene.store.Lock; +import org.apache.lucene.store.NoLockFactory; +import org.elasticsearch.test.ESTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Set; + +import static java.util.Collections.singletonList; +import static java.util.Collections.singletonMap; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasItem; +import static org.hamcrest.Matchers.not; + +public class InMemoryNoOpCommitDirectoryTests extends ESTestCase { + + private ByteBuffersDirectory readOnlyDirectory; + private InMemoryNoOpCommitDirectory inMemoryNoOpCommitDirectory; + + @Before + public void createDirectories() { + readOnlyDirectory = new ByteBuffersDirectory(NoLockFactory.INSTANCE); + inMemoryNoOpCommitDirectory = new InMemoryNoOpCommitDirectory(new FilterDirectory(readOnlyDirectory) { + // wrapper around readOnlyDirectory to assert that we make no attempt to write to it + + @Override + public void deleteFile(String name) { + throw new AssertionError("not supported"); + } + + @Override + public IndexOutput createOutput(String name, IOContext context) { + throw new AssertionError("not supported"); + } + + @Override + public IndexOutput createTempOutput(String prefix, String suffix, IOContext context) { + throw new AssertionError("not supported"); + } + + @Override + public void rename(String source, String dest) { + throw new AssertionError("not supported"); + } + + @Override + public Lock obtainLock(String name) { + throw new AssertionError("not supported"); + } + + @Override + public Set getPendingDeletions() { + throw new AssertionError("not supported"); + } + + @Override + public void copyFrom(Directory from, String src, String dest, IOContext context) { + throw new AssertionError("not supported"); + } + }); + } + + @After + public void closeDirectories() throws IOException { + inMemoryNoOpCommitDirectory.close(); + expectThrows(AlreadyClosedException.class, () -> readOnlyDirectory.listAll()); + } + + public void testAllowsWritingSegmentsFiles() throws IOException { + assertCanWrite("segments_" + randomAlphaOfLength(10)); + assertCanWrite("pending_segments_" + randomAlphaOfLength(10)); + assertCanWrite("recovery." 
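+ // recovery-pending segments files ("recovery.<uuid>.segments_<gen>") must be writable as well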
+ randomAlphaOfLength(10) + ".segments_" + randomAlphaOfLength(10)); + } + + public void testForbidsWritingOtherFiles() { + expectThrows(IllegalArgumentException.class, () -> assertCanWrite("not_a_segments_file")); + } + + private void assertCanWrite(String name) throws IOException { + final String s = randomAlphaOfLength(10); + try (IndexOutput output = inMemoryNoOpCommitDirectory.createOutput(name, IOContext.DEFAULT)) { + output.writeString(s); + } + + try (IndexInput input = inMemoryNoOpCommitDirectory.openInput(name, IOContext.DEFAULT)) { + assertThat(input.readString(), equalTo(s)); + } + + if (randomBoolean()) { + inMemoryNoOpCommitDirectory.sync(singletonList(name)); + } + + if (randomBoolean()) { + inMemoryNoOpCommitDirectory.syncMetaData(); + } + + assertThat(inMemoryNoOpCommitDirectory.fileLength(name), equalTo((long) StandardCharsets.UTF_8.encode(s).array().length)); + + assertThat(Arrays.asList(inMemoryNoOpCommitDirectory.listAll()), hasItem(name)); + + inMemoryNoOpCommitDirectory.deleteFile(name); + + assertThat(Arrays.asList(inMemoryNoOpCommitDirectory.listAll()), not(hasItem(name))); + } + + public void testExposesFileFromRealDirectory() throws IOException { + final String name = randomAlphaOfLength(10); + assertExposesRealFiles(name); + expectThrows(IllegalArgumentException.class, () -> inMemoryNoOpCommitDirectory.deleteFile(name)); + assertThat(Arrays.asList(inMemoryNoOpCommitDirectory.listAll()), hasItem(name)); + } + + public void testSilentlyIgnoresAttemptsToDeleteInnerSegmentsFiles() throws IOException { + final String name = "segments_" + randomAlphaOfLength(10); + assertExposesRealFiles(name); + inMemoryNoOpCommitDirectory.deleteFile(name); // no-op + assertThat(Arrays.asList(inMemoryNoOpCommitDirectory.listAll()), hasItem(name)); + readOnlyDirectory.deleteFile(name); + assertThat(Arrays.asList(inMemoryNoOpCommitDirectory.listAll()), not(hasItem(name))); + } + + private void assertExposesRealFiles(String name) throws IOException { + final String s = randomAlphaOfLength(10); + + try (IndexOutput output = readOnlyDirectory.createOutput(name, IOContext.DEFAULT)) { + output.writeString(s); + } + + try (IndexInput input = inMemoryNoOpCommitDirectory.openInput(name, IOContext.DEFAULT)) { + assertThat(input.readString(), equalTo(s)); + } + + assertThat(inMemoryNoOpCommitDirectory.fileLength(name), equalTo((long) StandardCharsets.UTF_8.encode(s).array().length)); + + assertThat(Arrays.asList(inMemoryNoOpCommitDirectory.listAll()), hasItem(name)); + } + + public void testSupportsNoOpCommits() throws IOException { + try (IndexWriter indexWriter = new IndexWriter(readOnlyDirectory, new IndexWriterConfig())) { + final Document document = new Document(); + document.add(new TextField("foo", "bar", Field.Store.YES)); + indexWriter.addDocument(document); + indexWriter.setLiveCommitData(singletonMap("user_data", "original").entrySet()); + indexWriter.commit(); + } + + try (DirectoryReader directoryReader = DirectoryReader.open(inMemoryNoOpCommitDirectory)) { + assertThat(directoryReader.getIndexCommit().getUserData().get("user_data"), equalTo("original")); + final TopDocs topDocs = new IndexSearcher(directoryReader).search(new MatchAllDocsQuery(), 1); + assertThat(topDocs.totalHits, equalTo(new TotalHits(1L, TotalHits.Relation.EQUAL_TO))); + assertThat(topDocs.scoreDocs.length, equalTo(1)); + assertThat(directoryReader.document(topDocs.scoreDocs[0].doc).getField("foo").stringValue(), equalTo("bar")); + } + + try (IndexWriter indexWriter = new IndexWriter(inMemoryNoOpCommitDirectory, 
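+ // opening a writer on the wrapper keeps the new commit in memory; the read-only inner directory is never written to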
new IndexWriterConfig())) { + indexWriter.setLiveCommitData(singletonMap("user_data", "updated").entrySet()); + indexWriter.commit(); + } + + try (DirectoryReader directoryReader = DirectoryReader.open(inMemoryNoOpCommitDirectory)) { + assertThat(directoryReader.getIndexCommit().getUserData().get("user_data"), equalTo("updated")); + } + } + + public void testRejectsDocumentChanges() throws IOException { + if (randomBoolean()) { + try (IndexWriter indexWriter = new IndexWriter(readOnlyDirectory, new IndexWriterConfig())) { + final Document document = new Document(); + document.add(new TextField("foo", "bar", Field.Store.YES)); + indexWriter.addDocument(document); + indexWriter.commit(); + } + } + + try (IndexWriter indexWriter = new IndexWriter(inMemoryNoOpCommitDirectory, new IndexWriterConfig())) { + final Document document = new Document(); + document.add(new TextField("foo", "baz", Field.Store.YES)); + expectThrows(IllegalArgumentException.class, () -> { + indexWriter.addDocument(document); + indexWriter.commit(); + }); + } + } + +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/IndexInputStatsTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/IndexInputStatsTests.java new file mode 100644 index 00000000000..c068c97a651 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/IndexInputStatsTests.java @@ -0,0 +1,128 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.store; + +import org.elasticsearch.test.ESTestCase; + +import java.util.function.LongSupplier; + +import static org.elasticsearch.index.store.IndexInputStats.SEEKING_THRESHOLD; +import static org.elasticsearch.index.store.cache.TestUtils.assertCounter; + +public class IndexInputStatsTests extends ESTestCase { + + private static LongSupplier FAKE_CLOCK = () -> { + assert false : "should not be called"; + return -1L; + }; + + public void testReads() { + final long fileLength = randomLongBetween(1L, 1_000L); + final IndexInputStats inputStats = new IndexInputStats(fileLength, FAKE_CLOCK); + + assertCounter(inputStats.getContiguousReads(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getNonContiguousReads(), 0L, 0L, 0L, 0L); + + final IndexInputStats.Counter contiguous = new IndexInputStats.Counter(); + final IndexInputStats.Counter nonContiguous = new IndexInputStats.Counter(); + + for (int i = 0; i < randomIntBetween(1, 50); i++) { + final long currentPosition = randomLongBetween(0L, inputStats.getFileLength() - 1L); + final long previousPosition = randomBoolean() ? 
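+ // same position -> contiguous read; a random previous position (almost always) yields a non-contiguous one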
currentPosition : randomLongBetween(0L, inputStats.getFileLength() - 1L); + final int bytesRead = randomIntBetween(1, Math.toIntExact(Math.max(1L, inputStats.getFileLength() - currentPosition))); + + inputStats.incrementBytesRead(previousPosition, currentPosition, bytesRead); + + if (previousPosition == currentPosition) { + contiguous.add(bytesRead); + } else { + nonContiguous.add(bytesRead); + } + } + + assertCounter(inputStats.getContiguousReads(), contiguous.total(), contiguous.count(), contiguous.min(), contiguous.max()); + assertCounter( + inputStats.getNonContiguousReads(), + nonContiguous.total(), + nonContiguous.count(), + nonContiguous.min(), + nonContiguous.max() + ); + } + + public void testSeeks() { + final long fileLength = randomLongBetween(1L, 1_000L); + final long seekingThreshold = randomBoolean() ? randomLongBetween(1L, fileLength) : SEEKING_THRESHOLD.getBytes(); + final IndexInputStats inputStats = new IndexInputStats(fileLength, seekingThreshold, FAKE_CLOCK); + + assertCounter(inputStats.getForwardSmallSeeks(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getForwardLargeSeeks(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getBackwardSmallSeeks(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getBackwardLargeSeeks(), 0L, 0L, 0L, 0L); + + final IndexInputStats.Counter fwSmallSeeks = new IndexInputStats.Counter(); + final IndexInputStats.Counter fwLargeSeeks = new IndexInputStats.Counter(); + final IndexInputStats.Counter bwSmallSeeks = new IndexInputStats.Counter(); + final IndexInputStats.Counter bwLargeSeeks = new IndexInputStats.Counter(); + + for (int i = 0; i < randomIntBetween(1, 50); i++) { + final long currentPosition = randomLongBetween(0L, fileLength); + final long seekToPosition = randomLongBetween(0L, fileLength); + inputStats.incrementSeeks(currentPosition, seekToPosition); + + final long delta = seekToPosition - currentPosition; + if (delta > 0) { + IndexInputStats.Counter forwardCounter = (delta <= seekingThreshold) ? fwSmallSeeks : fwLargeSeeks; + forwardCounter.add(delta); + } else if (delta < 0) { + IndexInputStats.Counter backwardCounter = (delta >= -1 * seekingThreshold) ? 
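+ // backward deltas are negative, so they are compared against the negated threshold to classify small vs large seeks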
bwSmallSeeks : bwLargeSeeks; + backwardCounter.add(delta); + } + } + + assertCounter( + inputStats.getForwardSmallSeeks(), + fwSmallSeeks.total(), + fwSmallSeeks.count(), + fwSmallSeeks.min(), + fwSmallSeeks.max() + ); + assertCounter( + inputStats.getForwardLargeSeeks(), + fwLargeSeeks.total(), + fwLargeSeeks.count(), + fwLargeSeeks.min(), + fwLargeSeeks.max() + ); + + assertCounter( + inputStats.getBackwardSmallSeeks(), + bwSmallSeeks.total(), + bwSmallSeeks.count(), + bwSmallSeeks.min(), + bwSmallSeeks.max() + ); + assertCounter( + inputStats.getBackwardLargeSeeks(), + bwLargeSeeks.total(), + bwLargeSeeks.count(), + bwLargeSeeks.min(), + bwLargeSeeks.max() + ); + } + + public void testSeekToSamePosition() { + final IndexInputStats inputStats = new IndexInputStats(randomLongBetween(1L, 1_000L), FAKE_CLOCK); + final long position = randomLongBetween(0L, inputStats.getFileLength()); + + inputStats.incrementSeeks(position, position); + + assertCounter(inputStats.getForwardSmallSeeks(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getForwardLargeSeeks(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getBackwardSmallSeeks(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getBackwardLargeSeeks(), 0L, 0L, 0L, 0L); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/SearchableSnapshotDirectoryStatsTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/SearchableSnapshotDirectoryStatsTests.java new file mode 100644 index 00000000000..8c662ec03cd --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/SearchableSnapshotDirectoryStatsTests.java @@ -0,0 +1,606 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store; + +import org.apache.lucene.store.BufferedIndexInput; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.Version; +import org.elasticsearch.common.TriConsumer; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.lucene.store.ESIndexInputTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo; +import org.elasticsearch.index.store.cache.TestUtils; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.atomic.AtomicLong; +import java.util.function.LongSupplier; + +import static org.elasticsearch.index.store.cache.TestUtils.assertCounter; +import static org.elasticsearch.index.store.cache.TestUtils.createCacheService; +import static org.elasticsearch.index.store.cache.TestUtils.singleBlobContainer; +import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING; +import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; + +public class SearchableSnapshotDirectoryStatsTests extends ESIndexInputTestCase { + + private static final int MAX_FILE_LENGTH = 10_000; + + /** + * These tests simulate the passage of time with a clock that advances 100ms each time it is read. + */ + private static final long FAKE_CLOCK_ADVANCE_NANOS = TimeValue.timeValueMillis(100).nanos(); + + public void testOpenCount() { + executeTestCase((fileName, fileContent, directory) -> { + try { + for (long i = 0L; i < randomLongBetween(1L, 20L); i++) { + IndexInputStats inputStats = directory.getStats(fileName); + assertThat(inputStats, (i == 0L) ? 
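+ // stats are created lazily: null before the first openInput, non-null from then on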
nullValue() : notNullValue()); + + final IndexInput input = directory.openInput(fileName, newIOContext(random())); + inputStats = directory.getStats(fileName); + assertThat(inputStats.getOpened().longValue(), equalTo(i + 1L)); + input.close(); + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testCloseCount() { + executeTestCase((fileName, fileContent, directory) -> { + try { + for (long i = 0L; i < randomLongBetween(1L, 20L); i++) { + final IndexInput input = directory.openInput(fileName, newIOContext(random())); + + IndexInputStats inputStats = directory.getStats(fileName); + assertThat(inputStats, notNullValue()); + + assertThat(inputStats.getClosed().longValue(), equalTo(i)); + input.close(); + assertThat(inputStats.getClosed().longValue(), equalTo(i + 1L)); + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testCachedBytesReadsAndWrites() { + // a cache service with a low range size but enough space to not evict the cache file + final ByteSizeValue rangeSize = new ByteSizeValue(randomIntBetween(512, MAX_FILE_LENGTH), ByteSizeUnit.BYTES); + final ByteSizeValue cacheSize = new ByteSizeValue(1, ByteSizeUnit.GB); + + executeTestCaseWithCache(cacheSize, rangeSize, (fileName, fileContent, directory) -> { + try (IndexInput input = directory.openInput(fileName, newIOContext(random()))) { + final long length = input.length(); + + final IndexInputStats inputStats = directory.getStats(fileName); + assertThat(inputStats, notNullValue()); + + final byte[] result = randomReadAndSlice(input, Math.toIntExact(length)); + assertArrayEquals(fileContent, result); + + final long cachedBytesWriteCount = TestUtils.numberOfRanges(length, rangeSize.getBytes()); + + assertThat(inputStats.getCachedBytesWritten(), notNullValue()); + assertThat(inputStats.getCachedBytesWritten().total(), equalTo(length)); + assertThat(inputStats.getCachedBytesWritten().count(), equalTo(cachedBytesWriteCount)); + assertThat(inputStats.getCachedBytesWritten().min(), greaterThan(0L)); + assertThat( + inputStats.getCachedBytesWritten().max(), + (length < rangeSize.getBytes()) ? equalTo(length) : equalTo(rangeSize.getBytes()) + ); + assertThat( + inputStats.getCachedBytesWritten().totalNanoseconds(), + equalTo(cachedBytesWriteCount * FAKE_CLOCK_ADVANCE_NANOS) + ); + + assertThat(inputStats.getCachedBytesRead(), notNullValue()); + assertThat(inputStats.getCachedBytesRead().total(), greaterThanOrEqualTo(length)); + assertThat(inputStats.getCachedBytesRead().count(), greaterThan(0L)); + assertThat(inputStats.getCachedBytesRead().min(), greaterThan(0L)); + assertThat( + inputStats.getCachedBytesRead().max(), + (length < rangeSize.getBytes()) ? 
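+ // each cached read is at most one range long (or the whole file when it is smaller than a range)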
lessThanOrEqualTo(length) : lessThanOrEqualTo(rangeSize.getBytes()) + ); + + assertCounter(inputStats.getDirectBytesRead(), 0L, 0L, 0L, 0L); + assertThat(inputStats.getDirectBytesRead().totalNanoseconds(), equalTo(0L)); + + assertCounter(inputStats.getOptimizedBytesRead(), 0L, 0L, 0L, 0L); + assertThat(inputStats.getOptimizedBytesRead().totalNanoseconds(), equalTo(0L)); + + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testCachedBytesReadsAndWritesNoCache() { + final ByteSizeValue uncachedChunkSize = new ByteSizeValue(randomIntBetween(512, MAX_FILE_LENGTH), ByteSizeUnit.BYTES); + executeTestCaseWithoutCache(uncachedChunkSize, (fileName, fileContent, directory) -> { + try (IndexInput input = directory.openInput(fileName, newIOContext(random()))) { + final long length = input.length(); + + final IndexInputStats inputStats = directory.getStats(fileName); + assertThat(inputStats, notNullValue()); + + final byte[] result = randomReadAndSlice(input, Math.toIntExact(length)); + assertArrayEquals(fileContent, result); + + assertThat(inputStats.getCachedBytesWritten(), notNullValue()); + assertCounter(inputStats.getCachedBytesWritten(), 0L, 0L, 0L, 0L); + + assertThat(inputStats.getCachedBytesRead(), notNullValue()); + assertCounter(inputStats.getCachedBytesRead(), 0L, 0L, 0L, 0L); + + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testDirectBytesReadsWithCache() { + // Cache service always evicts files + executeTestCaseWithCache(ByteSizeValue.ZERO, ByteSizeValue.ZERO, (fileName, fileContent, directory) -> { + assertThat(directory.getStats(fileName), nullValue()); + + final IOContext ioContext = newIOContext(random()); + try { + IndexInput input = directory.openInput(fileName, ioContext); + if (randomBoolean()) { + input = input.slice("test", 0L, input.length()); + } + if (randomBoolean()) { + input = input.clone(); + } + final IndexInputStats inputStats = directory.getStats(fileName); + + // account for internal buffered reads + final long bufferSize = BufferedIndexInput.bufferSize(ioContext); + final long remaining = input.length() % bufferSize; + final long expectedTotal = input.length(); + final long expectedCount = input.length() / bufferSize + (remaining > 0L ? 1L : 0L); + final long minRead = remaining > 0L ? remaining : bufferSize; + final long maxRead = Math.min(input.length(), bufferSize); + + // read all index input sequentially as it simplifies testing + final byte[] readBuffer = new byte[512]; + for (long i = 0L; i < input.length();) { + int size = between(1, Math.toIntExact(Math.min(readBuffer.length, input.length() - input.getFilePointer()))); + input.readBytes(readBuffer, 0, size); + i += size; + + // direct cache file reads are aligned with the internal buffer + long currentCount = i / bufferSize + (i % bufferSize > 0L ? 
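+ // ceiling division: a partially filled trailing buffer still counts as one direct read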
1L : 0L); + if (currentCount < expectedCount) { + assertCounter(inputStats.getDirectBytesRead(), currentCount * bufferSize, currentCount, bufferSize, bufferSize); + } else { + assertCounter(inputStats.getDirectBytesRead(), expectedTotal, expectedCount, minRead, maxRead); + } + assertThat(inputStats.getDirectBytesRead().totalNanoseconds(), equalTo(currentCount * FAKE_CLOCK_ADVANCE_NANOS)); + } + + // cache file has never been written nor read + assertCounter(inputStats.getCachedBytesWritten(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getCachedBytesRead(), 0L, 0L, 0L, 0L); + assertThat(inputStats.getCachedBytesWritten().totalNanoseconds(), equalTo(0L)); + + input.close(); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testDirectBytesReadsWithoutCache() { + final ByteSizeValue uncachedChunkSize = new ByteSizeValue(randomIntBetween(512, MAX_FILE_LENGTH), ByteSizeUnit.BYTES); + executeTestCaseWithoutCache(uncachedChunkSize, (fileName, fileContent, directory) -> { + assertThat(directory.getStats(fileName), nullValue()); + + final IOContext ioContext = newIOContext(random()); + try (IndexInput original = directory.openInput(fileName, ioContext)) { + final IndexInput input = original.clone(); // always clone to only execute direct reads + final IndexInputStats inputStats = directory.getStats(fileName); + + // account for internal buffered reads + final long bufferSize = BufferedIndexInput.bufferSize(ioContext); + final long remaining = input.length() % bufferSize; + final long expectedTotal = input.length(); + final long expectedCount = input.length() / bufferSize + (remaining > 0L ? 1L : 0L); + final long minRead = remaining > 0L ? remaining : bufferSize; + final long maxRead = Math.min(input.length(), bufferSize); + + // read all index input sequentially as it simplifies testing + for (long i = 0L; i < input.length(); i++) { + input.readByte(); + } + + assertCounter(inputStats.getDirectBytesRead(), expectedTotal, expectedCount, minRead, maxRead); + assertThat(inputStats.getDirectBytesRead().totalNanoseconds(), equalTo(expectedCount * FAKE_CLOCK_ADVANCE_NANOS)); + + // cache file has never been written nor read + assertCounter(inputStats.getCachedBytesWritten(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getCachedBytesRead(), 0L, 0L, 0L, 0L); + assertThat(inputStats.getCachedBytesWritten().totalNanoseconds(), equalTo(0L)); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testOptimizedBytesReads() { + // use a large uncached chunk size that allows to read the file in a single operation + final ByteSizeValue uncachedChunkSize = new ByteSizeValue(1, ByteSizeUnit.GB); + executeTestCaseWithoutCache(uncachedChunkSize, (fileName, fileContent, directory) -> { + final IOContext context = newIOContext(random()); + try (IndexInput input = directory.openInput(fileName, context)) { + final IndexInputStats inputStats = directory.getStats(fileName); + assertThat(inputStats, notNullValue()); + + // read all index input sequentially as it simplifies testing + for (long i = 0L; i < input.length(); i++) { + input.readByte(); + } + + // account for internal buffered reads + final long bufferSize = BufferedIndexInput.bufferSize(context); + if (input.length() <= bufferSize) { + // file is read in a single non-optimized read operation + assertCounter(inputStats.getDirectBytesRead(), input.length(), 1L, input.length(), input.length()); + assertThat(inputStats.getDirectBytesRead().totalNanoseconds(), equalTo(FAKE_CLOCK_ADVANCE_NANOS)); 
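+ // a single direct read of the whole file leaves the optimized-read counter untouched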
+ assertCounter(inputStats.getOptimizedBytesRead(), 0L, 0L, 0L, 0L); + } else { + final long remaining = input.length() % bufferSize; + final long expectedClockCounts = input.length() / bufferSize + (remaining > 0L ? 1L : 0L); + + // file is read in a single optimized read operation + IndexInputStats.TimedCounter optimizedBytesRead = inputStats.getOptimizedBytesRead(); + assertCounter(optimizedBytesRead, input.length(), 1L, input.length(), input.length()); + assertThat(optimizedBytesRead.totalNanoseconds(), equalTo(expectedClockCounts * FAKE_CLOCK_ADVANCE_NANOS)); + assertCounter(inputStats.getDirectBytesRead(), 0L, 0L, 0L, 0L); + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testReadBytesContiguously() { + executeTestCaseWithDefaultCache((fileName, fileContent, cacheDirectory) -> { + final IOContext ioContext = newIOContext(random()); + + try (IndexInput input = cacheDirectory.openInput(fileName, ioContext)) { + final IndexInputStats inputStats = cacheDirectory.getStats(fileName); + + // account for the CacheBufferedIndexInput internal buffer + final long bufferSize = (long) BufferedIndexInput.bufferSize(ioContext); + final long remaining = input.length() % bufferSize; + final long expectedTotal = input.length(); + final long expectedCount = input.length() / bufferSize + (remaining > 0L ? 1L : 0L); + final long minRead = remaining > 0L ? remaining : bufferSize; + final long maxRead = input.length() < bufferSize ? input.length() : bufferSize; + + final byte[] readBuffer = new byte[512]; + + // read the input input sequentially + for (long bytesRead = 0L; bytesRead < input.length();) { + int size = between(1, Math.toIntExact(Math.min(readBuffer.length, input.length() - bytesRead))); + input.readBytes(readBuffer, 0, size); + bytesRead += size; + + // cache file reads are aligned with internal buffered reads + long currentCount = bytesRead / bufferSize + (bytesRead % bufferSize > 0L ? 
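+ // ceiling division again: the partially filled trailing buffer counts as one cached read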
1L : 0L); + if (currentCount < expectedCount) { + assertCounter(inputStats.getContiguousReads(), currentCount * bufferSize, currentCount, bufferSize, bufferSize); + assertCounter(inputStats.getCachedBytesRead(), currentCount * bufferSize, currentCount, bufferSize, bufferSize); + + } else { + assertCounter(inputStats.getContiguousReads(), expectedTotal, expectedCount, minRead, maxRead); + assertCounter(inputStats.getCachedBytesRead(), expectedTotal, expectedCount, minRead, maxRead); + } + } + + // cache file has been written in a single chunk + assertCounter(inputStats.getCachedBytesWritten(), input.length(), 1L, input.length(), input.length()); + assertThat(inputStats.getCachedBytesWritten().totalNanoseconds(), equalTo(FAKE_CLOCK_ADVANCE_NANOS)); + + assertCounter(inputStats.getNonContiguousReads(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getDirectBytesRead(), 0L, 0L, 0L, 0L); + assertThat(inputStats.getDirectBytesRead().totalNanoseconds(), equalTo(0L)); + + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testReadBytesNonContiguously() { + executeTestCaseWithDefaultCache((fileName, fileContent, cacheDirectory) -> { + final IOContext ioContext = newIOContext(random()); + + try (IndexInput input = cacheDirectory.openInput(fileName, ioContext)) { + final IndexInputStats inputStats = cacheDirectory.getStats(fileName); + + long totalBytesRead = 0L; + long minBytesRead = Long.MAX_VALUE; + long maxBytesRead = Long.MIN_VALUE; + + for (long i = 1L; i <= randomLongBetween(1L, 10L); i++) { + final long randomPosition = randomLongBetween(1L, input.length() - 1L); + input.seek(randomPosition); + + final byte[] readBuffer = new byte[512]; + int size = between(1, Math.toIntExact(Math.min(readBuffer.length, input.length() - randomPosition))); + input.readBytes(readBuffer, 0, size); + + // BufferedIndexInput tries to read as much bytes as possible + final long bytesRead = Math.min(BufferedIndexInput.bufferSize(ioContext), input.length() - randomPosition); + totalBytesRead += bytesRead; + minBytesRead = (bytesRead < minBytesRead) ? bytesRead : minBytesRead; + maxBytesRead = (bytesRead > maxBytesRead) ? 
bytesRead : maxBytesRead; + + assertCounter(inputStats.getNonContiguousReads(), totalBytesRead, i, minBytesRead, maxBytesRead); + + // seek to the beginning forces a refill of the internal buffer (and simplifies a lot the test) + input.seek(0L); + } + + // cache file has been written in a single chunk + assertCounter(inputStats.getCachedBytesWritten(), input.length(), 1L, input.length(), input.length()); + assertThat(inputStats.getCachedBytesWritten().totalNanoseconds(), equalTo(FAKE_CLOCK_ADVANCE_NANOS)); + + assertCounter(inputStats.getContiguousReads(), 0L, 0L, 0L, 0L); + assertCounter(inputStats.getDirectBytesRead(), 0L, 0L, 0L, 0L); + assertThat(inputStats.getDirectBytesRead().totalNanoseconds(), equalTo(0L)); + + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testForwardSeeks() { + executeTestCaseWithDefaultCache((fileName, fileContent, cacheDirectory) -> { + final IOContext ioContext = newIOContext(random()); + try (IndexInput indexInput = cacheDirectory.openInput(fileName, ioContext)) { + IndexInput input = indexInput; + if (randomBoolean()) { + final long sliceOffset = randomLongBetween(0L, input.length() - 1L); + final long sliceLength = randomLongBetween(1L, input.length() - sliceOffset); + input = input.slice("slice", sliceOffset, sliceLength); + } + if (randomBoolean()) { + input = input.clone(); + } + + final IndexInputStats inputStats = cacheDirectory.getStats(fileName); + final IndexInputStats.Counter forwardSmallSeeksCounter = inputStats.getForwardSmallSeeks(); + assertCounter(forwardSmallSeeksCounter, 0L, 0L, 0L, 0L); + + long totalSmallSeeks = 0L; + long countSmallSeeks = 0L; + long minSmallSeeks = Long.MAX_VALUE; + long maxSmallSeeks = Long.MIN_VALUE; + + final IndexInputStats.Counter forwardLargeSeeksCounter = inputStats.getForwardLargeSeeks(); + assertCounter(forwardLargeSeeksCounter, 0L, 0L, 0L, 0L); + + long totalLargeSeeks = 0L; + long countLargeSeeks = 0L; + long minLargeSeeks = Long.MAX_VALUE; + long maxLargeSeeks = Long.MIN_VALUE; + + while (input.getFilePointer() < input.length()) { + long moveForward = randomLongBetween(1L, input.length() - input.getFilePointer()); + input.seek(input.getFilePointer() + moveForward); + + if (inputStats.isLargeSeek(moveForward)) { + minLargeSeeks = (moveForward < minLargeSeeks) ? moveForward : minLargeSeeks; + maxLargeSeeks = (moveForward > maxLargeSeeks) ? moveForward : maxLargeSeeks; + totalLargeSeeks += moveForward; + countLargeSeeks += 1; + + assertCounter(forwardLargeSeeksCounter, totalLargeSeeks, countLargeSeeks, minLargeSeeks, maxLargeSeeks); + + } else { + minSmallSeeks = (moveForward < minSmallSeeks) ? moveForward : minSmallSeeks; + maxSmallSeeks = (moveForward > maxSmallSeeks) ? 
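+ // running maximum of the small forward seek distances seen so far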
moveForward : maxSmallSeeks; + totalSmallSeeks += moveForward; + countSmallSeeks += 1; + + assertCounter(forwardSmallSeeksCounter, totalSmallSeeks, countSmallSeeks, minSmallSeeks, maxSmallSeeks); + } + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testBackwardSeeks() { + executeTestCaseWithDefaultCache((fileName, fileContent, cacheDirectory) -> { + final IOContext ioContext = newIOContext(random()); + try (IndexInput indexInput = cacheDirectory.openInput(fileName, ioContext)) { + IndexInput input = indexInput; + if (randomBoolean()) { + final long sliceOffset = randomLongBetween(0L, input.length() - 1L); + final long sliceLength = randomLongBetween(1L, input.length() - sliceOffset); + input = input.slice("slice", sliceOffset, sliceLength); + } + if (randomBoolean()) { + input = input.clone(); + } + + final IndexInputStats inputStats = cacheDirectory.getStats(fileName); + final IndexInputStats.Counter backwardSmallSeeks = inputStats.getBackwardSmallSeeks(); + assertCounter(backwardSmallSeeks, 0L, 0L, 0L, 0L); + + long totalSmallSeeks = 0L; + long countSmallSeeks = 0L; + long minSmallSeeks = Long.MAX_VALUE; + long maxSmallSeeks = Long.MIN_VALUE; + + final IndexInputStats.Counter backwardLargeSeeks = inputStats.getBackwardLargeSeeks(); + assertCounter(backwardLargeSeeks, 0L, 0L, 0L, 0L); + + long totalLargeSeeks = 0L; + long countLargeSeeks = 0L; + long minLargeSeeks = Long.MAX_VALUE; + long maxLargeSeeks = Long.MIN_VALUE; + + input.seek(input.length()); + assertThat(input.getFilePointer(), equalTo(input.length())); + + do { + long moveBackward = -1L * randomLongBetween(1L, input.getFilePointer()); + input.seek(input.getFilePointer() + moveBackward); + + if (inputStats.isLargeSeek(moveBackward)) { + minLargeSeeks = (moveBackward < minLargeSeeks) ? moveBackward : minLargeSeeks; + maxLargeSeeks = (moveBackward > maxLargeSeeks) ? moveBackward : maxLargeSeeks; + totalLargeSeeks += moveBackward; + countLargeSeeks += 1; + + assertCounter(backwardLargeSeeks, totalLargeSeeks, countLargeSeeks, minLargeSeeks, maxLargeSeeks); + + } else { + minSmallSeeks = (moveBackward < minSmallSeeks) ? moveBackward : minSmallSeeks; + maxSmallSeeks = (moveBackward > maxSmallSeeks) ? 
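+ // backward deltas are negative, so this "max" is the least negative, i.e. the shortest backward jump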
moveBackward : maxSmallSeeks; + totalSmallSeeks += moveBackward; + countSmallSeeks += 1; + + assertCounter(backwardSmallSeeks, totalSmallSeeks, countSmallSeeks, minSmallSeeks, maxSmallSeeks); + } + + } while (input.getFilePointer() > 0L); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + private static void executeTestCase(final TriConsumer test) { + executeTestCase( + createCacheService(random()), + Settings.builder().put(SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), randomBoolean()).build(), + test + ); + } + + private static void executeTestCaseWithoutCache( + final ByteSizeValue uncachedChunkSize, + final TriConsumer test + ) { + executeTestCase( + createCacheService(random()), + Settings.builder() + .put(SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING.getKey(), uncachedChunkSize) + .put(SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), false) + .build(), + test + ); + } + + private static void executeTestCaseWithDefaultCache(final TriConsumer test) { + executeTestCaseWithCache( + CacheService.SNAPSHOT_CACHE_SIZE_SETTING.getDefault(Settings.EMPTY), + CacheService.SNAPSHOT_CACHE_RANGE_SIZE_SETTING.getDefault(Settings.EMPTY), + test + ); + } + + private static void executeTestCaseWithCache( + final ByteSizeValue cacheSize, + final ByteSizeValue cacheRangeSize, + final TriConsumer test + ) { + executeTestCase( + new CacheService(cacheSize, cacheRangeSize), + Settings.builder().put(SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true).build(), + test + ); + } + + private static void executeTestCase( + final CacheService cacheService, + final Settings indexSettings, + final TriConsumer test + ) { + + final byte[] fileContent = randomUnicodeOfLength(randomIntBetween(10, MAX_FILE_LENGTH)).getBytes(StandardCharsets.UTF_8); + final String fileExtension = randomAlphaOfLength(3); + final String fileName = randomAlphaOfLength(10) + '.' + fileExtension; + final SnapshotId snapshotId = new SnapshotId("_name", "_uuid"); + final IndexId indexId = new IndexId("_name", "_uuid"); + final ShardId shardId = new ShardId("_name", "_uuid", 0); + final AtomicLong fakeClock = new AtomicLong(); + final LongSupplier statsCurrentTimeNanos = () -> fakeClock.addAndGet(FAKE_CLOCK_ADVANCE_NANOS); + + final Long seekingThreshold = randomBoolean() ? 
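+ // either exercise a random custom threshold, or pass null to fall back to the default SEEKING_THRESHOLD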
randomLongBetween(1L, fileContent.length) : null; + + final String blobName = randomUnicodeOfLength(10); + final BlobContainer blobContainer = singleBlobContainer(blobName, fileContent); + final StoreFileMetadata metadata = new StoreFileMetadata(fileName, fileContent.length, "_checksum", Version.CURRENT.luceneVersion); + final List files = Collections.singletonList(new FileInfo(blobName, metadata, new ByteSizeValue(fileContent.length))); + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(), 0L, files, 0L, 0L, 0, 0L); + + try ( + CacheService ignored = cacheService; + SearchableSnapshotDirectory directory = new SearchableSnapshotDirectory( + () -> blobContainer, + () -> snapshot, + snapshotId, + indexId, + shardId, + indexSettings, + statsCurrentTimeNanos, + cacheService, + createTempDir() + ) { + @Override + protected IndexInputStats createIndexInputStats(long fileLength) { + if (seekingThreshold == null) { + return super.createIndexInputStats(fileLength); + } + return new IndexInputStats(fileLength, seekingThreshold, statsCurrentTimeNanos); + } + } + ) { + cacheService.start(); + assertThat(directory.getStats(fileName), nullValue()); + + final boolean loaded = directory.loadSnapshot(); + assertThat("Failed to load snapshot", loaded, is(true)); + assertThat("Snapshot should be loaded", directory.snapshot(), notNullValue()); + assertThat("BlobContainer should be loaded", directory.blobContainer(), notNullValue()); + + test.apply(fileName, fileContent, directory); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/SearchableSnapshotDirectoryTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/SearchableSnapshotDirectoryTests.java new file mode 100644 index 00000000000..d8af7382c28 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/SearchableSnapshotDirectoryTests.java @@ -0,0 +1,595 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store; + +import org.apache.lucene.document.Document; +import org.apache.lucene.document.Field; +import org.apache.lucene.document.NumericDocValuesField; +import org.apache.lucene.document.StringField; +import org.apache.lucene.document.TextField; +import org.apache.lucene.index.DirectoryReader; +import org.apache.lucene.index.FieldInfo; +import org.apache.lucene.index.FieldInfos; +import org.apache.lucene.index.IndexCommit; +import org.apache.lucene.index.IndexOptions; +import org.apache.lucene.index.IndexWriter; +import org.apache.lucene.index.IndexWriterConfig; +import org.apache.lucene.index.LeafReader; +import org.apache.lucene.index.SegmentInfos; +import org.apache.lucene.index.Term; +import org.apache.lucene.index.Terms; +import org.apache.lucene.search.CheckHits; +import org.apache.lucene.search.IndexSearcher; +import org.apache.lucene.search.MatchAllDocsQuery; +import org.apache.lucene.search.Query; +import org.apache.lucene.search.TermInSetQuery; +import org.apache.lucene.search.TermQuery; +import org.apache.lucene.search.TermRangeQuery; +import org.apache.lucene.store.Directory; +import org.apache.lucene.store.IOContext; +import org.apache.lucene.store.IndexInput; +import org.apache.lucene.util.BytesRef; +import org.elasticsearch.Version; +import org.elasticsearch.action.support.PlainActionFuture; +import org.elasticsearch.cluster.metadata.IndexMetadata; +import org.elasticsearch.cluster.metadata.RepositoryMetadata; +import org.elasticsearch.common.CheckedBiConsumer; +import org.elasticsearch.common.CheckedFunction; +import org.elasticsearch.common.UUIDs; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.fs.FsBlobContainer; +import org.elasticsearch.common.blobstore.fs.FsBlobStore; +import org.elasticsearch.common.lease.Releasable; +import org.elasticsearch.common.lease.Releasables; +import org.elasticsearch.common.lucene.BytesRefs; +import org.elasticsearch.common.lucene.Lucene; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.common.xcontent.NamedXContentRegistry; +import org.elasticsearch.core.internal.io.IOUtils; +import org.elasticsearch.env.Environment; +import org.elasticsearch.index.Index; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.index.seqno.SequenceNumbers; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.translog.Translog; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.repositories.blobstore.BlobStoreRepository; +import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil; +import org.elasticsearch.repositories.fs.FsRepository; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.test.DummyShardLock; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.IndexSettingsModule; +import org.elasticsearch.threadpool.TestThreadPool; +import org.elasticsearch.threadpool.ThreadPool; +import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService; +import org.hamcrest.Matcher; + +import java.io.Closeable; +import java.io.EOFException; +import java.io.FileNotFoundException; +import java.io.IOException; +import 
java.nio.charset.StandardCharsets; +import java.nio.file.DirectoryStream; +import java.nio.file.Files; +import java.nio.file.NoSuchFileException; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import static java.util.Arrays.asList; +import static java.util.Collections.emptyMap; +import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.hasSize; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; +import static org.hamcrest.Matchers.sameInstance; + +public class SearchableSnapshotDirectoryTests extends ESTestCase { + + public void testListAll() throws Exception { + testDirectories( + (directory, snapshotDirectory) -> assertThat( + snapshotDirectory.listAll(), + equalTo( + Arrays.stream(directory.listAll()) + .filter(file -> "write.lock".equals(file) == false) + .filter(file -> file.startsWith("extra") == false) + .toArray(String[]::new) + ) + ) + ); + } + + public void testFileLength() throws Exception { + testDirectories( + (directory, snapshotDirectory) -> Arrays.stream(directory.listAll()) + .filter(file -> "write.lock".equals(file) == false) + .filter(file -> file.startsWith("extra") == false) + .forEach(file -> { + try { + assertThat( + "File [" + file + "] length mismatch", + snapshotDirectory.fileLength(file), + equalTo(directory.fileLength(file)) + ); + } catch (IOException e) { + throw new AssertionError(e); + } + }) + ); + } + + public void testIndexSearcher() throws Exception { + testDirectories((directory, snapshotDirectory) -> { + try (DirectoryReader reader = DirectoryReader.open(directory)) { + final IndexSearcher searcher = newSearcher(reader); + + try (DirectoryReader snapshotReader = DirectoryReader.open(snapshotDirectory)) { + final IndexSearcher snapshotSearcher = newSearcher(snapshotReader); + { + Query query = new MatchAllDocsQuery(); + assertThat(snapshotSearcher.count(query), equalTo(searcher.count(query))); + CheckHits.checkEqual(query, snapshotSearcher.search(query, 10).scoreDocs, searcher.search(query, 10).scoreDocs); + } + { + Query query = new TermQuery(new Term("text", "fox")); + assertThat(snapshotSearcher.count(query), equalTo(searcher.count(query))); + CheckHits.checkEqual(query, snapshotSearcher.search(query, 10).scoreDocs, searcher.search(query, 10).scoreDocs); + } + { + Query query = new TermInSetQuery("text", asList(new BytesRef("quick"), new BytesRef("lazy"))); + assertThat(snapshotSearcher.count(query), equalTo(searcher.count(query))); + CheckHits.checkEqual(query, snapshotSearcher.search(query, 10).scoreDocs, searcher.search(query, 10).scoreDocs); + } + { + Query query = new TermRangeQuery( + "rank", + BytesRefs.toBytesRef(randomLongBetween(0L, 500L)), + BytesRefs.toBytesRef(randomLongBetween(501L, 1000L)), + randomBoolean(), + randomBoolean() + ); + assertThat(snapshotSearcher.count(query), equalTo(searcher.count(query))); + CheckHits.checkEqual(query, snapshotSearcher.search(query, 10).scoreDocs, searcher.search(query, 10).scoreDocs); + } + } + } + }); + } + + public void testDirectoryReader() throws Exception { + testDirectories((directory, snapshotDirectory) -> { + try 
(DirectoryReader reader = DirectoryReader.open(directory)) { + try (DirectoryReader snapshotReader = DirectoryReader.open(snapshotDirectory)) { + assertThat(snapshotReader.leaves(), hasSize(reader.leaves().size())); + assertThat(snapshotReader.maxDoc(), equalTo(reader.maxDoc())); + assertThat(snapshotReader.getVersion(), equalTo(reader.getVersion())); + assertThat(snapshotReader.getIndexCommit().getGeneration(), equalTo(reader.getIndexCommit().getGeneration())); + + for (int i = 0; i < reader.leaves().size(); i++) { + LeafReader leafReader = reader.leaves().get(i).reader(); + LeafReader snapshotLeafReader = snapshotReader.leaves().get(i).reader(); + assertThat(snapshotLeafReader.numDocs(), equalTo(leafReader.numDocs())); + assertThat(snapshotLeafReader.numDeletedDocs(), equalTo(leafReader.numDeletedDocs())); + assertThat(snapshotLeafReader.maxDoc(), equalTo(leafReader.maxDoc())); + + FieldInfos fieldInfos = leafReader.getFieldInfos(); + FieldInfos snapshotFieldInfos = snapshotLeafReader.getFieldInfos(); + assertThat(snapshotFieldInfos.size(), equalTo(fieldInfos.size())); + + for (int j = 0; j < fieldInfos.size(); j++) { + FieldInfo fieldInfo = fieldInfos.fieldInfo(j); + FieldInfo snapshotFieldInfo = snapshotFieldInfos.fieldInfo(j); + + assertThat(snapshotFieldInfo.name, equalTo(fieldInfo.name)); + assertThat(snapshotFieldInfo.number, equalTo(fieldInfo.number)); + + assertThat(snapshotLeafReader.getDocCount(fieldInfo.name), equalTo(leafReader.getDocCount(fieldInfo.name))); + assertThat(snapshotLeafReader.getSumDocFreq(fieldInfo.name), equalTo(leafReader.getSumDocFreq(fieldInfo.name))); + + assertThat(snapshotFieldInfo.getDocValuesType(), equalTo(fieldInfo.getDocValuesType())); + assertThat(snapshotFieldInfo.getDocValuesGen(), equalTo(fieldInfo.getDocValuesGen())); + assertThat(snapshotFieldInfo.getPointDimensionCount(), equalTo(fieldInfo.getPointDimensionCount())); + assertThat(snapshotFieldInfo.getPointIndexDimensionCount(), equalTo(fieldInfo.getPointIndexDimensionCount())); + assertThat(snapshotFieldInfo.getPointNumBytes(), equalTo(fieldInfo.getPointNumBytes())); + + if (fieldInfo.getIndexOptions() != IndexOptions.NONE) { + Terms terms = leafReader.terms(fieldInfo.name); + Terms snapshotTerms = snapshotLeafReader.terms(fieldInfo.name); + + assertThat(snapshotTerms.size(), equalTo(terms.size())); + assertThat(snapshotTerms.getDocCount(), equalTo(terms.getDocCount())); + assertThat(snapshotTerms.getMin(), equalTo(terms.getMin())); + assertThat(snapshotTerms.getMax(), equalTo(terms.getMax())); + assertThat(snapshotTerms.getSumTotalTermFreq(), equalTo(terms.getSumTotalTermFreq())); + assertThat(snapshotTerms.getSumDocFreq(), equalTo(terms.getSumDocFreq())); + } + } + } + } + } + }); + } + + public void testReadByte() throws Exception { + testIndexInputs((indexInput, snapshotIndexInput) -> { + try { + for (int i = 0; i < 10; i++) { + if (randomBoolean()) { + long position = randomLongBetween(0L, indexInput.length()); + indexInput.seek(position); + snapshotIndexInput.seek(position); + } + assertThat( + "File pointer values should be the same before reading a byte", + snapshotIndexInput, + indexInput, + IndexInput::getFilePointer + ); + + if (indexInput.getFilePointer() < indexInput.length()) { + assertThat("Read byte result should be the same", snapshotIndexInput, indexInput, IndexInput::readByte); + } else { + expectThrows(EOFException.class, snapshotIndexInput::readByte); + } + assertThat( + "File pointer values should be the same after reading a byte", + snapshotIndexInput, + indexInput, 
IndexInput::getFilePointer + ); + } + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + public void testReadBytes() throws Exception { + final byte[] buffer = new byte[8192]; + final byte[] snapshotBuffer = new byte[buffer.length]; + + testIndexInputs((indexInput, snapshotIndexInput) -> { + try { + if (randomBoolean()) { + long position = randomLongBetween(0L, indexInput.length()); + indexInput.seek(position); + snapshotIndexInput.seek(position); + } + assertThat( + "File pointer values should be the same before reading bytes", + snapshotIndexInput, + indexInput, + IndexInput::getFilePointer + ); + + int available = Math.toIntExact(indexInput.length() - indexInput.getFilePointer()); + if (available == 0) { + expectThrows(EOFException.class, () -> snapshotIndexInput.readBytes(snapshotBuffer, 0, snapshotBuffer.length)); + return; + } + + int length = randomIntBetween(1, Math.min(available, buffer.length)); + + Arrays.fill(buffer, (byte) 0); + indexInput.readBytes(buffer, 0, length); + + Arrays.fill(snapshotBuffer, (byte) 0); + snapshotIndexInput.readBytes(snapshotBuffer, 0, length); + + assertThat( + "File pointer values should be the same after reading bytes", + snapshotIndexInput, + indexInput, + IndexInput::getFilePointer + ); + assertArrayEquals(snapshotBuffer, buffer); + } catch (IOException e) { + throw new AssertionError(e); + } + }); + } + + /** + * This method: + * - sets up a default {@link Directory} and indexes random documents + * - snapshots the directory using an FS repository + * - creates a {@link SearchableSnapshotDirectory} instance based on the snapshotted files + * - consumes the default and the searchable snapshot directories using the {@link CheckedBiConsumer}. + */ + private void testDirectories(final CheckedBiConsumer consumer) throws Exception { + final IndexSettings indexSettings = IndexSettingsModule.newIndexSettings( + "_index", + Settings.builder() + .put(IndexMetadata.SETTING_INDEX_UUID, UUIDs.randomBase64UUID(random())) + .put(IndexMetadata.SETTING_VERSION_CREATED, org.elasticsearch.Version.CURRENT) + .build() + ); + final ShardId shardId = new ShardId(indexSettings.getIndex(), randomIntBetween(0, 10)); + final List releasables = new ArrayList<>(); + + try (Directory directory = newDirectory()) { + final IndexWriterConfig indexWriterConfig = newIndexWriterConfig(); + try (IndexWriter writer = new IndexWriter(directory, indexWriterConfig)) { + final int nbDocs = scaledRandomIntBetween(0, 1_000); + final List words = asList("the", "quick", "brown", "fox", "jumps", "over", "the", "lazy", "dog"); + for (int i = 0; i < nbDocs; i++) { + final Document doc = new Document(); + doc.add(new StringField("id", "" + i, Field.Store.YES)); + String text = String.join(" ", randomSubsetOf(randomIntBetween(1, words.size()), words)); + doc.add(new TextField("text", text, Field.Store.YES)); + doc.add(new NumericDocValuesField("rank", i)); + writer.addDocument(doc); + } + if (randomBoolean()) { + writer.flush(); + } + if (randomBoolean()) { + writer.forceMerge(1, true); + } + final Map userData = new HashMap<>(2); + userData.put(SequenceNumbers.LOCAL_CHECKPOINT_KEY, "0"); + userData.put(Translog.TRANSLOG_UUID_KEY, UUIDs.randomBase64UUID(random())); + writer.setLiveCommitData(userData.entrySet()); + writer.commit(); + } + + final ThreadPool threadPool = new TestThreadPool(getClass().getSimpleName()); + releasables.add(() -> terminate(threadPool)); + + final Store store = new Store(shardId, indexSettings, directory, new DummyShardLock(shardId)); + 
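+ // note: Store is reference-counted; the incRef() below pairs with the decRef() registered in releasables, so the store remains usable until the cleanup in the finally block runs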
store.incRef(); + releasables.add(store::decRef); + try { + final SegmentInfos segmentInfos = Lucene.readSegmentInfos(store.directory()); + final IndexCommit indexCommit = Lucene.getIndexCommit(segmentInfos, store.directory()); + + Path repositoryPath = createTempDir(); + Settings.Builder repositorySettings = Settings.builder().put("location", repositoryPath); + boolean compress = randomBoolean(); + if (compress) { + repositorySettings.put("compress", randomBoolean()); + } + if (randomBoolean()) { + repositorySettings.put("base_path", randomAlphaOfLengthBetween(3, 10)); + } + if (randomBoolean()) { + repositorySettings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES); + } + + final String repositoryName = randomAlphaOfLength(10); + final RepositoryMetadata repositoryMetadata = new RepositoryMetadata( + repositoryName, + FsRepository.TYPE, + repositorySettings.build() + ); + + final BlobStoreRepository repository = new FsRepository( + repositoryMetadata, + new Environment( + Settings.builder() + .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath()) + .put(Environment.PATH_REPO_SETTING.getKey(), repositoryPath.toAbsolutePath()) + .putList(Environment.PATH_DATA_SETTING.getKey(), tmpPaths()) + .build(), + null + ), + NamedXContentRegistry.EMPTY, + BlobStoreTestUtil.mockClusterService(repositoryMetadata) + ) { + + @Override + protected void assertSnapshotOrGenericThread() { + // eliminate thread name check as we create repo manually on test/main threads + } + }; + repository.start(); + releasables.add(repository::stop); + + final SnapshotId snapshotId = new SnapshotId("_snapshot", UUIDs.randomBase64UUID(random())); + final IndexId indexId = new IndexId(indexSettings.getIndex().getName(), UUIDs.randomBase64UUID(random())); + + final PlainActionFuture future = PlainActionFuture.newFuture(); + threadPool.generic().submit(() -> { + IndexShardSnapshotStatus snapshotStatus = IndexShardSnapshotStatus.newInitializing(null); + repository.snapshotShard( + store, + null, + snapshotId, + indexId, + indexCommit, + null, + snapshotStatus, + Version.CURRENT, + emptyMap(), + future + ); + future.actionGet(); + }); + future.actionGet(); + + final BlobContainer blobContainer = repository.shardContainer(indexId, shardId.id()); + final BlobStoreIndexShardSnapshot snapshot = repository.loadShardSnapshot(blobContainer, snapshotId); + + final Path cacheDir = createTempDir(); + final CacheService cacheService = new CacheService(Settings.EMPTY); + releasables.add(cacheService); + cacheService.start(); + + try ( + SearchableSnapshotDirectory snapshotDirectory = new SearchableSnapshotDirectory( + () -> blobContainer, + () -> snapshot, + snapshotId, + indexId, + shardId, + Settings.builder().put(SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), randomBoolean()).build(), + () -> 0L, + cacheService, + cacheDir + ) + ) { + final boolean loaded = snapshotDirectory.loadSnapshot(); + assertThat("Failed to load snapshot", loaded, is(true)); + assertThat("Snapshot should be loaded", snapshotDirectory.snapshot(), sameInstance(snapshot)); + assertThat("BlobContainer should be loaded", snapshotDirectory.blobContainer(), sameInstance(blobContainer)); + + consumer.accept(directory, snapshotDirectory); + } + } finally { + Releasables.close(releasables); + } + } + } + + private void testIndexInputs(final CheckedBiConsumer consumer) throws Exception { + testDirectories((directory, snapshotDirectory) -> { + for (String fileName : randomSubsetOf(asList(snapshotDirectory.listAll()))) { + final IOContext 
context = newIOContext(random()); + try (IndexInput indexInput = directory.openInput(fileName, context)) { + final List closeables = new ArrayList<>(); + try { + IndexInput snapshotIndexInput = snapshotDirectory.openInput(fileName, context); + closeables.add(snapshotIndexInput); + if (randomBoolean()) { + snapshotIndexInput = snapshotIndexInput.clone(); + } + consumer.accept(indexInput, snapshotIndexInput); + } finally { + IOUtils.close(closeables); + } + } + } + }); + } + + public void testClearCache() throws Exception { + try (CacheService cacheService = new CacheService(Settings.EMPTY)) { + cacheService.start(); + + final int nbRandomFiles = randomIntBetween(3, 10); + final List randomFiles = new ArrayList<>(nbRandomFiles); + + final Path shardSnapshotDir = createTempDir(); + for (int i = 0; i < nbRandomFiles; i++) { + final String fileName = "file_" + randomAlphaOfLength(10); + final byte[] fileContent = randomUnicodeOfLength(randomIntBetween(1, 100_000)).getBytes(StandardCharsets.UTF_8); + final String blobName = randomAlphaOfLength(15); + Files.write(shardSnapshotDir.resolve(blobName), fileContent, StandardOpenOption.CREATE_NEW); + randomFiles.add( + new BlobStoreIndexShardSnapshot.FileInfo( + blobName, + new StoreFileMetadata(fileName, fileContent.length, "_check", Version.CURRENT.luceneVersion), + new ByteSizeValue(fileContent.length) + ) + ); + } + + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot("_snapshot", 0L, randomFiles, 0L, 0L, 0, 0L); + final BlobContainer blobContainer = new FsBlobContainer( + new FsBlobStore(Settings.EMPTY, shardSnapshotDir, true), + BlobPath.cleanPath(), + shardSnapshotDir + ); + + final SnapshotId snapshotId = new SnapshotId("_name", "_uuid"); + final IndexId indexId = new IndexId("_id", "_uuid"); + final ShardId shardId = new ShardId(new Index("_name", "_id"), 0); + + final Path cacheDir = createTempDir(); + try ( + SearchableSnapshotDirectory directory = new SearchableSnapshotDirectory( + () -> blobContainer, + () -> snapshot, + snapshotId, + indexId, + shardId, + Settings.builder().put(SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true).build(), + () -> 0L, + cacheService, + cacheDir + ) + ) { + + final boolean loaded = directory.loadSnapshot(); + assertThat("Failed to load snapshot", loaded, is(true)); + assertThat("Snapshot should be loaded", directory.snapshot(), sameInstance(snapshot)); + assertThat("BlobContainer should be loaded", directory.blobContainer(), sameInstance(blobContainer)); + + final byte[] buffer = new byte[1024]; + for (int i = 0; i < randomIntBetween(10, 50); i++) { + final BlobStoreIndexShardSnapshot.FileInfo fileInfo = randomFrom(randomFiles); + final int fileLength = Math.toIntExact(fileInfo.length()); + + try (IndexInput input = directory.openInput(fileInfo.physicalName(), newIOContext(random()))) { + assertThat(input.length(), equalTo((long) fileLength)); + final int start = between(0, fileLength - 1); + final int end = between(start + 1, fileLength); + + input.seek(start); + while (input.getFilePointer() < end) { + input.readBytes(buffer, 0, Math.toIntExact(Math.min(buffer.length, end - input.getFilePointer()))); + } + } + assertListOfFiles(cacheDir, allOf(greaterThan(0), lessThanOrEqualTo(nbRandomFiles)), greaterThan(0L)); + if (randomBoolean()) { + directory.clearCache(); + assertListOfFiles(cacheDir, equalTo(0), equalTo(0L)); + } + } + } + } + } + + private static void assertThat( + String reason, + IndexInput actual, + IndexInput expected, + CheckedFunction eval + ) throws IOException { + 
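+ // applies the same extractor (e.g. IndexInput::getFilePointer or IndexInput::readByte) to both index inputs and compares the results, embedding both inputs' toString() in the failure message to ease diagnosis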
assertThat( + reason + "\n\t actual index input: " + actual.toString() + "\n\texpected index input: " + expected.toString(), + eval.apply(actual), + equalTo(eval.apply(expected)) + ); + } + + private void assertListOfFiles(Path cacheDir, Matcher matchNumberOfFiles, Matcher matchSizeOfFiles) throws IOException { + final Map files = new HashMap<>(); + try (DirectoryStream stream = Files.newDirectoryStream(cacheDir)) { + for (Path file : stream) { + final String fileName = file.getFileName().toString(); + if (fileName.equals("write.lock") || fileName.startsWith("extra")) { + continue; + } + try { + if (Files.isRegularFile(file)) { + final BasicFileAttributes fileAttributes = Files.readAttributes(file, BasicFileAttributes.class); + files.put(fileName, fileAttributes.size()); + } + } catch (FileNotFoundException | NoSuchFileException e) { + // ignoring as the cache file might be evicted + } + } + } + assertThat("Number of files (" + files.size() + ") mismatch, got : " + files.keySet(), files.size(), matchNumberOfFiles); + assertThat("Sum of file sizes mismatch, got: " + files, files.values().stream().mapToLong(Long::longValue).sum(), matchSizeOfFiles); + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CacheFileTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CacheFileTests.java new file mode 100644 index 00000000000..331d5ec006e --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CacheFileTests.java @@ -0,0 +1,144 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store.cache; + +import org.apache.lucene.util.SetOnce; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.index.store.cache.CacheFile.EvictionListener; + +import java.io.IOException; +import java.nio.channels.FileChannel; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Objects; + +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; +import static org.hamcrest.Matchers.nullValue; +import static org.hamcrest.Matchers.sameInstance; + +public class CacheFileTests extends ESTestCase { + + public void testAcquireAndRelease() throws Exception { + final Path file = createTempDir().resolve("file.cache"); + final CacheFile cacheFile = new CacheFile("test", randomLongBetween(1, 100), file, randomIntBetween(1, 100)); + + assertThat("Cache file is not acquired: no channel exists", cacheFile.getChannel(), nullValue()); + assertThat("Cache file is not acquired: file does not exist", Files.exists(file), is(false)); + + final TestEvictionListener listener = new TestEvictionListener(); + boolean acquired = cacheFile.acquire(listener); + assertThat("Cache file has been acquired", acquired, is(true)); + assertThat("Cache file has been acquired: file should exist", Files.exists(file), is(true)); + assertThat("Cache file has been acquired: channel should exist", cacheFile.getChannel(), notNullValue()); + assertThat("Cache file has been acquired: channel is open", cacheFile.getChannel().isOpen(), is(true)); + assertThat("Cache file has been acquired: eviction listener is not executed", listener.isCalled(), is(false)); + + boolean released = cacheFile.release(listener); + assertThat("Cache file has been released", released, is(true)); + assertThat("Cache file has been released: eviction listener is not executed", listener.isCalled(), is(false)); + assertThat("Cache file has been released: channel does not exist", cacheFile.getChannel(), nullValue()); + assertThat("Cache file is not evicted: file still exists after release", Files.exists(file), is(true)); + + acquired = cacheFile.acquire(listener); + assertThat("Cache file is acquired again", acquired, is(true)); + + FileChannel fileChannel = cacheFile.getChannel(); + assertThat("Channel should exist", fileChannel, notNullValue()); + assertThat("Channel is open", fileChannel.isOpen(), is(true)); + + assertThat("Cache file is not evicted: eviction listener is not notified", listener.isCalled(), is(false)); + cacheFile.startEviction(); + + assertThat("Cache file has been evicted: eviction listener was executed", listener.isCalled(), is(true)); + assertThat("Cache file is evicted but not fully released: file still exists", Files.exists(file), is(true)); + assertThat("Cache file is evicted but not fully released: channel still exists", cacheFile.getChannel(), notNullValue()); + assertThat("Cache file is evicted but not fully released: channel is open", cacheFile.getChannel().isOpen(), is(true)); + assertThat("Channel didn't change after eviction", cacheFile.getChannel(), sameInstance(fileChannel)); + + released = cacheFile.release(listener); + assertTrue("Cache file is fully released", released); + assertThat("Cache file evicted and fully released: channel does not exist", cacheFile.getChannel(), nullValue()); + assertThat("Cache file has been deleted", Files.exists(file), is(false)); + } + + public void testCacheFileNotAcquired() throws IOException { + 
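+ // a cache file that was never acquired (or was fully released) holds no open channel, so starting an eviction in that state deletes the underlying file immediately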
final Path file = createTempDir().resolve("file.cache"); + final CacheFile cacheFile = new CacheFile("test", randomLongBetween(1, 100), file, randomIntBetween(1, 100)); + + assertThat(Files.exists(file), is(false)); + assertThat(cacheFile.getChannel(), nullValue()); + + if (randomBoolean()) { + final TestEvictionListener listener = new TestEvictionListener(); + boolean acquired = cacheFile.acquire(listener); + assertTrue("Cache file is acquired", acquired); + + assertThat(cacheFile.getChannel(), notNullValue()); + assertThat(Files.exists(file), is(true)); + + boolean released = cacheFile.release(listener); + assertTrue("Cache file is released", released); + } + + cacheFile.startEviction(); + assertThat(cacheFile.getChannel(), nullValue()); + assertFalse(Files.exists(file)); + } + + public void testDeleteOnCloseAfterLastRelease() throws Exception { + final Path file = createTempDir().resolve("file.cache"); + final CacheFile cacheFile = new CacheFile("test", randomLongBetween(1, 100), file, randomIntBetween(1, 100)); + + final List acquiredListeners = new ArrayList<>(); + for (int i = 0; i < randomIntBetween(1, 20); i++) { + TestEvictionListener listener = new TestEvictionListener(); + assertTrue(cacheFile.acquire(listener)); + assertThat(cacheFile.getChannel(), notNullValue()); + acquiredListeners.add(listener); + } + + final List releasedListeners = new ArrayList<>(); + for (Iterator it = acquiredListeners.iterator(); it.hasNext();) { + if (randomBoolean()) { + TestEvictionListener listener = it.next(); + releasedListeners.add(listener); + cacheFile.release(listener); + it.remove(); + } + } + + assertTrue(Files.exists(file)); + cacheFile.startEviction(); + + releasedListeners.forEach(l -> assertFalse("Released listeners before cache file eviction are not called", l.isCalled())); + acquiredListeners.forEach(l -> assertTrue("Released listeners after cache file eviction are called", l.isCalled())); + acquiredListeners.forEach(cacheFile::release); + + assertFalse(Files.exists(file)); + } + + class TestEvictionListener implements EvictionListener { + + private SetOnce evicted = new SetOnce<>(); + + CacheFile getEvictedCacheFile() { + return evicted.get(); + } + + boolean isCalled() { + return getEvictedCacheFile() != null; + } + + @Override + public void onEviction(CacheFile evictedCacheFile) { + evicted.set(Objects.requireNonNull(evictedCacheFile)); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CacheKeyTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CacheKeyTests.java new file mode 100644 index 00000000000..af18dda3d44 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CacheKeyTests.java @@ -0,0 +1,110 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store.cache; + +import org.elasticsearch.index.Index; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.test.ESTestCase; +import org.elasticsearch.test.EqualsHashCodeTestUtils; + +import static org.hamcrest.Matchers.equalTo; + +public class CacheKeyTests extends ESTestCase { + + public void testEqualsAndHashCode() { + EqualsHashCodeTestUtils.checkEqualsAndHashCode(createInstance(), this::copy, this::mutate); + } + + public void testBelongsTo() { + final CacheKey cacheKey = createInstance(); + + SnapshotId snapshotId = cacheKey.getSnapshotId(); + IndexId indexId = cacheKey.getIndexId(); + ShardId shardId = cacheKey.getShardId(); + + final boolean belongsTo; + // randomInt(max) is inclusive of max, so case 3 (no mutation, belongsTo == true) must be reachable + switch (randomInt(3)) { + case 0: + snapshotId = randomValueOtherThan(cacheKey.getSnapshotId(), this::randomSnapshotId); + belongsTo = false; + break; + case 1: + indexId = randomValueOtherThan(cacheKey.getIndexId(), this::randomIndexId); + belongsTo = false; + break; + case 2: + shardId = randomValueOtherThan(cacheKey.getShardId(), this::randomShardId); + belongsTo = false; + break; + case 3: + belongsTo = true; + break; + default: + throw new AssertionError("Unsupported value"); + } + + assertThat(cacheKey.belongsTo(snapshotId, indexId, shardId), equalTo(belongsTo)); + } + + private SnapshotId randomSnapshotId() { + return new SnapshotId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)); + } + + private IndexId randomIndexId() { + return new IndexId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10)); + } + + private ShardId randomShardId() { + return new ShardId(randomAlphaOfLengthBetween(5, 10), randomAlphaOfLengthBetween(5, 10), randomInt(5)); + } + + private CacheKey createInstance() { + return new CacheKey(randomSnapshotId(), randomIndexId(), randomShardId(), randomAlphaOfLengthBetween(5, 10)); + } + + private CacheKey copy(final CacheKey origin) { + SnapshotId snapshotId = origin.getSnapshotId(); + if (randomBoolean()) { + snapshotId = new SnapshotId(origin.getSnapshotId().getName(), origin.getSnapshotId().getUUID()); + } + IndexId indexId = origin.getIndexId(); + if (randomBoolean()) { + indexId = new IndexId(origin.getIndexId().getName(), origin.getIndexId().getId()); + } + ShardId shardId = origin.getShardId(); + if (randomBoolean()) { + shardId = new ShardId(new Index(shardId.getIndex().getName(), shardId.getIndex().getUUID()), shardId.id()); + } + return new CacheKey(snapshotId, indexId, shardId, origin.getFileName()); + } + + private CacheKey mutate(CacheKey origin) { + SnapshotId snapshotId = origin.getSnapshotId(); + IndexId indexId = origin.getIndexId(); + ShardId shardId = origin.getShardId(); + String fileName = origin.getFileName(); + + switch (randomInt(3)) { + case 0: + snapshotId = randomValueOtherThan(origin.getSnapshotId(), this::randomSnapshotId); + break; + case 1: + indexId = randomValueOtherThan(origin.getIndexId(), this::randomIndexId); + break; + case 2: + shardId = randomValueOtherThan(origin.getShardId(), this::randomShardId); + break; + case 3: + fileName = randomValueOtherThan(origin.getFileName(), () -> randomAlphaOfLengthBetween(5, 10)); + break; + default: + throw new AssertionError("Unsupported mutation"); + } + return new CacheKey(snapshotId, indexId, shardId, fileName); + } +} diff --git 
a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInputTests.java new file mode 100644 index 00000000000..149b208edcc --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/CachedBlobContainerIndexInputTests.java @@ -0,0 +1,309 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ +package org.elasticsearch.index.store.cache; + +import org.apache.lucene.store.IndexInput; +import org.elasticsearch.Version; +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.lucene.store.ESIndexInputTestCase; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.shard.ShardId; +import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot; +import org.elasticsearch.index.store.SearchableSnapshotDirectory; +import org.elasticsearch.index.store.StoreFileMetadata; +import org.elasticsearch.repositories.IndexId; +import org.elasticsearch.snapshots.SnapshotId; +import org.elasticsearch.snapshots.mockstore.BlobContainerWrapper; +import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService; + +import java.io.EOFException; +import java.io.FilterInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.HashSet; +import java.util.Objects; +import java.util.concurrent.atomic.LongAdder; + +import static java.util.Collections.singletonList; +import static org.elasticsearch.index.store.cache.TestUtils.createCacheService; +import static org.elasticsearch.index.store.cache.TestUtils.singleBlobContainer; +import static org.elasticsearch.index.store.cache.TestUtils.singleSplitBlobContainer; +import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.notNullValue; + +public class CachedBlobContainerIndexInputTests extends ESIndexInputTestCase { + + public void testRandomReads() throws IOException { + try (CacheService cacheService = createCacheService(random())) { + cacheService.start(); + + SnapshotId snapshotId = new SnapshotId("_name", "_uuid"); + IndexId indexId = new IndexId("_name", "_uuid"); + ShardId shardId = new ShardId("_name", "_uuid", 0); + + for (int i = 0; i < 5; i++) { + final String fileName = randomAlphaOfLength(10); + final byte[] input = randomUnicodeOfLength(randomIntBetween(1, 100_000)).getBytes(StandardCharsets.UTF_8); + + final String blobName = randomUnicodeOfLength(10); + final StoreFileMetadata metaData = new StoreFileMetadata(fileName, input.length, "_na", Version.CURRENT.luceneVersion); + + final int partSize = randomBoolean() ? 
input.length : randomIntBetween(1, input.length); + + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot( + snapshotId.getName(), + 0L, + singletonList(new BlobStoreIndexShardSnapshot.FileInfo(blobName, metaData, new ByteSizeValue(partSize))), + 0L, + 0L, + 0, + 0L + ); + + final BlobContainer singleBlobContainer = singleSplitBlobContainer(blobName, input, partSize); + final BlobContainer blobContainer; + if (input.length == partSize && input.length <= cacheService.getCacheSize()) { + blobContainer = new CountingBlobContainer(singleBlobContainer, cacheService.getRangeSize()); + } else { + blobContainer = singleBlobContainer; + } + + final Path cacheDir = createTempDir(); + try ( + SearchableSnapshotDirectory directory = new SearchableSnapshotDirectory( + () -> blobContainer, + () -> snapshot, + snapshotId, + indexId, + shardId, + Settings.builder().put(SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), true).build(), + () -> 0L, + cacheService, + cacheDir + ) + ) { + final boolean loaded = directory.loadSnapshot(); + assertThat("Failed to load snapshot", loaded, is(true)); + assertThat("Snapshot should be loaded", directory.snapshot(), notNullValue()); + assertThat("BlobContainer should be loaded", directory.blobContainer(), notNullValue()); + + try (IndexInput indexInput = directory.openInput(fileName, newIOContext(random()))) { + assertEquals(input.length, indexInput.length()); + assertEquals(0, indexInput.getFilePointer()); + byte[] output = randomReadAndSlice(indexInput, input.length); + assertArrayEquals(input, output); + } + } + + if (blobContainer instanceof CountingBlobContainer) { + long numberOfRanges = TestUtils.numberOfRanges(input.length, cacheService.getRangeSize()); + assertThat( + "Expected " + numberOfRanges + " ranges fetched from the source", + ((CountingBlobContainer) blobContainer).totalOpens.sum(), + equalTo(numberOfRanges) + ); + assertThat( + "All bytes should have been read from source", + ((CountingBlobContainer) blobContainer).totalBytes.sum(), + equalTo((long) input.length) + ); + } + } + } + } + + public void testThrowsEOFException() throws IOException { + try (CacheService cacheService = createCacheService(random())) { + cacheService.start(); + + SnapshotId snapshotId = new SnapshotId("_name", "_uuid"); + IndexId indexId = new IndexId("_name", "_uuid"); + ShardId shardId = new ShardId("_name", "_uuid", 0); + + final String fileName = randomAlphaOfLength(10); + final byte[] input = randomUnicodeOfLength(randomIntBetween(1, 100_000)).getBytes(StandardCharsets.UTF_8); + + final String blobName = randomUnicodeOfLength(10); + final StoreFileMetadata metadata = new StoreFileMetadata(fileName, input.length + 1, "_na", Version.CURRENT.luceneVersion); + final BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot( + snapshotId.getName(), + 0L, + singletonList(new BlobStoreIndexShardSnapshot.FileInfo(blobName, metadata, new ByteSizeValue(input.length + 1))), + 0L, + 0L, + 0, + 0L + ); + + final BlobContainer blobContainer = singleBlobContainer(blobName, input); + + final Path cacheDir = createTempDir(); + try ( + SearchableSnapshotDirectory searchableSnapshotDirectory = new SearchableSnapshotDirectory( + () -> blobContainer, + () -> snapshot, + snapshotId, + indexId, + shardId, + Settings.EMPTY, + () -> 0L, + cacheService, + cacheDir + ) + ) { + final boolean loaded = searchableSnapshotDirectory.loadSnapshot(); + assertThat("Failed to load snapshot", loaded, is(true)); + assertThat("Snapshot should be loaded", 
searchableSnapshotDirectory.snapshot(), notNullValue()); + assertThat("BlobContainer should be loaded", searchableSnapshotDirectory.blobContainer(), notNullValue()); + + try (IndexInput indexInput = searchableSnapshotDirectory.openInput(fileName, newIOContext(random()))) { + final byte[] buffer = new byte[input.length + 1]; + final IOException exception = expectThrows(IOException.class, () -> indexInput.readBytes(buffer, 0, buffer.length)); + if (containsEOFException(exception, new HashSet<>()) == false) { + throw new AssertionError("inner EOFException not thrown", exception); + } + } + } + } + } + + private boolean containsEOFException(Throwable throwable, HashSet seenThrowables) { + if (throwable == null || seenThrowables.add(throwable) == false) { + return false; + } + if (throwable instanceof EOFException) { + return true; + } + for (Throwable suppressed : throwable.getSuppressed()) { + if (containsEOFException(suppressed, seenThrowables)) { + return true; + } + } + return containsEOFException(throwable.getCause(), seenThrowables); + } + + /** + * BlobContainer that counts the number of {@link java.io.InputStream} it opens, as well as the + * total number of bytes read from them. + */ + private static class CountingBlobContainer extends BlobContainerWrapper { + + private final LongAdder totalBytes = new LongAdder(); + private final LongAdder totalOpens = new LongAdder(); + + private final int rangeSize; + + CountingBlobContainer(BlobContainer in, int rangeSize) { + super(in); + this.rangeSize = rangeSize; + } + + @Override + public InputStream readBlob(String blobName, long position, long length) throws IOException { + return new CountingInputStream(this, super.readBlob(blobName, position, length), length, rangeSize); + } + + @Override + public InputStream readBlob(String name) { + assert false : "this method should never be called"; + throw new UnsupportedOperationException(); + } + } + + /** + * InputStream that counts the number of bytes read from it, as well as the positions + * where read operations start and finish. 
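+ * Each stream is expected to cover exactly one cache range: on close it asserts that the read started at a multiple of + * the range size and stopped either at a range boundary or at the end of the blob, e.g. with a range size of 1024 and + * a 2500 byte blob the valid reads are [0,1024), [1024,2048) and [2048,2500).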
+ */ +private static class CountingInputStream extends FilterInputStream { + + private final CountingBlobContainer container; + private final int rangeSize; + private final long length; + + private long bytesRead = 0L; + private long position = 0L; + private long start = Long.MAX_VALUE; + private long end = Long.MIN_VALUE; + + CountingInputStream(CountingBlobContainer container, InputStream input, long length, int rangeSize) { + super(input); + this.container = Objects.requireNonNull(container); + this.rangeSize = rangeSize; + this.length = length; + this.container.totalOpens.increment(); + } + + @Override + public int read() throws IOException { + if (position < start) { + start = position; + } + + final int result = in.read(); + if (result == -1) { + return result; + } + bytesRead += 1L; + position += 1L; + + if (position > end) { + end = position; + } + return result; + } + + @Override + public int read(byte[] b, int offset, int len) throws IOException { + if (position < start) { + start = position; + } + + final int result = in.read(b, offset, len); + if (result == -1) { + // propagate EOF without counting any bytes + return result; + } + // count only the bytes actually returned; the delegate may return fewer than len + bytesRead += result; + position += result; + + if (position > end) { + end = position; + } + return result; + } + + @Override + public void close() throws IOException { + in.close(); + if (start % rangeSize != 0) { + throw new AssertionError("Read operation should start at the beginning of a range"); + } + if (end % rangeSize != 0) { + if (end != length) { + throw new AssertionError("Read operation should finish at the end of a range or the end of the file"); + } + } + if (length <= rangeSize) { + if (bytesRead != length) { + throw new AssertionError("All [" + length + "] bytes should have been read, no more, no less, but got: " + bytesRead); + } + } else { + if (bytesRead != rangeSize) { + if (end != length) { + throw new AssertionError("Expecting [" + rangeSize + "] bytes to be read but got: " + bytesRead); + } + final long remaining = length % rangeSize; + if (bytesRead != remaining) { + throw new AssertionError("Expecting [" + remaining + "] bytes to be read but got: " + bytesRead); + } + } + } + this.container.totalBytes.add(bytesRead); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/SparseFileTrackerTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/SparseFileTrackerTests.java new file mode 100644 index 00000000000..23e568550f7 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/SparseFileTrackerTests.java @@ -0,0 +1,232 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store.cache; + +import org.elasticsearch.ElasticsearchException; +import org.elasticsearch.action.ActionListener; +import org.elasticsearch.cluster.coordination.DeterministicTaskQueue; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.test.ESTestCase; + +import java.util.HashSet; +import java.util.List; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.Semaphore; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Consumer; + +import static org.elasticsearch.common.util.concurrent.ConcurrentCollections.newConcurrentSet; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.empty; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThanOrEqualTo; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.Matchers.lessThanOrEqualTo; + +public class SparseFileTrackerTests extends ESTestCase { + + // these tests model the file as a byte[] which starts out entirely unavailable and becomes available over time on a byte-by-byte basis + private static final byte UNAVAILABLE = (byte) 0x00; + private static final byte AVAILABLE = (byte) 0xff; + + public void testInvalidLength() { + IllegalArgumentException e = expectThrows(IllegalArgumentException.class, () -> new SparseFileTracker("test", -1L)); + assertThat(e.getMessage(), containsString("Length [-1] must be equal to or greater than 0 for [test]")); + } + + public void testInvalidRange() { + final byte[] fileContents = new byte[between(0, 1000)]; + final long length = fileContents.length; + final SparseFileTracker sparseFileTracker = new SparseFileTracker("test", length); + + final AtomicBoolean invoked = new AtomicBoolean(false); + final ActionListener listener = ActionListener.wrap(() -> invoked.set(true)); + + IllegalArgumentException e = expectThrows( + IllegalArgumentException.class, + () -> sparseFileTracker.waitForRange(-1L, randomLongBetween(0L, length), listener) + ); + assertThat("start must not be negative", e.getMessage(), containsString("invalid range")); + assertThat(invoked.get(), is(false)); + + e = expectThrows( + IllegalArgumentException.class, + () -> sparseFileTracker.waitForRange(randomLongBetween(0L, Math.max(0L, length - 1L)), length + 1L, listener) + ); + assertThat("end must not be greater than length", e.getMessage(), containsString("invalid range")); + assertThat(invoked.get(), is(false)); + + if (length > 1L) { + e = expectThrows(IllegalArgumentException.class, () -> { + long start = randomLongBetween(1L, Math.max(1L, length - 1L)); + long end = randomLongBetween(0L, start - 1L); + sparseFileTracker.waitForRange(start, end, listener); + }); + assertThat("end must not be less than start", e.getMessage(), containsString("invalid range")); + assertThat(invoked.get(), is(false)); + } + } + + public void testCallsListenerWhenWholeRangeIsAvailable() { + final byte[] fileContents = new byte[between(0, 1000)]; + final SparseFileTracker sparseFileTracker = new SparseFileTracker("test", fileContents.length); + + final Set listenersCalled = new HashSet<>(); + for (int i = between(0, 10); i > 0; i--) { + waitForRandomRange(fileContents, sparseFileTracker, listenersCalled::add, gap -> processGap(fileContents, gap)); + assertTrue(listenersCalled.stream().allMatch(AtomicBoolean::get)); + } + + final long start = randomLongBetween(0L, Math.max(0L, fileContents.length - 1)); + final long end = 
randomLongBetween(start, fileContents.length); + boolean pending = false; + for (long i = start; i < end; i++) { + if (fileContents[Math.toIntExact(i)] == UNAVAILABLE) { + pending = true; + } + } + + if (pending) { + final AtomicBoolean expectNotification = new AtomicBoolean(); + final AtomicBoolean wasNotified = new AtomicBoolean(); + final List gaps = sparseFileTracker.waitForRange(start, end, ActionListener.wrap(ignored -> { + assertTrue(expectNotification.get()); + assertTrue(wasNotified.compareAndSet(false, true)); + }, e -> { throw new AssertionError(e); })); + for (int gapIndex = 0; gapIndex < gaps.size(); gapIndex++) { + final SparseFileTracker.Gap gap = gaps.get(gapIndex); + assertThat(gap.start, greaterThanOrEqualTo(start)); + assertThat(gap.end, lessThanOrEqualTo(end)); + for (long i = gap.start; i < gap.end; i++) { + assertThat(fileContents[Math.toIntExact(i)], equalTo(UNAVAILABLE)); + fileContents[Math.toIntExact(i)] = AVAILABLE; + } + assertFalse(wasNotified.get()); + if (gapIndex == gaps.size() - 1) { + expectNotification.set(true); + } + gap.onResponse(null); + } + assertTrue(wasNotified.get()); + } + + final AtomicBoolean wasNotified = new AtomicBoolean(); + final List gaps = sparseFileTracker.waitForRange( + start, + end, + ActionListener.wrap(ignored -> assertTrue(wasNotified.compareAndSet(false, true)), e -> { throw new AssertionError(e); }) + ); + assertThat(gaps, empty()); + assertTrue(wasNotified.get()); + } + + public void testDeterministicSafety() { + final byte[] fileContents = new byte[between(0, 1000)]; + final SparseFileTracker sparseFileTracker = new SparseFileTracker("test", fileContents.length); + final Set listenersCalled = new HashSet<>(); + + final DeterministicTaskQueue deterministicTaskQueue = new DeterministicTaskQueue(Settings.EMPTY, random()); + + deterministicTaskQueue.setExecutionDelayVariabilityMillis(1000); + + for (int i = between(1, 1000); i > 0; i--) { + deterministicTaskQueue.scheduleNow( + () -> waitForRandomRange( + fileContents, + sparseFileTracker, + listenersCalled::add, + gap -> deterministicTaskQueue.scheduleNow(() -> processGap(fileContents, gap)) + ) + ); + } + + deterministicTaskQueue.runAllTasks(); + assertTrue(listenersCalled.stream().allMatch(AtomicBoolean::get)); + } + + public void testThreadSafety() throws InterruptedException { + final byte[] fileContents = new byte[between(0, 1000)]; + final Thread[] threads = new Thread[between(1, 5)]; + final SparseFileTracker sparseFileTracker = new SparseFileTracker("test", fileContents.length); + + final CountDownLatch startLatch = new CountDownLatch(1); + final Semaphore countDown = new Semaphore(between(1, 1000)); + final Set listenersCalled = newConcurrentSet(); + for (int threadIndex = 0; threadIndex < threads.length; threadIndex++) { + threads[threadIndex] = new Thread(() -> { + try { + startLatch.await(); + } catch (InterruptedException e) { + throw new AssertionError(e); + } + + while (countDown.tryAcquire()) { + waitForRandomRange(fileContents, sparseFileTracker, listenersCalled::add, gap -> processGap(fileContents, gap)); + } + }); + } + + for (Thread thread : threads) { + thread.start(); + } + + startLatch.countDown(); + + for (Thread thread : threads) { + thread.join(); + } + + assertThat(countDown.availablePermits(), equalTo(0)); + assertTrue(listenersCalled.stream().allMatch(AtomicBoolean::get)); + } + + private static void waitForRandomRange( + byte[] fileContents, + SparseFileTracker sparseFileTracker, + Consumer listenerCalledConsumer, + Consumer gapConsumer + ) { + 
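+ // picks a random sub-range of the file and waits for it via the tracker; the listener asserts (on success) that every byte of the range became AVAILABLE, and each gap returned by waitForRange is handed to gapConsumer, which eventually fills or fails it (see processGap)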
final long start = randomLongBetween(0L, Math.max(0L, fileContents.length - 1)); + final long end = randomLongBetween(start, fileContents.length); + final AtomicBoolean listenerCalled = new AtomicBoolean(); + listenerCalledConsumer.accept(listenerCalled); + + final List gaps = sparseFileTracker.waitForRange(start, end, new ActionListener() { + @Override + public void onResponse(Void aVoid) { + for (long i = start; i < end; i++) { + assertThat(fileContents[Math.toIntExact(i)], equalTo(AVAILABLE)); + } + assertTrue(listenerCalled.compareAndSet(false, true)); + } + + @Override + public void onFailure(Exception e) { + assertTrue(listenerCalled.compareAndSet(false, true)); + } + }); + + for (final SparseFileTracker.Gap gap : gaps) { + gapConsumer.accept(gap); + } + } + + private static void processGap(byte[] fileContents, SparseFileTracker.Gap gap) { + for (long i = gap.start; i < gap.end; i++) { + assertThat(fileContents[Math.toIntExact(i)], equalTo(UNAVAILABLE)); + } + + if (randomBoolean()) { + gap.onFailure(new ElasticsearchException("simulated")); + } else { + for (long i = gap.start; i < gap.end; i++) { + fileContents[Math.toIntExact(i)] = AVAILABLE; + } + gap.onResponse(null); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/TestUtils.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/TestUtils.java new file mode 100644 index 00000000000..8e0fe4c2fa2 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/cache/TestUtils.java @@ -0,0 +1,196 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. 
+ */ +package org.elasticsearch.index.store.cache; + +import org.elasticsearch.common.blobstore.BlobContainer; +import org.elasticsearch.common.blobstore.BlobMetadata; +import org.elasticsearch.common.blobstore.BlobPath; +import org.elasticsearch.common.blobstore.DeleteResult; +import org.elasticsearch.common.io.Streams; +import org.elasticsearch.common.unit.ByteSizeUnit; +import org.elasticsearch.common.unit.ByteSizeValue; +import org.elasticsearch.index.store.IndexInputStats; +import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService; + +import java.io.ByteArrayInputStream; +import java.io.FileNotFoundException; +import java.io.IOException; +import java.io.InputStream; +import java.util.List; +import java.util.Map; +import java.util.Random; + +import static com.carrotsearch.randomizedtesting.generators.RandomNumbers.randomIntBetween; +import static com.carrotsearch.randomizedtesting.generators.RandomPicks.randomFrom; +import static java.util.Arrays.asList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.equalTo; + +public final class TestUtils { + private TestUtils() {} + + public static CacheService createCacheService(final Random random) { + final ByteSizeValue cacheSize = new ByteSizeValue( + randomIntBetween(random, 1, 100), + randomFrom(random, asList(ByteSizeUnit.BYTES, ByteSizeUnit.KB, ByteSizeUnit.MB, ByteSizeUnit.GB)) + ); + return new CacheService(cacheSize, randomCacheRangeSize(random)); + } + + public static ByteSizeValue randomCacheRangeSize(final Random random) { + return new ByteSizeValue( + randomIntBetween(random, 1, 100), + randomFrom(random, asList(ByteSizeUnit.BYTES, ByteSizeUnit.KB, ByteSizeUnit.MB)) + ); + } + + public static long numberOfRanges(long fileSize, long rangeSize) { + return numberOfRanges(Math.toIntExact(fileSize), Math.toIntExact(rangeSize)); + } + + static long numberOfRanges(int fileSize, int rangeSize) { + long numberOfRanges = fileSize / rangeSize; + if (fileSize % rangeSize > 0) { + numberOfRanges++; + } + if (numberOfRanges == 0) { + numberOfRanges++; + } + return numberOfRanges; + } + + public static void assertCounter(IndexInputStats.Counter counter, long total, long count, long min, long max) { + assertThat(counter.total(), equalTo(total)); + assertThat(counter.count(), equalTo(count)); + assertThat(counter.min(), equalTo(min)); + assertThat(counter.max(), equalTo(max)); + } + + public static void assertCounter( + IndexInputStats.TimedCounter timedCounter, + long total, + long count, + long min, + long max, + long totalNanoseconds + ) { + assertCounter(timedCounter, total, count, min, max); + assertThat(timedCounter.totalNanoseconds(), equalTo(totalNanoseconds)); + } + + /** + * A {@link BlobContainer} that can read a single in-memory blob. 
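+ * Reads may start at any position within the blob and the returned stream is truncated to the requested length.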
+ * Any attempt to read a different blob will throw a {@link FileNotFoundException}. + */ + public static BlobContainer singleBlobContainer(final String blobName, final byte[] blobContent) { + return new MostlyUnimplementedFakeBlobContainer() { + @Override + public InputStream readBlob(String name, long position, long length) throws IOException { + if (blobName.equals(name) == false) { + throw new FileNotFoundException("Blob not found: " + name); + } + return Streams.limitStream( + new ByteArrayInputStream(blobContent, Math.toIntExact(position), blobContent.length - Math.toIntExact(position)), + length + ); + } + }; + } + + static BlobContainer singleSplitBlobContainer(final String blobName, final byte[] blobContent, final int partSize) { + if (partSize >= blobContent.length) { + return singleBlobContainer(blobName, blobContent); + } else { + final String prefix = blobName + ".part"; + return new MostlyUnimplementedFakeBlobContainer() { + @Override + public InputStream readBlob(String name, long position, long length) throws IOException { + if (name.startsWith(prefix) == false) { + throw new FileNotFoundException("Blob not found: " + name); + } + assert position + length <= partSize : "cannot read [" + + position + + "-" + + (position + length) + + "] from array part of length [" + + partSize + + "]"; + final int partNumber = Integer.parseInt(name.substring(prefix.length())); + final int positionInBlob = Math.toIntExact(position) + partSize * partNumber; + assert positionInBlob + length <= blobContent.length : "cannot read [" + + positionInBlob + + "-" + + (positionInBlob + length) + + "] from array of length [" + + blobContent.length + + "]"; + return Streams.limitStream( + new ByteArrayInputStream(blobContent, positionInBlob, blobContent.length - positionInBlob), + length + ); + } + }; + } + } + + private static class MostlyUnimplementedFakeBlobContainer implements BlobContainer { + + @Override + public long readBlobPreferredLength() { + return Long.MAX_VALUE; + } + + @Override + public Map listBlobs() { + throw unsupportedException(); + } + + @Override + public BlobPath path() { + throw unsupportedException(); + } + + @Override + public InputStream readBlob(String blobName) { + throw unsupportedException(); + } + + @Override + public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) { + throw unsupportedException(); + } + + @Override + public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) { + throw unsupportedException(); + } + + @Override + public DeleteResult delete() { + throw unsupportedException(); + } + + @Override + public void deleteBlobsIgnoringIfNotExists(List blobNames) { + throw unsupportedException(); + } + + @Override + public Map children() { + throw unsupportedException(); + } + + @Override + public Map listBlobsByPrefix(String blobNamePrefix) { + throw unsupportedException(); + } + + private UnsupportedOperationException unsupportedException() { + assert false : "this operation is not supported and should not have been called"; + return new UnsupportedOperationException("This operation is not supported"); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInputTests.java new file mode 100644 index 00000000000..f4dbc86ef9c --- /dev/null +++ 
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInputTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInputTests.java
new file mode 100644
index 00000000000..f4dbc86ef9c
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/index/store/direct/DirectBlobContainerIndexInputTests.java
@@ -0,0 +1,237 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.index.store.direct;
+
+import org.apache.lucene.store.BufferedIndexInput;
+import org.apache.lucene.store.IndexInput;
+import org.apache.lucene.util.Version;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.lucene.store.ESIndexInputTestCase;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo;
+import org.elasticsearch.index.store.IndexInputStats;
+import org.elasticsearch.index.store.StoreFileMetadata;
+
+import java.io.ByteArrayInputStream;
+import java.io.EOFException;
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.hamcrest.Matchers.allOf;
+import static org.hamcrest.Matchers.containsString;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThanOrEqualTo;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+import static org.hamcrest.Matchers.startsWith;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+public class DirectBlobContainerIndexInputTests extends ESIndexInputTestCase {
+
+    private DirectBlobContainerIndexInput createIndexInput(final byte[] input) throws IOException {
+        return createIndexInput(
+            input,
+            randomBoolean() ? input.length : randomIntBetween(1, input.length),
+            randomIntBetween(1, 1000),
+            () -> {}
+        );
+    }
+
+    private DirectBlobContainerIndexInput createIndexInput(final byte[] input, long partSize, long minimumReadSize, Runnable onReadBlob)
+        throws IOException {
+        final FileInfo fileInfo = new FileInfo(
+            randomAlphaOfLength(5),
+            new StoreFileMetadata("test", input.length, "_checksum", Version.LATEST),
+            partSize == input.length
+                ? randomFrom(
+                    new ByteSizeValue(partSize, ByteSizeUnit.BYTES),
+                    new ByteSizeValue(randomLongBetween(partSize, Long.MAX_VALUE), ByteSizeUnit.BYTES),
+                    ByteSizeValue.ZERO,
+                    new ByteSizeValue(-1, ByteSizeUnit.BYTES),
+                    null
+                )
+                : new ByteSizeValue(partSize, ByteSizeUnit.BYTES)
+        );
+
+        final BlobContainer blobContainer = mock(BlobContainer.class);
+        when(blobContainer.readBlob(anyString(), anyLong(), anyInt())).thenAnswer(invocationOnMock -> {
+            String name = (String) invocationOnMock.getArguments()[0];
+            long position = (long) invocationOnMock.getArguments()[1];
+            long length = (long) invocationOnMock.getArguments()[2];
+            assertThat(
+                "Reading [" + length + "] bytes from [" + name + "] at [" + position + "] exceeds part size [" + partSize + "]",
+                position + length,
+                lessThanOrEqualTo(partSize)
+            );
+
+            onReadBlob.run();
+
+            final InputStream stream;
+            if (fileInfo.numberOfParts() == 1L) {
+                assertThat("Unexpected blob name [" + name + "]", name, equalTo(fileInfo.name()));
+                stream = new ByteArrayInputStream(input, Math.toIntExact(position), Math.toIntExact(length));
+
+            } else {
+                assertThat("Unexpected blob name [" + name + "]", name, allOf(startsWith(fileInfo.name()), containsString(".part")));
+
+                long partNumber = Long.parseLong(name.substring(name.indexOf(".part") + ".part".length()));
+                assertThat(
+                    "Unexpected part number [" + partNumber + "] for [" + name + "]",
+                    partNumber,
+                    allOf(greaterThanOrEqualTo(0L), lessThan(fileInfo.numberOfParts()))
+                );
+
+                stream = new ByteArrayInputStream(input, Math.toIntExact(partNumber * partSize + position), Math.toIntExact(length));
+            }
+
+            if (randomBoolean()) {
+                return stream;
+            } else {
+                // sometimes serve fewer bytes than requested, as permitted by the InputStream#read(byte[], int, int) contract
+                return new FilterInputStream(stream) {
+                    @Override
+                    public int read(byte[] b, int off, int len) throws IOException {
+                        return super.read(b, off, randomIntBetween(1, len));
+                    }
+                };
+            }
+        });
+        return new DirectBlobContainerIndexInput(
+            blobContainer,
+            fileInfo,
+            newIOContext(random()),
+            new IndexInputStats(0L, () -> 0L),
+            minimumReadSize,
+            randomBoolean() ? BufferedIndexInput.BUFFER_SIZE : between(BufferedIndexInput.MIN_BUFFER_SIZE, BufferedIndexInput.BUFFER_SIZE)
+        );
+    }
+
+    public void testRandomReads() throws IOException {
+        for (int i = 0; i < 100; i++) {
+            byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(StandardCharsets.UTF_8);
+            IndexInput indexInput = createIndexInput(input);
+            assertEquals(input.length, indexInput.length());
+            assertEquals(0, indexInput.getFilePointer());
+            byte[] output = randomReadAndSlice(indexInput, input.length);
+            assertArrayEquals(input, output);
+        }
+    }
+
+    public void testRandomOverflow() throws IOException {
+        for (int i = 0; i < 100; i++) {
+            byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(StandardCharsets.UTF_8);
+            IndexInput indexInput = createIndexInput(input);
+            int firstReadLen = randomIntBetween(0, input.length - 1);
+            randomReadAndSlice(indexInput, firstReadLen);
+            int bytesLeft = input.length - firstReadLen;
+            int secondReadLen = bytesLeft + randomIntBetween(1, 100);
+            expectThrows(EOFException.class, () -> indexInput.readBytes(new byte[secondReadLen], 0, secondReadLen));
+        }
+    }
+
+    public void testSeekOverflow() throws IOException {
+        for (int i = 0; i < 100; i++) {
+            byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(StandardCharsets.UTF_8);
+            IndexInput indexInput = createIndexInput(input);
+            int firstReadLen = randomIntBetween(0, input.length - 1);
+            randomReadAndSlice(indexInput, firstReadLen);
+            expectThrows(IOException.class, () -> {
+                switch (randomIntBetween(0, 2)) {
+                    case 0:
+                        indexInput.seek(Integer.MAX_VALUE + 4L);
+                        break;
+                    case 1:
+                        indexInput.seek(-randomIntBetween(1, 10));
+                        break;
+                    default:
+                        int seek = input.length + randomIntBetween(1, 100);
+                        indexInput.seek(seek);
+                        break;
+                }
+            });
+        }
+    }
+
+    public void testSequentialReadsShareInputStreamFromBlobStore() throws IOException {
+        for (int i = 0; i < 100; i++) {
+            final byte[] input = randomUnicodeOfLength(randomIntBetween(1, 1000)).getBytes(StandardCharsets.UTF_8);
+            final int minimumReadSize = randomIntBetween(1, 1000);
+            final int partSize = randomBoolean() ? input.length : randomIntBetween(1, input.length);
+
+            final AtomicInteger readBlobCount = new AtomicInteger();
+            final BufferedIndexInput indexInput = createIndexInput(input, partSize, minimumReadSize, readBlobCount::incrementAndGet);
+
+            assertEquals(input.length, indexInput.length());
+
+            final int readStart = randomIntBetween(0, input.length);
+            final int readEnd = randomIntBetween(readStart, input.length);
+            final int readLen = readEnd - readStart;
+
+            indexInput.seek(readStart);
+
+            // Straightforward sequential reading from `indexInput` (no cloning, slicing or seeking)
+            final byte[] output = new byte[readLen];
+            int readPos = readStart;
+            while (readPos < readEnd) {
+                if (randomBoolean()) {
+                    output[readPos++ - readStart] = indexInput.readByte();
+                } else {
+                    int len = randomIntBetween(1, readEnd - readPos);
+                    indexInput.readBytes(output, readPos - readStart, len);
+                    readPos += len;
+                }
+            }
+            assertEquals(readEnd, readPos);
+            assertEquals(readEnd, indexInput.getFilePointer());
+
+            final byte[] expected = new byte[readLen];
+            System.arraycopy(input, readStart, expected, 0, readLen);
+            assertArrayEquals(expected, output);
+
+            // compute the maximum expected number of ranges read from the blob store
+            final int firstPart = readStart / partSize;
+            final int bufferedEnd = readEnd + indexInput.getBufferSize() - 1;
+            final int lastPart = (bufferedEnd - 1) / partSize; // may overshoot a part due to buffering but not due to readahead
+
+            final int expectedRanges;
+            if (firstPart == lastPart) {
+                final int bufferedBytes = bufferedEnd - readStart;
+                expectedRanges = (bufferedBytes + minimumReadSize - 1) / minimumReadSize; // ceil(bufferedBytes/minimumReadSize)
+            } else {
+                // read was split across parts; each part involves at least one range
+
+                final int bytesInFirstPart = (firstPart + 1) * partSize - readStart;
+                // ceil(bytesInFirstPart/minimumReadSize)
+                final int rangesInFirstPart = (bytesInFirstPart + minimumReadSize - 1) / minimumReadSize;
+
+                final int bytesInLastPart = bufferedEnd - lastPart * partSize;
+                // ceil(bytesInLastPart/minimumReadSize)
+                final int rangesInLastPart = (bytesInLastPart + minimumReadSize - 1) / minimumReadSize;
+
+                // ceil(partSize/minimumReadSize)
+                final int rangesInMiddleParts = (partSize + minimumReadSize - 1) / minimumReadSize;
+                final int middlePartCount = lastPart - firstPart - 1;
+
+                expectedRanges = rangesInFirstPart + rangesInLastPart + rangesInMiddleParts * middlePartCount;
+            }
+
+            assertThat(
+                "data should be read in ranges of at least [" + minimumReadSize + "] bytes where possible",
+                readBlobCount.get(),
+                lessThanOrEqualTo(expectedRanges)
+            );
+        }
+    }
+
+}
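The upper bound on `readBlobCount` computed at the end of `testSequentialReadsShareInputStreamFromBlobStore` is easiest to check with concrete numbers; the values in this worked example are purely illustrative:

    // Worked example (illustrative values): input.length = 100, partSize = 40,
    // minimumReadSize = 16, readStart = 10, readEnd = 90, buffer size = 8.
    //
    // bufferedEnd = 90 + 8 - 1 = 97
    // firstPart   = 10 / 40 = 0
    // lastPart    = (97 - 1) / 40 = 2, so the read spans three parts
    //
    // bytesInFirstPart = (0 + 1) * 40 - 10 = 30 -> ceil(30 / 16) = 2 ranges
    // bytesInLastPart  = 97 - 2 * 40      = 17 -> ceil(17 / 16) = 2 ranges
    // middle part      = 40 bytes             -> ceil(40 / 16) = 3 ranges
    //
    // expectedRanges = 2 + 2 + 3 * 1 = 7, so the test asserts that at most seven
    // calls to BlobContainer#readBlob were made for this sequential read.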
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsRestTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsRestTestCase.java
new file mode 100644
index 00000000000..0800edf72b1
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/AbstractSearchableSnapshotsRestTestCase.java
@@ -0,0 +1,424 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.searchablesnapshots;
+
+import org.apache.http.client.methods.HttpDelete;
+import org.apache.http.client.methods.HttpGet;
+import org.apache.http.client.methods.HttpPost;
+import org.apache.http.client.methods.HttpPut;
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryRequest;
+import org.elasticsearch.client.Request;
+import org.elasticsearch.client.Response;
+import org.elasticsearch.client.ResponseException;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.io.Streams;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.xcontent.ToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentHelper;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.common.xcontent.json.JsonXContent;
+import org.elasticsearch.common.xcontent.support.XContentMapValues;
+import org.elasticsearch.index.query.QueryBuilder;
+import org.elasticsearch.index.query.QueryBuilders;
+import org.elasticsearch.rest.RestStatus;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
+import org.elasticsearch.test.rest.ESRestTestCase;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.List;
+import java.util.Locale;
+import java.util.Map;
+import java.util.function.Function;
+
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.notNullValue;
+
+public abstract class AbstractSearchableSnapshotsRestTestCase extends ESRestTestCase {
+
+    protected abstract String repositoryType();
+
+    protected abstract Settings repositorySettings();
+
+    private void runSearchableSnapshotsTest(SearchableSnapshotsTestCaseBody testCaseBody) throws Exception {
+        final String repositoryType = repositoryType();
+        final Settings repositorySettings = repositorySettings();
+
+        final String repository = "repository";
+        logger.info("creating repository [{}] of type [{}]", repository, repositoryType);
+        registerRepository(repository, repositoryType, true, repositorySettings);
+
+        final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        final int numberOfShards = randomIntBetween(1, 5);
+
+        logger.info("creating index [{}]", indexName);
+        createIndex(
+            indexName,
+            Settings.builder()
+                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
+                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                .build()
+        );
+        ensureGreen(indexName);
+
+        final int numDocs = randomIntBetween(1, 10_000);
+        logger.info("indexing [{}] documents", numDocs);
+
+        final StringBuilder bulkBody = new StringBuilder();
+        for (int i = 0; i < numDocs; i++) {
+            bulkBody.append("{\"index\":{\"_id\":\"").append(i).append("\"}}\n");
+            bulkBody.append("{\"field\":").append(i).append(",\"text\":\"Document number ").append(i).append("\"}\n");
+        }
+
+        final Request documents = new Request(HttpPost.METHOD_NAME, '/' + indexName + "/_bulk");
+        documents.addParameter("refresh", Boolean.TRUE.toString());
+        documents.setJsonEntity(bulkBody.toString());
+        assertOK(client().performRequest(documents));
+
+        if (randomBoolean()) {
+            final StringBuilder bulkUpdateBody = new StringBuilder();
+            for (int i = 0; i < randomIntBetween(1, numDocs); i++) {
+                bulkUpdateBody.append("{\"update\":{\"_id\":\"").append(i).append("\"}}\n");
+                bulkUpdateBody.append("{\"doc\":{").append("\"text\":\"Updated document number ").append(i).append("\"}}\n");
+            }
+
+            final Request bulkUpdate = new Request(HttpPost.METHOD_NAME, '/' + indexName + "/_bulk");
+            bulkUpdate.addParameter("refresh", Boolean.TRUE.toString());
+            bulkUpdate.setJsonEntity(bulkUpdateBody.toString());
+            assertOK(client().performRequest(bulkUpdate));
+        }
+
+        logger.info("force merging index [{}]", indexName);
+        forceMerge(indexName, randomBoolean(), randomBoolean());
+
+        final String snapshot = "searchable-snapshot";
+
+        // Remove the snapshot if a previous test failed to delete it. This is
+        // useful for third party tests that run against a real external service.
+        deleteSnapshot(repository, snapshot, true);
+
+        logger.info("creating snapshot [{}]", snapshot);
+        createSnapshot(repository, snapshot, true);
+
+        logger.info("deleting index [{}]", indexName);
+        deleteIndex(indexName);
+
+        final String restoredIndexName = randomBoolean() ? indexName : randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        logger.info("restoring index [{}] from snapshot [{}] as [{}]", indexName, snapshot, restoredIndexName);
+        mountSnapshot(repository, snapshot, true, indexName, restoredIndexName, Settings.EMPTY);
+
+        ensureGreen(restoredIndexName);
+
+        final Number count = count(restoredIndexName);
+        assertThat("Wrong document count for index " + restoredIndexName, count.intValue(), equalTo(numDocs));
+
+        testCaseBody.runTest(restoredIndexName, numDocs);
+
+        logger.info("deleting snapshot [{}]", snapshot);
+        deleteSnapshot(repository, snapshot, false);
+    }
+
+    public void testSearchResults() throws Exception {
+        runSearchableSnapshotsTest((restoredIndexName, numDocs) -> {
+            for (int i = 0; i < 10; i++) {
+                assertSearchResults(restoredIndexName, numDocs, randomFrom(Boolean.TRUE, Boolean.FALSE, null));
+            }
+        });
+    }
+
+    public void testSearchResultsWhenFrozen() throws Exception {
+        runSearchableSnapshotsTest((restoredIndexName, numDocs) -> {
+            final Request freezeRequest = new Request(HttpPost.METHOD_NAME, restoredIndexName + "/_freeze");
+            assertOK(client().performRequest(freezeRequest));
+            ensureGreen(restoredIndexName);
+            for (int i = 0; i < 10; i++) {
+                assertSearchResults(restoredIndexName, numDocs, Boolean.FALSE);
+            }
+        });
+    }
+
+    public void testCloseAndReopen() throws Exception {
+        runSearchableSnapshotsTest((restoredIndexName, numDocs) -> {
+            final Request closeRequest = new Request(HttpPost.METHOD_NAME, restoredIndexName + "/_close");
+            assertOK(client().performRequest(closeRequest));
+            ensureGreen(restoredIndexName);
+
+            final Request openRequest = new Request(HttpPost.METHOD_NAME, restoredIndexName + "/_open");
+            assertOK(client().performRequest(openRequest));
+            ensureGreen(restoredIndexName);
+
+            for (int i = 0; i < 10; i++) {
+                assertSearchResults(restoredIndexName, numDocs, randomFrom(Boolean.TRUE, Boolean.FALSE, null));
+            }
+        });
+    }
+
+    public void testStats() throws Exception {
+        runSearchableSnapshotsTest((restoredIndexName, numDocs) -> {
+            final Map<String, Object> stats = searchableSnapshotStats(restoredIndexName);
+            assertThat("Expected searchable snapshots stats for [" + restoredIndexName + ']', stats.size(), greaterThan(0));
+
+            final int nbShards = Integer.valueOf(extractValue(indexSettings(restoredIndexName), IndexMetadata.SETTING_NUMBER_OF_SHARDS));
+            assertThat("Expected searchable snapshots stats for " + nbShards + " shards but got " + stats, stats.size(), equalTo(nbShards));
+        });
+    }
+
+    public void testClearCache() throws Exception {
+        @SuppressWarnings("unchecked")
+        final Function<Map<String, Object>, Long> sumCachedBytesWritten = stats -> stats.values()
+            .stream()
+            .filter(o -> o instanceof List)
+            .flatMap(o -> ((List<Object>) o).stream())
+            .filter(o -> o instanceof Map)
+            .map(o -> ((Map<String, Object>) o).get("files"))
+            .filter(o -> o instanceof List)
+            .flatMap(o -> ((List<Object>) o).stream())
+            .filter(o -> o instanceof Map)
+            .map(o -> ((Map<String, Object>) o).get("cached_bytes_written"))
+            .filter(o -> o instanceof Map)
+            .map(o -> ((Map<String, Object>) o).get("sum"))
+            .mapToLong(o -> ((Number) o).longValue())
+            .sum();
+
+        runSearchableSnapshotsTest((restoredIndexName, numDocs) -> {
+
+            Map<String, Object> searchResults = search(restoredIndexName, QueryBuilders.matchAllQuery(), Boolean.TRUE);
+            assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs));
+
+            final long bytesInCacheBeforeClear = sumCachedBytesWritten.apply(searchableSnapshotStats(restoredIndexName));
+            assertThat(bytesInCacheBeforeClear, greaterThan(0L));
+
+            clearCache(restoredIndexName);
+
+            // clearing the cache evicts the cached data but does not reset the cached_bytes_written statistics
+            final long bytesInCacheAfterClear = sumCachedBytesWritten.apply(searchableSnapshotStats(restoredIndexName));
+            assertThat(bytesInCacheAfterClear, equalTo(bytesInCacheBeforeClear));
+
+            searchResults = search(restoredIndexName, QueryBuilders.matchAllQuery(), Boolean.TRUE);
+            assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs));
+
+            final long bytesInCacheAfterSearch = sumCachedBytesWritten.apply(searchableSnapshotStats(restoredIndexName));
+            assertThat(bytesInCacheAfterSearch, greaterThan(bytesInCacheBeforeClear));
+        });
+    }
+
+    private void clearCache(String restoredIndexName) throws IOException {
+        final Request request = new Request(HttpPost.METHOD_NAME, restoredIndexName + "/_searchable_snapshots/cache/clear");
+        assertOK(client().performRequest(request));
+    }
+
+    public void assertSearchResults(String indexName, int numDocs, Boolean ignoreThrottled) throws IOException {
+
+        if (randomBoolean()) {
+            logger.info("clearing searchable snapshots cache for [{}] before search", indexName);
+            clearCache(indexName);
+        }
+
+        final int randomTieBreaker = randomIntBetween(0, numDocs - 1);
+        Map<String, Object> searchResults;
+        switch (randomInt(3)) {
+            case 0:
+                searchResults = search(indexName, QueryBuilders.termQuery("field", String.valueOf(randomTieBreaker)), ignoreThrottled);
+                assertThat(extractValue(searchResults, "hits.total.value"), equalTo(1));
+                @SuppressWarnings("unchecked")
+                Map<String, Object> searchHit = (Map<String, Object>) ((List<?>) extractValue(searchResults, "hits.hits")).get(0);
+                assertThat(extractValue(searchHit, "_index"), equalTo(indexName));
+                assertThat(extractValue(searchHit, "_source.field"), equalTo(randomTieBreaker));
+                break;
+            case 1:
+                searchResults = search(indexName, QueryBuilders.rangeQuery("field").lt(randomTieBreaker), ignoreThrottled);
+                assertThat(extractValue(searchResults, "hits.total.value"), equalTo(randomTieBreaker));
+                break;
+            case 2:
+                searchResults = search(indexName, QueryBuilders.rangeQuery("field").gte(randomTieBreaker), ignoreThrottled);
+                assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs - randomTieBreaker));
+                break;
+            case 3:
+                searchResults = search(indexName, QueryBuilders.matchQuery("text", "document"), ignoreThrottled);
+                assertThat(extractValue(searchResults, "hits.total.value"), equalTo(numDocs));
+                break;
+            default:
+                fail("Unsupported randomized search query");
+        }
+    }
+
+    protected static void registerRepository(String repository, String type, boolean verify, Settings settings) throws IOException {
+        final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository);
+        request.setJsonEntity(Strings.toString(new PutRepositoryRequest(repository).type(type).verify(verify).settings(settings)));
+
+        final Response response = client().performRequest(request);
+        assertThat(
+            "Failed to create repository [" + repository + "] of type [" + type + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+    }
+
+    protected static void createSnapshot(String repository, String snapshot, boolean waitForCompletion) throws IOException {
+        final Request request = new Request(HttpPut.METHOD_NAME, "_snapshot/" + repository + '/' + snapshot);
+        request.addParameter("wait_for_completion", Boolean.toString(waitForCompletion));
+
+        final Response response = client().performRequest(request);
+        assertThat(
+            "Failed to create snapshot [" + snapshot + "] in repository [" + repository + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+    }
+
+    protected static void deleteSnapshot(String repository, String snapshot, boolean ignoreMissing) throws IOException {
+        final Request request = new Request(HttpDelete.METHOD_NAME, "_snapshot/" + repository + '/' + snapshot);
+        try {
+            final Response response = client().performRequest(request);
+            assertThat(
+                "Failed to delete snapshot [" + snapshot + "] in repository [" + repository + "]: " + response,
+                response.getStatusLine().getStatusCode(),
+                equalTo(RestStatus.OK.getStatus())
+            );
+        } catch (IOException e) {
+            if (ignoreMissing && e instanceof ResponseException) {
+                Response response = ((ResponseException) e).getResponse();
+                assertThat(response.getStatusLine().getStatusCode(), equalTo(RestStatus.NOT_FOUND.getStatus()));
+                return;
+            }
+            throw e;
+        }
+    }
+
+    protected static void mountSnapshot(
+        String repository,
+        String snapshot,
+        boolean waitForCompletion,
+        String snapshotIndexName,
+        String mountIndexName,
+        Settings indexSettings
+    ) throws IOException {
+        final Request request = new Request(HttpPost.METHOD_NAME, "/_snapshot/" + repository + "/" + snapshot + "/_mount");
+        request.addParameter("wait_for_completion", Boolean.toString(waitForCompletion));
+
+        final XContentBuilder builder = JsonXContent.contentBuilder().startObject().field("index", snapshotIndexName);
+        if (snapshotIndexName.equals(mountIndexName) == false || randomBoolean()) {
+            builder.field("renamed_index", mountIndexName);
+        }
+        if (indexSettings.isEmpty() == false) {
+            builder.startObject("index_settings");
+            indexSettings.toXContent(builder, ToXContent.EMPTY_PARAMS);
+            builder.endObject();
+        }
+        builder.endObject();
+        request.setJsonEntity(Strings.toString(builder));
+
+        final Response response = client().performRequest(request);
+        assertThat(
+            "Failed to restore snapshot [" + snapshot + "] in repository [" + repository + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+    }
+
+    protected static void forceMerge(String index, boolean onlyExpungeDeletes, boolean flush) throws IOException {
+        final Request request = new Request(HttpPost.METHOD_NAME, '/' + index + "/_forcemerge");
+        request.addParameter("only_expunge_deletes", Boolean.toString(onlyExpungeDeletes));
+        request.addParameter("flush", Boolean.toString(flush));
+        assertOK(client().performRequest(request));
+    }
+
+    protected static Number count(String index) throws IOException {
+        final Response response = client().performRequest(new Request(HttpPost.METHOD_NAME, '/' + index + "/_count"));
+        assertThat(
+            "Failed to execute count request on index [" + index + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+
+        final Map<String, Object> responseAsMap = responseAsMap(response);
+        assertThat(
+            "Shard failures when executing count request on index [" + index + "]: " + response,
+            extractValue(responseAsMap, "_shards.failed"),
+            equalTo(0)
+        );
+        return (Number) extractValue(responseAsMap, "count");
+    }
+
+    protected static Map<String, Object> search(String index, QueryBuilder query, Boolean ignoreThrottled) throws IOException {
+        final Request request = new Request(HttpPost.METHOD_NAME, '/' + index + "/_search");
+        request.setJsonEntity(new SearchSourceBuilder().trackTotalHits(true).query(query).toString());
+        if (ignoreThrottled != null) {
+            request.addParameter("ignore_throttled", ignoreThrottled.toString());
+        }
+
+        final Response response = client().performRequest(request);
+        assertThat(
+            "Failed to execute search request on index [" + index + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+
+        final Map<String, Object> responseAsMap = responseAsMap(response);
+        assertThat(
+            "Shard failures when executing search request on index [" + index + "]: " + response,
+            extractValue(responseAsMap, "_shards.failed"),
+            equalTo(0)
+        );
+        return responseAsMap;
+    }
+
+    protected static Map<String, Object> searchableSnapshotStats(String index) throws IOException {
+        final Response response = client().performRequest(new Request(HttpGet.METHOD_NAME, '/' + index + "/_searchable_snapshots/stats"));
+        assertThat(
+            "Failed to retrieve searchable snapshots stats for index [" + index + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+
+        final Map<String, Object> responseAsMap = responseAsMap(response);
+        assertThat(
+            "Shard failures when retrieving searchable snapshots stats for index [" + index + "]: " + response,
+            extractValue(responseAsMap, "_shards.failed"),
+            equalTo(0)
+        );
+        return extractValue(responseAsMap, "indices." + index + ".shards");
+    }
+
+    protected static Map<String, Object> indexSettings(String index) throws IOException {
+        final Response response = client().performRequest(new Request(HttpGet.METHOD_NAME, '/' + index));
+        assertThat(
+            "Failed to get settings on index [" + index + "]: " + response,
+            response.getStatusLine().getStatusCode(),
+            equalTo(RestStatus.OK.getStatus())
+        );
+        return extractValue(responseAsMap(response), index + ".settings");
+    }
+
+    protected static Map<String, Object> responseAsMap(Response response) throws IOException {
+        final XContentType xContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue());
+        assertThat("Unknown XContentType", xContentType, notNullValue());
+
+        BytesReference bytesReference = Streams.readFully(response.getEntity().getContent());
+
+        try (InputStream responseBody = bytesReference.streamInput()) {
+            return XContentHelper.convertToMap(xContentType.xContent(), responseBody, true);
+        } catch (Exception e) {
+            throw new IOException(bytesReference.utf8ToString(), e);
+        }
+    }
+
+    @SuppressWarnings("unchecked")
+    protected static <T> T extractValue(Map<String, Object> map, String path) {
+        return (T) XContentMapValues.extractValue(path, map);
+    }
+
+    /**
+     * The body of a test case, which runs after the searchable snapshot has been created and restored.
+     */
+    @FunctionalInterface
+    interface SearchableSnapshotsTestCaseBody {
+        void runTest(String indexName, int numDocs) throws IOException;
+    }
+}
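For context, `mountSnapshot` above exercises the `_mount` API that this feature adds. A minimal sketch of the equivalent request built with the low-level REST client; the repository, snapshot, and index names are illustrative, and the body mirrors what `mountSnapshot(...)` constructs:

    // Illustrative names; not part of the test suite above.
    final Request mount = new Request("POST", "/_snapshot/my-repo/my-snapshot/_mount");
    mount.addParameter("wait_for_completion", "true");
    mount.setJsonEntity(
        "{"
            + "\"index\": \"my-index\","                 // index to mount from the snapshot
            + "\"renamed_index\": \"my-mounted-index\"," // optional name for the mounted index
            + "\"index_settings\": {\"index.number_of_replicas\": 0}" // optional settings overrides
            + "}"
    );
    client().performRequest(mount);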
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java
new file mode 100644
index 00000000000..96ae5c58443
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/BaseSearchableSnapshotsIntegTestCase.java
@@ -0,0 +1,86 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.xpack.searchablesnapshots;
+
+import org.elasticsearch.common.collect.List;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.license.LicenseService;
+import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.xpack.core.LocalStateCompositeXPackPlugin;
+import org.elasticsearch.xpack.core.XPackSettings;
+import org.elasticsearch.xpack.searchablesnapshots.cache.CacheService;
+
+import java.util.Collection;
+
+public abstract class BaseSearchableSnapshotsIntegTestCase extends ESIntegTestCase {
+    @Override
+    protected boolean addMockInternalEngine() {
+        return false;
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> nodePlugins() {
+        return List.of(SearchableSnapshots.class, LocalStateCompositeXPackPlugin.class);
+    }
+
+    @Override
+    protected Collection<Class<? extends Plugin>> transportClientPlugins() {
+        return nodePlugins();
+    }
+
+    @Override
+    protected Settings nodeSettings(int nodeOrdinal) {
+        final Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal));
+        builder.put(LicenseService.SELF_GENERATED_LICENSE_TYPE.getKey(), "trial");
+        builder.put(XPackSettings.SECURITY_ENABLED.getKey(), false);
+        if (randomBoolean()) {
+            builder.put(
+                CacheService.SNAPSHOT_CACHE_SIZE_SETTING.getKey(),
+                rarely()
+                    ? new ByteSizeValue(randomIntBetween(0, 10), ByteSizeUnit.KB)
+                    : new ByteSizeValue(randomIntBetween(1, 10), ByteSizeUnit.MB)
+            );
+        }
+        if (randomBoolean()) {
+            builder.put(
+                CacheService.SNAPSHOT_CACHE_RANGE_SIZE_SETTING.getKey(),
+                rarely()
+                    ? new ByteSizeValue(randomIntBetween(4, 1024), ByteSizeUnit.KB)
+                    : new ByteSizeValue(randomIntBetween(1, 10), ByteSizeUnit.MB)
+            );
+        }
+        return builder.build();
+    }
+
+    @Override
+    protected Settings transportClientSettings() {
+        final Settings.Builder builder = Settings.builder().put(super.transportClientSettings());
+        builder.put(XPackSettings.SECURITY_ENABLED.getKey(), false);
+        return builder.build();
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java
new file mode 100644
index 00000000000..1e79bd250d4
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/SearchableSnapshotsIntegTests.java
@@ -0,0 +1,313 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+package org.elasticsearch.xpack.searchablesnapshots;
+
+import org.apache.lucene.index.IndexFileNames;
+import org.apache.lucene.search.TotalHits;
+import org.elasticsearch.ResourceNotFoundException;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
+import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.common.util.concurrent.AtomicArray;
+import org.elasticsearch.index.IndexModule;
+import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.indices.recovery.RecoveryState;
+import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction;
+import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest;
+import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats;
+import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsAction;
+import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsRequest;
+import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsResponse;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.CountDownLatch;
+import java.util.stream.StreamSupport;
+
+import static org.elasticsearch.index.query.QueryBuilders.matchQuery;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.xpack.searchablesnapshots.SearchableSnapshots.SNAPSHOT_DIRECTORY_FACTORY_KEY;
+import static org.hamcrest.Matchers.equalTo;
+import static org.hamcrest.Matchers.greaterThan;
+import static org.hamcrest.Matchers.hasSize;
+import static org.hamcrest.Matchers.lessThanOrEqualTo;
+
+public class SearchableSnapshotsIntegTests extends BaseSearchableSnapshotsIntegTestCase {
+
+    public void testCreateAndRestoreSearchableSnapshot() throws Exception {
+        final String fsRepoName = randomAlphaOfLength(10);
+        final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        final String restoredIndexName = randomBoolean() ? indexName : randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        final String snapshotName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+
+        final Path repo = randomRepoPath();
+        assertAcked(
+            client().admin()
+                .cluster()
+                .preparePutRepository(fsRepoName)
+                .setType("fs")
+                .setSettings(Settings.builder().put("location", repo).put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES))
+        );
+
+        createIndex(indexName);
+        final List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
+        for (int i = between(10, 10_000); i >= 0; i--) {
+            indexRequestBuilders.add(client().prepareIndex(indexName, "_doc").setSource("foo", randomBoolean() ? "bar" : "baz"));
+        }
+        // TODO NORELEASE no dummy docs since that includes deletes, yet we always copy the .liv file in peer recovery
+        indexRandom(true, false, indexRequestBuilders);
+        refresh(indexName);
+        assertThat(
+            client().admin().indices().prepareForceMerge(indexName).setOnlyExpungeDeletes(true).setFlush(true).get().getFailedShards(),
+            equalTo(0)
+        );
+
+        final TotalHits originalAllHits = internalCluster().client()
+            .prepareSearch(indexName)
+            .setTrackTotalHits(true)
+            .get()
+            .getHits()
+            .getTotalHits();
+        final TotalHits originalBarHits = internalCluster().client()
+            .prepareSearch(indexName)
+            .setTrackTotalHits(true)
+            .setQuery(matchQuery("foo", "bar"))
+            .get()
+            .getHits()
+            .getTotalHits();
+        logger.info("--> [{}] in total, of which [{}] match the query", originalAllHits, originalBarHits);
+
+        expectThrows(
+            ResourceNotFoundException.class,
+            "Searchable snapshot stats on a non-searchable-snapshot index should fail",
+            () -> client().execute(SearchableSnapshotsStatsAction.INSTANCE, new SearchableSnapshotsStatsRequest()).actionGet()
+        );
+
+        CreateSnapshotResponse createSnapshotResponse = client().admin()
+            .cluster()
+            .prepareCreateSnapshot(fsRepoName, snapshotName)
+            .setWaitForCompletion(true)
+            .get();
+        final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo();
+        assertThat(snapshotInfo.successfulShards(), greaterThan(0));
+        assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards()));
+
+        assertAcked(client().admin().indices().prepareDelete(indexName));
+
+        final boolean cacheEnabled = randomBoolean();
+        logger.info("--> restoring index [{}] with cache [{}]", restoredIndexName, cacheEnabled ? "enabled" : "disabled");
+
+        Settings.Builder indexSettingsBuilder = Settings.builder()
+            .put(SearchableSnapshots.SNAPSHOT_CACHE_ENABLED_SETTING.getKey(), cacheEnabled)
+            .put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), Boolean.FALSE.toString());
+        final List<String> nonCachedExtensions;
+        if (randomBoolean()) {
+            nonCachedExtensions = randomSubsetOf(Arrays.asList("fdt", "fdx", "nvd", "dvd", "tip", "cfs", "dim"));
+            indexSettingsBuilder.putList(SearchableSnapshots.SNAPSHOT_CACHE_EXCLUDED_FILE_TYPES_SETTING.getKey(), nonCachedExtensions);
+        } else {
+            nonCachedExtensions = Collections.emptyList();
+        }
+        if (randomBoolean()) {
+            indexSettingsBuilder.put(
+                SearchableSnapshots.SNAPSHOT_UNCACHED_CHUNK_SIZE_SETTING.getKey(),
+                new ByteSizeValue(randomLongBetween(10, 100_000))
+            );
+        }
+        final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest(
+            restoredIndexName,
+            fsRepoName,
+            snapshotInfo.snapshotId().getName(),
+            indexName,
+            indexSettingsBuilder.build(),
+            Strings.EMPTY_ARRAY,
+            true
+        );
+
+        final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get();
+        assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
+
+        final Settings settings = client().admin()
+            .indices()
+            .prepareGetSettings(restoredIndexName)
+            .get()
+            .getIndexToSettings()
+            .get(restoredIndexName);
+        assertThat(SearchableSnapshots.SNAPSHOT_REPOSITORY_SETTING.get(settings), equalTo(fsRepoName));
+        assertThat(SearchableSnapshots.SNAPSHOT_SNAPSHOT_NAME_SETTING.get(settings), equalTo(snapshotName));
+        assertThat(IndexModule.INDEX_STORE_TYPE_SETTING.get(settings), equalTo(SNAPSHOT_DIRECTORY_FACTORY_KEY));
+        assertTrue(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.get(settings));
+        assertTrue(SearchableSnapshots.SNAPSHOT_SNAPSHOT_ID_SETTING.exists(settings));
+        assertTrue(SearchableSnapshots.SNAPSHOT_INDEX_ID_SETTING.exists(settings));
+
+        assertRecovered(restoredIndexName, originalAllHits, originalBarHits);
+        assertSearchableSnapshotStats(restoredIndexName, cacheEnabled, nonCachedExtensions);
+
+        internalCluster().fullRestart();
+        assertRecovered(restoredIndexName, originalAllHits, originalBarHits);
+        assertSearchableSnapshotStats(restoredIndexName, cacheEnabled, nonCachedExtensions);
+
+        internalCluster().ensureAtLeastNumDataNodes(2);
+
+        final DiscoveryNode dataNode = randomFrom(
+            StreamSupport.stream(
+                client().admin().cluster().prepareState().get().getState().nodes().getDataNodes().values().spliterator(),
+                false
+            ).map(c -> c.value).toArray(DiscoveryNode[]::new)
+        );
+
+        assertAcked(
+            client().admin()
+                .indices()
+                .prepareUpdateSettings(restoredIndexName)
+                .setSettings(
+                    Settings.builder()
+                        .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
+                        .put(
+                            IndexMetadata.INDEX_ROUTING_REQUIRE_GROUP_SETTING.getConcreteSettingForNamespace("_name").getKey(),
+                            dataNode.getName()
+                        )
+                )
+        );
+
+        assertFalse(
+            client().admin()
+                .cluster()
+                .prepareHealth(restoredIndexName)
+                .setWaitForNoRelocatingShards(true)
+                .setWaitForEvents(Priority.LANGUID)
+                .get()
+                .isTimedOut()
+        );
+
+        assertRecovered(restoredIndexName, originalAllHits, originalBarHits);
+        assertSearchableSnapshotStats(restoredIndexName, cacheEnabled, nonCachedExtensions);
+    }
+
+    private void assertRecovered(String indexName, TotalHits originalAllHits, TotalHits originalBarHits) throws Exception {
+        final Thread[] threads = new Thread[between(1, 5)];
+        final AtomicArray<TotalHits> allHits = new AtomicArray<>(threads.length);
+        final AtomicArray<TotalHits> barHits = new AtomicArray<>(threads.length);
+
+        final CountDownLatch latch = new CountDownLatch(1);
+        for (int i = 0; i < threads.length; i++) {
+            int t = i;
+            threads[i] = new Thread(() -> {
+                try {
+                    latch.await();
+                } catch (InterruptedException e) {
+                    throw new RuntimeException(e);
+                }
+                allHits.set(t, client().prepareSearch(indexName).setTrackTotalHits(true).get().getHits().getTotalHits());
+                barHits.set(
+                    t,
+                    client().prepareSearch(indexName)
+                        .setTrackTotalHits(true)
+                        .setQuery(matchQuery("foo", "bar"))
+                        .get()
+                        .getHits()
+                        .getTotalHits()
+                );
+            });
+            threads[i].start();
+        }
+
+        ensureGreen(indexName);
+        latch.countDown();
+
+        final RecoveryResponse recoveryResponse = client().admin().indices().prepareRecoveries(indexName).get();
+        for (List<RecoveryState> recoveryStates : recoveryResponse.shardRecoveryStates().values()) {
+            for (RecoveryState recoveryState : recoveryStates) {
+                logger.info("Checking {}[{}]", recoveryState.getShardId(), recoveryState.getPrimary() ? "p" : "r");
+                // we make a new commit so we write a new `segments_n` file
+                assertThat(recoveryState.getIndex().recoveredFileCount(), lessThanOrEqualTo(1));
+            }
+        }
+
+        for (int i = 0; i < threads.length; i++) {
+            threads[i].join();
+
+            final TotalHits allTotalHits = allHits.get(i);
+            final TotalHits barTotalHits = barHits.get(i);
+
+            logger.info("--> thread #{} has [{}] hits in total, of which [{}] match the query", i, allTotalHits, barTotalHits);
+            assertThat(allTotalHits, equalTo(originalAllHits));
+            assertThat(barTotalHits, equalTo(originalBarHits));
+        }
+    }
+
+    private void assertSearchableSnapshotStats(String indexName, boolean cacheEnabled, List<String> nonCachedExtensions) {
+        final SearchableSnapshotsStatsResponse statsResponse = client().execute(
+            SearchableSnapshotsStatsAction.INSTANCE,
+            new SearchableSnapshotsStatsRequest(indexName)
+        ).actionGet();
+        final NumShards restoredNumShards = getNumShards(indexName);
+        assertThat(statsResponse.getStats(), hasSize(restoredNumShards.totalNumShards));
+
+        for (SearchableSnapshotShardStats stats : statsResponse.getStats()) {
+            final ShardRouting shardRouting = stats.getShardRouting();
+            assertThat(stats.getShardRouting().getIndexName(), equalTo(indexName));
+            if (shardRouting.started()) {
+                assertThat("Expecting stats to exist for at least 1 Lucene file", stats.getStats().size(), greaterThan(0));
+                for (SearchableSnapshotShardStats.CacheIndexInputStats indexInputStats : stats.getStats()) {
+                    final String fileName = indexInputStats.getFileName();
+                    assertThat(
                        "Unexpected open count for " + fileName + " of shard " + shardRouting,
+                        indexInputStats.getOpenCount(),
+                        greaterThan(0L)
+                    );
+                    assertThat(
+                        "Unexpected close count for " + fileName + " of shard " + shardRouting,
+                        indexInputStats.getCloseCount(),
+                        lessThanOrEqualTo(indexInputStats.getOpenCount())
+                    );
+                    assertThat(
+                        "Unexpected file length for " + fileName + " of shard " + shardRouting,
+                        indexInputStats.getFileLength(),
+                        greaterThan(0L)
+                    );
+
+                    if (cacheEnabled == false || nonCachedExtensions.contains(IndexFileNames.getExtension(fileName))) {
+                        assertThat(
+                            "Expected at least 1 optimized or direct read for " + fileName + " of shard " + shardRouting,
+                            Math.max(indexInputStats.getOptimizedBytesRead().getCount(), indexInputStats.getDirectBytesRead().getCount()),
+                            greaterThan(0L)
+                        );
+                        assertThat(
+                            "Expected no cache read or write for " + fileName + " of shard " + shardRouting,
+                            Math.max(indexInputStats.getCachedBytesRead().getCount(), indexInputStats.getCachedBytesWritten().getCount()),
+                            equalTo(0L)
+                        );
+                    } else {
+                        assertThat(
+                            "Expected at least 1 cache read or write for " + fileName + " of shard " + shardRouting,
+                            Math.max(indexInputStats.getCachedBytesRead().getCount(), indexInputStats.getCachedBytesWritten().getCount()),
+                            greaterThan(0L)
+                        );
+                        assertThat(
+                            "Expected no optimized read for " + fileName + " of shard " + shardRouting,
+                            indexInputStats.getOptimizedBytesRead().getCount(),
+                            equalTo(0L)
+                        );
+                    }
+                }
+            }
+        }
+    }
+}
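The integration tests above mount snapshots through the transport action rather than the REST layer. A minimal sketch with illustrative names, matching the constructor argument order exercised by `MountSearchableSnapshotRequestTests` below:

    // Illustrative names; argument order is (mounted index, repository, snapshot,
    // index within the snapshot, index settings, ignored index settings, wait for completion).
    final MountSearchableSnapshotRequest request = new MountSearchableSnapshotRequest(
        "my-mounted-index",
        "my-repo",
        "my-snapshot",
        "my-index",
        Settings.EMPTY,
        Strings.EMPTY_ARRAY,
        true
    );
    final RestoreSnapshotResponse response =
        client().execute(MountSearchableSnapshotAction.INSTANCE, request).actionGet();
    assertThat(response.getRestoreInfo().failedShards(), equalTo(0));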
+ */ +package org.elasticsearch.xpack.searchablesnapshots; + +import org.elasticsearch.ElasticsearchSecurityException; +import org.elasticsearch.ExceptionsHelper; +import org.elasticsearch.action.ActionFuture; +import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse; +import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse; +import org.elasticsearch.action.support.DefaultShardOperationFailedException; +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.index.IndexSettings; +import org.elasticsearch.license.DeleteLicenseAction; +import org.elasticsearch.license.PostStartBasicAction; +import org.elasticsearch.license.PostStartBasicRequest; +import org.elasticsearch.protocol.xpack.license.DeleteLicenseRequest; +import org.elasticsearch.snapshots.SnapshotInfo; +import org.elasticsearch.test.ESIntegTestCase; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotAction; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; +import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheAction; +import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheRequest; +import org.elasticsearch.xpack.searchablesnapshots.action.ClearSearchableSnapshotsCacheResponse; +import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsAction; +import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsRequest; +import org.elasticsearch.xpack.searchablesnapshots.action.SearchableSnapshotsStatsResponse; +import org.junit.Before; + +import java.nio.file.Path; +import java.util.concurrent.ExecutionException; + +import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.instanceOf; +import static org.hamcrest.Matchers.notNullValue; + +@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST) +public class SearchableSnapshotsLicenseIntegTests extends BaseSearchableSnapshotsIntegTestCase { + + private static final String repoName = "test-repo"; + private static final String indexName = "test-index"; + private static final String snapshotName = "test-snapshot"; + + @Before + public void createAndMountSearchableSnapshot() throws Exception { + final Path repo = randomRepoPath(); + assertAcked( + client().admin().cluster().preparePutRepository(repoName).setType("fs").setSettings(Settings.builder().put("location", repo)) + ); + + createIndex(indexName); + + CreateSnapshotResponse createSnapshotResponse = client().admin() + .cluster() + .prepareCreateSnapshot(repoName, snapshotName) + .setWaitForCompletion(true) + .get(); + final SnapshotInfo snapshotInfo = createSnapshotResponse.getSnapshotInfo(); + assertThat(snapshotInfo.successfulShards(), greaterThan(0)); + assertThat(snapshotInfo.successfulShards(), equalTo(snapshotInfo.totalShards())); + + assertAcked(client().admin().indices().prepareDelete(indexName)); + + final Settings.Builder indexSettingsBuilder = Settings.builder().put(IndexSettings.INDEX_CHECK_ON_STARTUP.getKey(), false); + final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest( + indexName, + repoName, + snapshotName, + indexName, + indexSettingsBuilder.build(), + Strings.EMPTY_ARRAY, + true 
+ ); + + final RestoreSnapshotResponse restoreSnapshotResponse = client().execute(MountSearchableSnapshotAction.INSTANCE, req).get(); + assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0)); + ensureGreen(indexName); + + assertAcked(client().execute(DeleteLicenseAction.INSTANCE, new DeleteLicenseRequest()).get()); + assertAcked(client().execute(PostStartBasicAction.INSTANCE, new PostStartBasicRequest()).get()); + } + + public void testMountRequiresLicense() { + final MountSearchableSnapshotRequest req = new MountSearchableSnapshotRequest( + indexName + "-extra", + repoName, + snapshotName, + indexName, + Settings.EMPTY, + Strings.EMPTY_ARRAY, + randomBoolean() + ); + + final ActionFuture future = client().execute(MountSearchableSnapshotAction.INSTANCE, req); + final Throwable cause = ExceptionsHelper.unwrap(expectThrows(Exception.class, future::get), ElasticsearchSecurityException.class); + assertThat(cause, notNullValue()); + assertThat(cause, instanceOf(ElasticsearchSecurityException.class)); + assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]")); + } + + public void testStatsRequiresLicense() throws ExecutionException, InterruptedException { + final ActionFuture future = client().execute( + SearchableSnapshotsStatsAction.INSTANCE, + new SearchableSnapshotsStatsRequest(indexName) + ); + final SearchableSnapshotsStatsResponse response = future.get(); + assertThat(response.getTotalShards(), greaterThan(0)); + assertThat(response.getSuccessfulShards(), equalTo(0)); + for (DefaultShardOperationFailedException shardFailure : response.getShardFailures()) { + final Throwable cause = ExceptionsHelper.unwrap(shardFailure.getCause(), ElasticsearchSecurityException.class); + assertThat(cause, notNullValue()); + assertThat(cause, instanceOf(ElasticsearchSecurityException.class)); + assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]")); + } + } + + public void testClearCacheRequiresLicense() throws ExecutionException, InterruptedException { + final ActionFuture future = client().execute( + ClearSearchableSnapshotsCacheAction.INSTANCE, + new ClearSearchableSnapshotsCacheRequest(indexName) + ); + final ClearSearchableSnapshotsCacheResponse response = future.get(); + assertThat(response.getTotalShards(), greaterThan(0)); + assertThat(response.getSuccessfulShards(), equalTo(0)); + for (DefaultShardOperationFailedException shardFailure : response.getShardFailures()) { + final Throwable cause = ExceptionsHelper.unwrap(shardFailure.getCause(), ElasticsearchSecurityException.class); + assertThat(cause, notNullValue()); + assertThat(cause, instanceOf(ElasticsearchSecurityException.class)); + assertThat(cause.getMessage(), containsString("current license is non-compliant for [searchable-snapshots]")); + } + } +} diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/action/MountSearchableSnapshotRequestTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/action/MountSearchableSnapshotRequestTests.java new file mode 100644 index 00000000000..e63af77b6c4 --- /dev/null +++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/action/MountSearchableSnapshotRequestTests.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. 
Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +package org.elasticsearch.xpack.searchablesnapshots.action; + +import org.elasticsearch.common.Strings; +import org.elasticsearch.common.io.stream.Writeable; +import org.elasticsearch.common.settings.Settings; +import org.elasticsearch.common.unit.TimeValue; +import org.elasticsearch.test.AbstractWireSerializingTestCase; +import org.elasticsearch.xpack.core.searchablesnapshots.MountSearchableSnapshotRequest; + +import java.util.Arrays; + +public class MountSearchableSnapshotRequestTests extends AbstractWireSerializingTestCase { + + private MountSearchableSnapshotRequest randomState(MountSearchableSnapshotRequest instance) { + return new MountSearchableSnapshotRequest( + randomBoolean() ? instance.mountedIndexName() : mutateString(instance.mountedIndexName()), + randomBoolean() ? instance.repositoryName() : mutateString(instance.repositoryName()), + randomBoolean() ? instance.snapshotName() : mutateString(instance.snapshotName()), + randomBoolean() ? instance.snapshotIndexName() : mutateString(instance.snapshotIndexName()), + randomBoolean() ? instance.indexSettings() : mutateSettings(instance.indexSettings()), + randomBoolean() ? instance.ignoreIndexSettings() : mutateStringArray(instance.ignoreIndexSettings()), + randomBoolean() + ).masterNodeTimeout(randomBoolean() ? instance.masterNodeTimeout() : mutateTimeValue(instance.masterNodeTimeout())); + } + + @Override + protected MountSearchableSnapshotRequest createTestInstance() { + return randomState( + new MountSearchableSnapshotRequest( + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + randomAlphaOfLength(5), + Settings.EMPTY, + Strings.EMPTY_ARRAY, + randomBoolean() + ) + ); + } + + @Override + protected Writeable.Reader instanceReader() { + return MountSearchableSnapshotRequest::new; + } + + @Override + protected MountSearchableSnapshotRequest mutateInstance(MountSearchableSnapshotRequest req) { + switch (randomInt(7)) { + case 0: + return new MountSearchableSnapshotRequest( + mutateString(req.mountedIndexName()), + req.repositoryName(), + req.snapshotName(), + req.snapshotIndexName(), + req.indexSettings(), + req.ignoreIndexSettings(), + req.waitForCompletion() + ).masterNodeTimeout(req.masterNodeTimeout()); + case 1: + return new MountSearchableSnapshotRequest( + req.mountedIndexName(), + mutateString(req.repositoryName()), + req.snapshotName(), + req.snapshotIndexName(), + req.indexSettings(), + req.ignoreIndexSettings(), + req.waitForCompletion() + ).masterNodeTimeout(req.masterNodeTimeout()); + case 2: + return new MountSearchableSnapshotRequest( + req.mountedIndexName(), + req.repositoryName(), + mutateString(req.snapshotName()), + req.snapshotIndexName(), + req.indexSettings(), + req.ignoreIndexSettings(), + req.waitForCompletion() + ).masterNodeTimeout(req.masterNodeTimeout()); + case 3: + return new MountSearchableSnapshotRequest( + req.mountedIndexName(), + req.repositoryName(), + req.snapshotName(), + mutateString(req.snapshotIndexName()), + req.indexSettings(), + req.ignoreIndexSettings(), + req.waitForCompletion() + ).masterNodeTimeout(req.masterNodeTimeout()); + case 4: + return new MountSearchableSnapshotRequest( + req.mountedIndexName(), + req.repositoryName(), + req.snapshotName(), + req.snapshotIndexName(), + mutateSettings(req.indexSettings()), + req.ignoreIndexSettings(), + req.waitForCompletion() + ).masterNodeTimeout(req.masterNodeTimeout()); + case 5: + return new 
+            case 5:
+                return new MountSearchableSnapshotRequest(
+                    req.mountedIndexName(),
+                    req.repositoryName(),
+                    req.snapshotName(),
+                    req.snapshotIndexName(),
+                    req.indexSettings(),
+                    mutateStringArray(req.ignoreIndexSettings()),
+                    req.waitForCompletion()
+                ).masterNodeTimeout(req.masterNodeTimeout());
+            case 6:
+                return new MountSearchableSnapshotRequest(
+                    req.mountedIndexName(),
+                    req.repositoryName(),
+                    req.snapshotName(),
+                    req.snapshotIndexName(),
+                    req.indexSettings(),
+                    req.ignoreIndexSettings(),
+                    req.waitForCompletion() == false
+                ).masterNodeTimeout(req.masterNodeTimeout());
+
+            default:
+                return new MountSearchableSnapshotRequest(
+                    req.mountedIndexName(),
+                    req.repositoryName(),
+                    req.snapshotName(),
+                    req.snapshotIndexName(),
+                    req.indexSettings(),
+                    req.ignoreIndexSettings(),
+                    req.waitForCompletion()
+                ).masterNodeTimeout(mutateTimeValue(req.masterNodeTimeout()));
+        }
+    }
+
+    private static TimeValue mutateTimeValue(TimeValue timeValue) {
+        long millis = timeValue.millis();
+        long newMillis = randomValueOtherThan(millis, () -> randomLongBetween(0, 60000));
+        return TimeValue.timeValueMillis(newMillis);
+    }
+
+    private static String mutateString(String string) {
+        return randomAlphaOfLength(11 - string.length());
+    }
+
+    private static Settings mutateSettings(Settings settings) {
+        if (settings.size() < 5 && (settings.isEmpty() || randomBoolean())) {
+            return Settings.builder().put(settings).put(randomAlphaOfLength(3), randomAlphaOfLength(3)).build();
+        } else {
+            return Settings.EMPTY;
+        }
+    }
+
+    private static String[] mutateStringArray(String[] strings) {
+        if (strings.length < 5 && (strings.length == 0 || randomBoolean())) {
+            String[] newStrings = Arrays.copyOf(strings, strings.length + 1);
+            newStrings[strings.length] = randomAlphaOfLength(3);
+            return newStrings;
+        } else if (randomBoolean()) {
+            String[] newStrings = Arrays.copyOf(strings, strings.length);
+            int i = randomIntBetween(0, newStrings.length - 1);
+            newStrings[i] = mutateString(newStrings[i]);
+            return newStrings;
+        } else {
+            return Strings.EMPTY_ARRAY;
+        }
+    }
+}
diff --git a/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponseTests.java b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponseTests.java
new file mode 100644
index 00000000000..fd04ce4b7f2
--- /dev/null
+++ b/x-pack/plugin/searchable-snapshots/src/test/java/org/elasticsearch/xpack/searchablesnapshots/action/SearchableSnapshotsStatsResponseTests.java
@@ -0,0 +1,123 @@
+/*
+ * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+ * or more contributor license agreements. Licensed under the Elastic License;
+ * you may not use this file except in compliance with the Elastic License.
+ */
+
+package org.elasticsearch.xpack.searchablesnapshots.action;
+
+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.DefaultShardOperationFailedException;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardRoutingState;
+import org.elasticsearch.repositories.IndexId;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.xpack.core.searchablesnapshots.SearchableSnapshotShardStats;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting;
+import static org.hamcrest.CoreMatchers.equalTo;
+
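+/**
+ * Serialization tests for {@link SearchableSnapshotsStatsResponse}. The round trip
+ * is driven by hand through {@code copyWriteable} rather than via
+ * {@code AbstractWireSerializingTestCase}, so equality is asserted field by field in
+ * {@code assertEqualInstances}; shard failures in particular are compared by index,
+ * shard id and status rather than by object equality.
+ */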
+public class SearchableSnapshotsStatsResponseTests extends ESTestCase {
+
+    public void testSerialization() throws IOException {
+        for (int i = 0; i < randomIntBetween(10, 50); i++) {
+            final SearchableSnapshotsStatsResponse testInstance = createTestInstance();
+            final SearchableSnapshotsStatsResponse deserializedInstance = copyWriteable(
+                testInstance,
+                writableRegistry(),
+                SearchableSnapshotsStatsResponse::new,
+                Version.CURRENT
+            );
+            assertEqualInstances(testInstance, deserializedInstance);
+        }
+    }
+
+    private void assertEqualInstances(SearchableSnapshotsStatsResponse expected, SearchableSnapshotsStatsResponse actual) {
+        assertThat(actual.getTotalShards(), equalTo(expected.getTotalShards()));
+        assertThat(actual.getSuccessfulShards(), equalTo(expected.getSuccessfulShards()));
+        assertThat(actual.getFailedShards(), equalTo(expected.getFailedShards()));
+        DefaultShardOperationFailedException[] originalFailures = expected.getShardFailures();
+        DefaultShardOperationFailedException[] parsedFailures = actual.getShardFailures();
+        assertThat(originalFailures.length, equalTo(parsedFailures.length));
+        for (int i = 0; i < originalFailures.length; i++) {
+            assertThat(originalFailures[i].index(), equalTo(parsedFailures[i].index()));
+            assertThat(originalFailures[i].shardId(), equalTo(parsedFailures[i].shardId()));
+            assertThat(originalFailures[i].status(), equalTo(parsedFailures[i].status()));
+        }
+        assertThat(actual.getStats(), equalTo(expected.getStats()));
+    }
+
+    private SearchableSnapshotsStatsResponse createTestInstance() {
+        final int totalShards = randomIntBetween(0, 20);
+        final int successfulShards = totalShards > 0 ? randomIntBetween(0, totalShards) : 0;
+        final int failedShards = totalShards - successfulShards;
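+
+        // Every successful shard contributes a stats entry for its primary plus one
+        // per replica; every failed shard contributes a shard-level failure entry
+        // instead, keeping the total/successful/failed counters consistent.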
+        final String indexName = randomAlphaOfLength(10);
+        final int replicas = randomIntBetween(0, 2);
+        final SnapshotId snapshotId = new SnapshotId(randomAlphaOfLength(5), randomAlphaOfLength(5));
+        final IndexId indexId = new IndexId(randomAlphaOfLength(5), randomAlphaOfLength(5));
+
+        final List<SearchableSnapshotShardStats> shardStats = new ArrayList<>();
+        final List<DefaultShardOperationFailedException> shardFailures = new ArrayList<>();
+
+        for (int i = 0; i < totalShards; i++) {
+            if (i < successfulShards) {
+                shardStats.add(createSearchableSnapshotShardStats(indexName, i, true, snapshotId, indexId));
+                for (int j = 0; j < replicas; j++) {
+                    shardStats.add(createSearchableSnapshotShardStats(indexName, i, false, snapshotId, indexId));
+                }
+            } else {
+                shardFailures.add(new DefaultShardOperationFailedException(indexName, i, new Exception()));
+            }
+        }
+
+        return new SearchableSnapshotsStatsResponse(shardStats, totalShards, successfulShards, failedShards, shardFailures);
+    }
+
+    private static SearchableSnapshotShardStats createSearchableSnapshotShardStats(
+        String index,
+        int shardId,
+        boolean primary,
+        SnapshotId snapshotId,
+        IndexId indexId
+    ) {
+        final ShardRouting shardRouting = newShardRouting(index, shardId, randomAlphaOfLength(5), primary, ShardRoutingState.STARTED);
+        final List<SearchableSnapshotShardStats.CacheIndexInputStats> inputStats = new ArrayList<>();
+        for (int j = 0; j < randomInt(10); j++) {
+            inputStats.add(randomCacheIndexInputStats());
+        }
+        return new SearchableSnapshotShardStats(shardRouting, snapshotId, indexId, inputStats);
+    }
+
+    private static SearchableSnapshotShardStats.CacheIndexInputStats randomCacheIndexInputStats() {
+        return new SearchableSnapshotShardStats.CacheIndexInputStats(
+            randomAlphaOfLength(10),
+            randomNonNegativeLong(),
+            randomNonNegativeLong(),
+            randomNonNegativeLong(),
+            randomCounter(),
+            randomCounter(),
+            randomCounter(),
+            randomCounter(),
+            randomCounter(),
+            randomCounter(),
+            randomCounter(),
+            randomTimedCounter(),
+            randomTimedCounter(),
+            randomTimedCounter()
+        );
+    }
+
+    private static SearchableSnapshotShardStats.Counter randomCounter() {
+        return new SearchableSnapshotShardStats.Counter(randomLong(), randomLong(), randomLong(), randomLong());
+    }
+
+    private static SearchableSnapshotShardStats.TimedCounter randomTimedCounter() {
+        return new SearchableSnapshotShardStats.TimedCounter(randomLong(), randomLong(), randomLong(), randomLong(), randomLong());
+    }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.clear_cache.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.clear_cache.json
new file mode 100644
index 00000000000..216cedd93ea
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.clear_cache.json
@@ -0,0 +1,55 @@
+{
+  "searchable_snapshots.clear_cache": {
+    "documentation": {
+      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-clear-cache.html"
+    },
+    "stability": "experimental",
+    "url": {
+      "paths": [
+        {
+          "path": "/_searchable_snapshots/cache/clear",
+          "methods": [
+            "POST"
+          ]
+        },
+        {
+          "path": "/{index}/_searchable_snapshots/cache/clear",
+          "methods": [
+            "POST"
+          ],
+          "parts": {
+            "index": {
+              "type": "list",
+              "description": "A comma-separated list of index names"
+            }
+          }
+        }
+      ]
+    },
+    "params": {
+      "ignore_unavailable": {
+        "type": "boolean",
+        "description": "Whether specified concrete indices should be ignored when unavailable (missing or closed)"
+      },
+      "allow_no_indices": {
+        "type": "boolean",
+        "description": "Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified)"
+      },
+      "expand_wildcards": {
+        "type": "enum",
+        "options": [
+          "open",
+          "closed",
+          "none",
+          "all"
+        ],
+        "default": "open",
+        "description": "Whether to expand wildcard expression to concrete indices that are open, closed or both."
+      },
+      "index": {
+        "type": "list",
+        "description": "A comma-separated list of index names to limit the operation"
+      }
+    }
+  }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.mount.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.mount.json
new file mode 100644
index 00000000000..995944cf8f9
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.mount.json
@@ -0,0 +1,43 @@
+{
+  "searchable_snapshots.mount": {
+    "documentation": {
+      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-mount-snapshot"
+    },
+    "stability": "experimental",
+    "url": {
+      "paths": [
+        {
+          "path": "/_snapshot/{repository}/{snapshot}/_mount",
+          "methods": [
+            "POST"
+          ],
+          "parts": {
+            "repository": {
+              "type": "string",
+              "description": "The name of the repository containing the snapshot of the index to mount"
+            },
+            "snapshot": {
+              "type": "string",
+              "description": "The name of the snapshot of the index to mount"
+            }
+          }
+        }
+      ]
+    },
+    "params": {
+      "master_timeout": {
+        "type": "time",
+        "description": "Explicit operation timeout for connection to master node"
+      },
+      "wait_for_completion": {
+        "type": "boolean",
+        "description": "Should this request wait until the operation has completed before returning",
+        "default": false
+      }
+    },
+    "body": {
+      "description": "The restore configuration for mounting the snapshot as searchable",
+      "required": true
+    }
+  }
+}
diff --git a/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.stats.json b/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.stats.json
new file mode 100644
index 00000000000..b34fd7084b1
--- /dev/null
+++ b/x-pack/plugin/src/test/resources/rest-api-spec/api/searchable_snapshots.stats.json
@@ -0,0 +1,30 @@
+{
+  "searchable_snapshots.stats": {
+    "documentation": {
+      "url": "https://www.elastic.co/guide/en/elasticsearch/reference/current/searchable-snapshots-api-stats.html"
+    },
+    "stability": "experimental",
+    "url": {
+      "paths": [
+        {
+          "path": "/_searchable_snapshots/stats",
+          "methods": [
+            "GET"
+          ]
+        },
+        {
+          "path": "/{index}/_searchable_snapshots/stats",
+          "methods": [
+            "GET"
+          ],
+          "parts": {
+            "index": {
+              "type": "list",
+              "description": "A comma-separated list of index names"
+            }
+          }
+        }
+      ]
+    }
+  }
+}