[7.x] ILM: add force-merge step to searchable snapshots action (#60819) (#60882)

This adds a force-merge step to the searchable snapshot action, enabled by default,
but parameterizable using the optional `force_merge_index` boolean.

e.g.
```
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "cold": {
        "actions": {
          "searchable_snapshot" : {
            "snapshot_repository" : "backing_repo",
            "force_merge_index": true
          }
        }
      }
    }
  }
}
```
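
Setting the flag to `false` skips the force-merge (and segment-count) steps, e.g.
```
PUT _ilm/policy/my_policy
{
  "policy": {
    "phases": {
      "cold": {
        "actions": {
          "searchable_snapshot" : {
            "snapshot_repository" : "backing_repo",
            "force_merge_index": false
          }
        }
      }
    }
  }
}
```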

(cherry picked from commit d0a17b2d35f1b083b574246bdbf3e1929471a4a9)
Signed-off-by: Andrei Dan <andrei.dan@elastic.co>
Andrei Dan 2020-08-10 13:45:11 +01:00 committed by GitHub
parent e03993d238
commit 235e5ed3ea
10 changed files with 408 additions and 198 deletions


@ -12,6 +12,10 @@ This action makes the index <<dynamic-index-settings,read-only>>.
To use the `forcemerge` action in the `hot` phase, the `rollover` action *must* be present.
If no rollover action is configured, {ilm-init} will reject the policy.
[NOTE]
The `forcemerge` action is best effort. Some of the shards might be relocating,
in which case they will not be merged.
[[ilm-forcemerge-options]]
==== Options


@ -30,6 +30,20 @@ To keep the snapshot, set `delete_searchable_snapshot` to `false` in the delete
Specifies where to store the snapshot.
See <<snapshots-register-repository>> for more information.
`force_merge_index`::
(Optional, boolean)
Force merges the managed index to one segment.
Defaults to `true`.
If the managed index was already force merged using the
<<ilm-forcemerge, force merge action>> in a previous action,
the `searchable_snapshot` action's force-merge step is a no-op.
[NOTE]
The force merge is best effort. Some of the shards might be relocating,
in which case they will not be merged.
The `searchable_snapshot` action will continue executing even if not all shards
are force merged.
[[ilm-searchable-snapshot-ex]]
==== Examples
[source,console]


@ -1046,7 +1046,7 @@ public abstract class ESRestTestCase extends ESTestCase {
* in a non-green state
* @param index index to test for
**/
protected static void ensureGreen(String index) throws IOException {
public static void ensureGreen(String index) throws IOException {
ensureHealth(index, (request) -> {
request.addParameter("wait_for_status", "green");
request.addParameter("wait_for_no_relocating_shards", "true");


@ -5,6 +5,7 @@
*/
package org.elasticsearch.xpack.core.ilm;
import org.elasticsearch.Version;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexAbstraction;
@ -18,7 +19,7 @@ import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.xpack.core.ilm.Step.StepKey;
import java.io.IOException;
import java.util.Arrays;
import java.util.ArrayList;
import java.util.List;
import java.util.Objects;
@ -31,15 +32,17 @@ public class SearchableSnapshotAction implements LifecycleAction {
public static final String NAME = "searchable_snapshot";
public static final ParseField SNAPSHOT_REPOSITORY = new ParseField("snapshot_repository");
public static final ParseField FORCE_MERGE_INDEX = new ParseField("force_merge_index");
public static final String CONDITIONAL_DATASTREAM_CHECK_KEY = BranchingStep.NAME + "-on-datastream-check";
public static final String RESTORED_INDEX_PREFIX = "restored-";
private static final ConstructingObjectParser<SearchableSnapshotAction, Void> PARSER = new ConstructingObjectParser<>(NAME,
a -> new SearchableSnapshotAction((String) a[0]));
a -> new SearchableSnapshotAction((String) a[0], a[1] == null || (boolean) a[1]));
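// a missing optional force_merge_index value (a[1] == null) defaults to force merging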
static {
PARSER.declareString(ConstructingObjectParser.constructorArg(), SNAPSHOT_REPOSITORY);
PARSER.declareBoolean(ConstructingObjectParser.optionalConstructorArg(), FORCE_MERGE_INDEX);
}
public static SearchableSnapshotAction parse(XContentParser parser) {
@ -47,22 +50,34 @@ public class SearchableSnapshotAction implements LifecycleAction {
}
private final String snapshotRepository;
private final boolean forceMergeIndex;
public SearchableSnapshotAction(String snapshotRepository) {
public SearchableSnapshotAction(String snapshotRepository, boolean forceMergeIndex) {
if (Strings.hasText(snapshotRepository) == false) {
throw new IllegalArgumentException("the snapshot repository must be specified");
}
this.snapshotRepository = snapshotRepository;
this.forceMergeIndex = forceMergeIndex;
}
public SearchableSnapshotAction(String snapshotRepository) {
this(snapshotRepository, true);
}
public SearchableSnapshotAction(StreamInput in) throws IOException {
this(in.readString());
this(in.readString(), in.getVersion().onOrAfter(Version.V_7_10_0) ? in.readBoolean() : true);
}
boolean isForceMergeIndex() {
return forceMergeIndex;
}
@Override
public List<Step> toSteps(Client client, String phase, StepKey nextStepKey) {
StepKey checkNoWriteIndex = new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME);
StepKey waitForNoFollowerStepKey = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME);
StepKey forceMergeStepKey = new StepKey(phase, NAME, ForceMergeStep.NAME);
StepKey waitForSegmentCountKey = new StepKey(phase, NAME, SegmentCountStep.NAME);
StepKey generateSnapshotNameKey = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME);
StepKey cleanSnapshotKey = new StepKey(phase, NAME, CleanupSnapshotStep.NAME);
StepKey createSnapshotKey = new StepKey(phase, NAME, CreateSnapshotStep.NAME);
@ -77,8 +92,14 @@ public class SearchableSnapshotAction implements LifecycleAction {
CheckNotDataStreamWriteIndexStep checkNoWriteIndexStep = new CheckNotDataStreamWriteIndexStep(checkNoWriteIndex,
waitForNoFollowerStepKey);
WaitForNoFollowersStep waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, generateSnapshotNameKey,
client);
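// the step that follows wait-for-no-followers depends on whether a force merge was requested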
final WaitForNoFollowersStep waitForNoFollowersStep;
if (forceMergeIndex) {
waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, forceMergeStepKey, client);
} else {
waitForNoFollowersStep = new WaitForNoFollowersStep(waitForNoFollowerStepKey, generateSnapshotNameKey, client);
}
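// the trailing 1 is the target segment count used by both the force-merge and the verification step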
ForceMergeStep forceMergeStep = new ForceMergeStep(forceMergeStepKey, waitForSegmentCountKey, client, 1);
SegmentCountStep segmentCountStep = new SegmentCountStep(waitForSegmentCountKey, generateSnapshotNameKey, client, 1);
GenerateSnapshotNameStep generateSnapshotNameStep = new GenerateSnapshotNameStep(generateSnapshotNameKey, cleanSnapshotKey,
snapshotRepository);
CleanupSnapshotStep cleanupSnapshotStep = new CleanupSnapshotStep(cleanSnapshotKey, createSnapshotKey, client);
@ -108,9 +129,25 @@ public class SearchableSnapshotAction implements LifecycleAction {
SwapAliasesAndDeleteSourceIndexStep swapAliasesAndDeleteSourceIndexStep = new SwapAliasesAndDeleteSourceIndexStep(swapAliasesKey,
null, client, RESTORED_INDEX_PREFIX);
return Arrays.asList(checkNoWriteIndexStep, waitForNoFollowersStep, generateSnapshotNameStep, cleanupSnapshotStep,
createSnapshotBranchingStep, mountSnapshotStep, waitForGreenIndexHealthStep, copyMetadataStep, copySettingsStep,
isDataStreamBranchingStep, replaceDataStreamBackingIndex, deleteSourceIndexStep, swapAliasesAndDeleteSourceIndexStep);
List<Step> steps = new ArrayList<>();
steps.add(checkNoWriteIndexStep);
steps.add(waitForNoFollowersStep);
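// when enabled, force merge to a single segment and verify the segment count before snapshotting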
if (forceMergeIndex) {
steps.add(forceMergeStep);
steps.add(segmentCountStep);
}
steps.add(generateSnapshotNameStep);
steps.add(cleanupSnapshotStep);
steps.add(createSnapshotBranchingStep);
steps.add(mountSnapshotStep);
steps.add(waitForGreenIndexHealthStep);
steps.add(copyMetadataStep);
steps.add(copySettingsStep);
steps.add(isDataStreamBranchingStep);
steps.add(replaceDataStreamBackingIndex);
steps.add(deleteSourceIndexStep);
steps.add(swapAliasesAndDeleteSourceIndexStep);
return steps;
}
@Override
@ -126,12 +163,16 @@ public class SearchableSnapshotAction implements LifecycleAction {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(snapshotRepository);
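// only 7.10.0+ nodes understand the force_merge_index flag, so gate it on the stream version (mirrored when reading)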
if (out.getVersion().onOrAfter(Version.V_7_10_0)) {
out.writeBoolean(forceMergeIndex);
}
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field(SNAPSHOT_REPOSITORY.getPreferredName(), snapshotRepository);
builder.field(FORCE_MERGE_INDEX.getPreferredName(), forceMergeIndex);
builder.endObject();
return builder;
}


@ -57,8 +57,7 @@ public class SegmentCountStep extends AsyncWaitStep {
if (idxSegments == null || (response.getShardFailures() != null && response.getShardFailures().length > 0)) {
final DefaultShardOperationFailedException[] failures = response.getShardFailures();
logger.info("[{}] retrieval of segment counts after force merge did not succeed, " +
"there were {} shard failures. " +
"failures: {}",
"there were {} shard failures. failures: {}",
index.getName(),
response.getFailedShards(),
failures == null ? "n/a" : Strings.collectionToDelimitedString(Arrays.stream(failures)


@ -20,42 +20,73 @@ public class SearchableSnapshotActionTests extends AbstractActionTestCase<Search
@Override
public void testToSteps() {
String phase = randomAlphaOfLengthBetween(1, 10);
StepKey expectedFirstStep = new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME);
StepKey expectedSecondStep = new StepKey(phase, NAME, WaitForNoFollowersStep.NAME);
StepKey expectedThirdStep = new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME);
StepKey expectedFourthStep = new StepKey(phase, NAME, CleanupSnapshotStep.NAME);
StepKey expectedFifthStep = new StepKey(phase, NAME, CreateSnapshotStep.NAME);
StepKey expectedSixthStep = new StepKey(phase, NAME, MountSnapshotStep.NAME);
StepKey expectedSeventhStep = new StepKey(phase, NAME, WaitForIndexColorStep.NAME);
StepKey expectedEighthStep = new StepKey(phase, NAME, CopyExecutionStateStep.NAME);
StepKey expectedNinthStep = new StepKey(phase, NAME, CopySettingsStep.NAME);
StepKey expectedTenthStep = new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_DATASTREAM_CHECK_KEY);
StepKey expectedElevenStep = new StepKey(phase, NAME, ReplaceDataStreamBackingIndexStep.NAME);
StepKey expectedTwelveStep = new StepKey(phase, NAME, DeleteStep.NAME);
StepKey expectedThirteenStep = new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME);
SearchableSnapshotAction action = createTestInstance();
StepKey nextStepKey = new StepKey(phase, randomAlphaOfLengthBetween(1, 5), randomAlphaOfLengthBetween(1, 5));
List<Step> steps = action.toSteps(null, phase, nextStepKey);
assertThat(steps.size(), is(13));
assertThat(steps.size(), is(action.isForceMergeIndex() ? 15 : 13));
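// force merging adds two steps (force merge + segment count) on top of the base 13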
assertThat(steps.get(0).getKey(), is(expectedFirstStep));
assertThat(steps.get(1).getKey(), is(expectedSecondStep));
assertThat(steps.get(2).getKey(), is(expectedThirdStep));
assertThat(steps.get(3).getKey(), is(expectedFourthStep));
assertThat(steps.get(4).getKey(), is(expectedFifthStep));
assertThat(steps.get(5).getKey(), is(expectedSixthStep));
assertThat(steps.get(6).getKey(), is(expectedSeventhStep));
assertThat(steps.get(7).getKey(), is(expectedEighthStep));
assertThat(steps.get(8).getKey(), is(expectedNinthStep));
assertThat(steps.get(9).getKey(), is(expectedTenthStep));
assertThat(steps.get(10).getKey(), is(expectedElevenStep));
assertThat(steps.get(11).getKey(), is(expectedTwelveStep));
assertThat(steps.get(12).getKey(), is(expectedThirteenStep));
List<StepKey> expectedSteps = action.isForceMergeIndex() ? expectedStepKeysWithForceMerge(phase) :
expectedStepKeysNoForceMerge(phase);
AsyncActionBranchingStep branchStep = (AsyncActionBranchingStep) steps.get(4);
assertThat(branchStep.getNextKeyOnIncompleteResponse(), is(expectedFourthStep));
assertThat(steps.get(0).getKey(), is(expectedSteps.get(0)));
assertThat(steps.get(1).getKey(), is(expectedSteps.get(1)));
assertThat(steps.get(2).getKey(), is(expectedSteps.get(2)));
assertThat(steps.get(3).getKey(), is(expectedSteps.get(3)));
assertThat(steps.get(4).getKey(), is(expectedSteps.get(4)));
assertThat(steps.get(5).getKey(), is(expectedSteps.get(5)));
assertThat(steps.get(6).getKey(), is(expectedSteps.get(6)));
assertThat(steps.get(7).getKey(), is(expectedSteps.get(7)));
assertThat(steps.get(8).getKey(), is(expectedSteps.get(8)));
assertThat(steps.get(9).getKey(), is(expectedSteps.get(9)));
assertThat(steps.get(10).getKey(), is(expectedSteps.get(10)));
assertThat(steps.get(11).getKey(), is(expectedSteps.get(11)));
assertThat(steps.get(12).getKey(), is(expectedSteps.get(12)));
if (action.isForceMergeIndex()) {
assertThat(steps.get(13).getKey(), is(expectedSteps.get(13)));
AsyncActionBranchingStep branchStep = (AsyncActionBranchingStep) steps.get(6);
assertThat(branchStep.getNextKeyOnIncompleteResponse(), is(expectedSteps.get(5)));
} else {
AsyncActionBranchingStep branchStep = (AsyncActionBranchingStep) steps.get(4);
assertThat(branchStep.getNextKeyOnIncompleteResponse(), is(expectedSteps.get(3)));
}
}
private List<StepKey> expectedStepKeysWithForceMerge(String phase) {
return org.elasticsearch.common.collect.List.of(
new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME),
new StepKey(phase, NAME, WaitForNoFollowersStep.NAME),
new StepKey(phase, NAME, ForceMergeStep.NAME),
new StepKey(phase, NAME, SegmentCountStep.NAME),
new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME),
new StepKey(phase, NAME, CleanupSnapshotStep.NAME),
new StepKey(phase, NAME, CreateSnapshotStep.NAME),
new StepKey(phase, NAME, MountSnapshotStep.NAME),
new StepKey(phase, NAME, WaitForIndexColorStep.NAME),
new StepKey(phase, NAME, CopyExecutionStateStep.NAME),
new StepKey(phase, NAME, CopySettingsStep.NAME),
new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_DATASTREAM_CHECK_KEY),
new StepKey(phase, NAME, ReplaceDataStreamBackingIndexStep.NAME),
new StepKey(phase, NAME, DeleteStep.NAME),
new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME));
}
private List<StepKey> expectedStepKeysNoForceMerge(String phase) {
return org.elasticsearch.common.collect.List.of(
new StepKey(phase, NAME, CheckNotDataStreamWriteIndexStep.NAME),
new StepKey(phase, NAME, WaitForNoFollowersStep.NAME),
new StepKey(phase, NAME, GenerateSnapshotNameStep.NAME),
new StepKey(phase, NAME, CleanupSnapshotStep.NAME),
new StepKey(phase, NAME, CreateSnapshotStep.NAME),
new StepKey(phase, NAME, MountSnapshotStep.NAME),
new StepKey(phase, NAME, WaitForIndexColorStep.NAME),
new StepKey(phase, NAME, CopyExecutionStateStep.NAME),
new StepKey(phase, NAME, CopySettingsStep.NAME),
new StepKey(phase, NAME, SearchableSnapshotAction.CONDITIONAL_DATASTREAM_CHECK_KEY),
new StepKey(phase, NAME, ReplaceDataStreamBackingIndexStep.NAME),
new StepKey(phase, NAME, DeleteStep.NAME),
new StepKey(phase, NAME, SwapAliasesAndDeleteSourceIndexStep.NAME));
}
@Override
@ -79,6 +110,6 @@ public class SearchableSnapshotActionTests extends AbstractActionTestCase<Search
}
static SearchableSnapshotAction randomInstance() {
return new SearchableSnapshotAction(randomAlphaOfLengthBetween(5, 10));
return new SearchableSnapshotAction(randomAlphaOfLengthBetween(5, 10), randomBoolean());
}
}


@ -15,6 +15,7 @@ import org.elasticsearch.client.Response;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
@ -36,12 +37,15 @@ import java.io.IOException;
import java.io.InputStream;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.test.ESTestCase.randomAlphaOfLengthBetween;
import static org.elasticsearch.test.ESTestCase.randomBoolean;
import static org.elasticsearch.test.rest.ESRestTestCase.ensureGreen;
/**
* This class provides the operational REST functions needed to control an ILM time series lifecycle.
@ -204,4 +208,36 @@ public final class TimeSeriesRestDriver {
}
}
public static void createIndexWithSettings(RestClient client, String index, String alias, Settings.Builder settings)
throws IOException {
createIndexWithSettings(client, index, alias, settings, randomBoolean());
}
public static void createIndexWithSettings(RestClient client, String index, String alias, Settings.Builder settings,
boolean useWriteIndex) throws IOException {
Request request = new Request("PUT", "/" + index);
String writeIndexSnippet = "";
if (useWriteIndex) {
writeIndexSnippet = "\"is_write_index\": true";
}
request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings.build())
+ ", \"aliases\" : { \"" + alias + "\": { " + writeIndexSnippet + " } } }");
client.performRequest(request);
// wait for the shards to initialize
ensureGreen(index);
}
@SuppressWarnings("unchecked")
public static Integer getNumberOfSegments(RestClient client, String index) throws IOException {
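// drill into the _segments response: indices -> <index> -> shards -> shard "0" -> num_search_segments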
Response response = client.performRequest(new Request("GET", index + "/_segments"));
XContentType entityContentType = XContentType.fromMediaTypeOrFormat(response.getEntity().getContentType().getValue());
Map<String, Object> responseEntity = XContentHelper.convertToMap(entityContentType.xContent(),
response.getEntity().getContent(), false);
responseEntity = (Map<String, Object>) responseEntity.get("indices");
responseEntity = (Map<String, Object>) responseEntity.get(index);
responseEntity = (Map<String, Object>) responseEntity.get("shards");
List<Map<String, Object>> shards = (List<Map<String, Object>>) responseEntity.get("0");
return (Integer) shards.get(0).get("num_search_segments");
}
}


@ -223,10 +223,10 @@ public class TimeSeriesDataStreamsIT extends ESRestTestCase {
}
private static Template getTemplate(String policyName) throws IOException {
return new Template(getLifcycleSettings(policyName), null, null);
return new Template(getLifecycleSettings(policyName), null, null);
}
private static Settings getLifcycleSettings(String policyName) {
private static Settings getLifecycleSettings(String policyName) {
return Settings.builder()
.put(LifecycleSettings.LIFECYCLE_NAME, policyName)
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)


@ -8,7 +8,6 @@ package org.elasticsearch.xpack.ilm;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.client.Request;
@ -62,15 +61,16 @@ import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.function.Supplier;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createFullPolicy;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createIndexWithSettings;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createSnapshotRepo;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.explain;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.explainIndex;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.getNumberOfSegments;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.getOnlyIndexSettings;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.getStepKeyForIndex;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.indexDocument;
@ -113,7 +113,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
String originalIndex = index + "-000001";
String shrunkenOriginalIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + originalIndex;
String secondIndex = index + "-000002";
createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
createIndexWithSettings(client(), originalIndex, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put("index.routing.allocation.include._name", "integTest-0")
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias));
@ -141,7 +141,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testMoveToAllocateStep() throws Exception {
String originalIndex = index + "-000001";
createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
createIndexWithSettings(client(), originalIndex, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put("index.routing.allocation.include._name", "integTest-0")
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, "alias"));
@ -175,7 +175,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
String originalIndex = index + "-000001";
String shrunkenOriginalIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + originalIndex;
String secondIndex = index + "-000002";
createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
createIndexWithSettings(client(), originalIndex, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 4)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put("index.routing.allocation.include._name", "integTest-0")
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias));
@ -219,7 +219,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testRetryFailedDeleteAction() throws Exception {
createNewSingletonPolicy(client(), policy, "delete", new DeleteAction());
createIndexWithSettings(index, Settings.builder()
createIndexWithSettings(client(), index, alias, Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_READ_ONLY, true)
@ -239,7 +239,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testRetryFreezeDeleteAction() throws Exception {
createNewSingletonPolicy(client(), policy, "cold", new FreezeAction());
createIndexWithSettings(index, Settings.builder()
createIndexWithSettings(client(), index, alias, Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetadata.SETTING_READ_ONLY, true)
@ -261,7 +261,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
int divisor = randomFrom(2, 4);
int expectedFinalShards = numShards / divisor;
String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numShards + randomIntBetween(1, numShards)));
updatePolicy(index, policy);
@ -294,7 +294,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testRolloverAction() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), originalIndex, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias));
@ -313,7 +313,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testRolloverActionWithIndexingComplete() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), originalIndex, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias));
@ -352,7 +352,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testAllocateOnlyAllocation() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
String allocateNodeName = "integTest-" + randomFrom(0, 1);
AllocateAction allocateAction = new AllocateAction(null, null, null, singletonMap("_name", allocateNodeName));
@ -369,7 +369,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
int numShards = randomFrom(1, 5);
int numReplicas = randomFrom(0, 1);
int finalNumReplicas = (numReplicas + 1) % 2;
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, numReplicas));
AllocateAction allocateAction = new AllocateAction(finalNumReplicas, null, null, null);
String endPhase = randomFrom("warm", "cold");
@ -383,7 +383,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testWaitForSnapshot() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
String slmPolicy = randomAlphaOfLengthBetween(4, 10);
createNewSingletonPolicy(client(), policy, "delete", new WaitForSnapshotAction(slmPolicy));
@ -413,7 +413,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testWaitForSnapshotSlmExecutedBefore() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
String slmPolicy = randomAlphaOfLengthBetween(4, 10);
createNewSingletonPolicy(client(), policy, "delete", new WaitForSnapshotAction(slmPolicy));
@ -459,7 +459,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testDelete() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "delete", new DeleteAction());
updatePolicy(index, policy);
@ -467,7 +467,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testDeleteOnlyShouldNotMakeIndexReadonly() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "delete", new DeleteAction(), TimeValue.timeValueHours(1));
updatePolicy(index, policy);
@ -496,7 +496,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// create delete policy
createNewSingletonPolicy(client(), policy, "delete", new DeleteAction(), TimeValue.timeValueMillis(0));
// create index without policy
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
// index document so snapshot actually does something
indexDocument(client(), index);
@ -519,7 +519,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testReadOnly() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "warm", new ReadOnlyAction());
updatePolicy(index, policy);
@ -530,9 +530,8 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
});
}
@SuppressWarnings("unchecked")
public void forceMergeActionWithCodec(String codec) throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
for (int i = 0; i < randomIntBetween(2, 10); i++) {
Request request = new Request("PUT", index + "/_doc/" + i);
@ -541,26 +540,14 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
client().performRequest(request);
}
Supplier<Integer> numSegments = () -> {
try {
Map<String, Object> segmentResponse = getAsMap(index + "/_segments");
segmentResponse = (Map<String, Object>) segmentResponse.get("indices");
segmentResponse = (Map<String, Object>) segmentResponse.get(index);
segmentResponse = (Map<String, Object>) segmentResponse.get("shards");
List<Map<String, Object>> shards = (List<Map<String, Object>>) segmentResponse.get("0");
return (Integer) shards.get(0).get("num_search_segments");
} catch (Exception e) {
throw new RuntimeException(e);
}
};
assertThat(numSegments.get(), greaterThan(1));
assertThat(getNumberOfSegments(client(), index), greaterThan(1));
createNewSingletonPolicy(client(), policy, "warm", new ForceMergeAction(1, codec));
updatePolicy(index, policy);
assertBusy(() -> {
assertThat(getStepKeyForIndex(client(), index), equalTo(PhaseCompleteStep.finalStep("warm").getKey()));
Map<String, Object> settings = getOnlyIndexSettings(client(), index);
assertThat(numSegments.get(), equalTo(1));
assertThat(getNumberOfSegments(client(), index), equalTo(1));
assertThat(settings.get(IndexMetadata.INDEX_BLOCKS_WRITE_SETTING.getKey()), equalTo("true"));
});
expectThrows(ResponseException.class, () -> indexDocument(client(), index));
@ -575,7 +562,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
int divisor = randomFrom(2, 4);
int expectedFinalShards = numShards / divisor;
String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(expectedFinalShards));
updatePolicy(index, policy);
@ -594,7 +581,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testShrinkSameShards() throws Exception {
int numberOfShards = randomFrom(1, 2);
String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numberOfShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(numberOfShards));
updatePolicy(index, policy);
@ -628,7 +615,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// create delete policy
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1), TimeValue.timeValueMillis(0));
// create index without policy
createIndexWithSettings(index, Settings.builder()
createIndexWithSettings(client(), index, alias, Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 2)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
// required so the shrink doesn't wait on SetSingleNodeAllocateStep
@ -665,7 +652,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
int numShards = 2;
int expectedFinalShards = 1;
String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, numShards)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
ensureGreen(index);
@ -717,7 +704,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testFreezeAction() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
createNewSingletonPolicy(client(), policy, "cold", new FreezeAction());
updatePolicy(index, policy);
@ -747,7 +734,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// create delete policy
createNewSingletonPolicy(client(), policy, "cold", new FreezeAction(), TimeValue.timeValueMillis(0));
// create index without policy
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0));
// index document so snapshot actually does something
indexDocument(client(), index);
@ -775,7 +762,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testSetPriority() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetadata.INDEX_PRIORITY_SETTING.getKey(), 100));
int priority = randomIntBetween(0, 99);
createNewSingletonPolicy(client(), policy, "warm", new SetPriorityAction(priority));
@ -788,7 +775,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testSetNullPriority() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0).put(IndexMetadata.INDEX_PRIORITY_SETTING.getKey(), 100));
createNewSingletonPolicy(client(), policy, "warm", new SetPriorityAction((Integer) null));
updatePolicy(index, policy);
@ -913,7 +900,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// Set up a policy with rollover
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, 1L));
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -949,7 +938,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
String shrunkenIndex = ShrinkAction.SHRUNKEN_INDEX_PREFIX + index;
createNewSingletonPolicy(client(), policy, "warm", new ShrinkAction(1), TimeValue.timeValueHours(12));
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 3)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias));
@ -985,7 +974,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testMoveToStepRereadsPolicy() throws Exception {
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, TimeValue.timeValueHours(1), null), TimeValue.ZERO);
createIndexWithSettings("test-1", Settings.builder()
createIndexWithSettings(client(), "test-1", alias, Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -1020,7 +1009,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
}
public void testCanStopILMWithPolicyUsingNonexistentPolicy() throws Exception {
createIndexWithSettings(index, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), index, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME_SETTING.getKey(), randomAlphaOfLengthBetween(5,15)));
@ -1066,7 +1055,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
assertOK(client().performRequest(request));
}
createIndexWithSettings(goodIndex, Settings.builder()
createIndexWithSettings(client(), goodIndex, alias, Settings.builder()
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -1122,7 +1111,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// create the index as readonly and associate the ILM policy to it
createIndexWithSettings(
client(),
firstIndex,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -1168,7 +1159,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
client().performRequest(createIndexTemplate);
createIndexWithSettings(
client(),
originalIndex,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0),
true
@ -1213,7 +1206,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// create the rolled index so the rollover of the first index fails
createIndexWithSettings(
client(),
rolledIndex,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias),
@ -1221,7 +1216,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
);
createIndexWithSettings(
client(),
index,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -1285,7 +1282,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, 1L));
createIndexWithSettings(
client(),
index,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -1337,7 +1336,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
public void testWaitForActiveShardsStep() throws Exception {
String originalIndex = index + "-000001";
String secondIndex = index + "-000002";
createIndexWithSettings(originalIndex, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
createIndexWithSettings(client(), originalIndex, alias, Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(RolloverAction.LIFECYCLE_ROLLOVER_ALIAS, alias),
true);
@ -1384,7 +1383,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
"}");
client().performRequest(createIndexTemplate);
createIndexWithSettings(index + "-1", Settings.builder(), true);
createIndexWithSettings(client(), index + "-1", alias, Settings.builder(), true);
// Index a document
index(client(), index + "-1", "1", "foo", "bar");
@ -1410,7 +1409,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/50353")
public void testHistoryIsWrittenWithFailure() throws Exception {
createIndexWithSettings(index + "-1", Settings.builder(), false);
createIndexWithSettings(client(), index + "-1", alias, Settings.builder(), false);
createNewSingletonPolicy(client(), policy, "hot", new RolloverAction(null, null, 1L));
updatePolicy(index + "-1", policy);
@ -1429,7 +1428,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/53718")
public void testHistoryIsWrittenWithDeletion() throws Exception {
// Index should be created and then deleted by ILM
createIndexWithSettings(index, Settings.builder(), false);
createIndexWithSettings(client(), index, alias, Settings.builder(), false);
createNewSingletonPolicy(client(), policy, "delete", new DeleteAction());
updatePolicy(index, policy);
@ -1453,7 +1452,9 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
// Create the index with the origination parsing turn *off* so it doesn't prevent creation
createIndexWithSettings(
client(),
index,
alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy)
@ -1504,7 +1505,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
"}");
client().performRequest(createIndexTemplate);
createIndexWithSettings(index + "-1",
createIndexWithSettings(client(), index + "-1", alias,
Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0),
true);
@ -1528,7 +1529,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
createNewSingletonPolicy(client(), policy, "hot", new SetPriorityAction(100));
createIndexWithSettings(index,
createIndexWithSettings(client(), index, alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
@ -1560,88 +1561,6 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
assertBusy(() -> assertFalse("expected " + index + " to be deleted by ILM", indexExists(index)));
}
public void testSearchableSnapshotAction() throws Exception {
String snapshotRepo = randomAlphaOfLengthBetween(4, 10);
createSnapshotRepo(client(), snapshotRepo, randomBoolean());
createNewSingletonPolicy(client(), policy, "cold", new SearchableSnapshotAction(snapshotRepo));
createIndexWithSettings(index,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy),
randomBoolean());
String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + this.index;
assertTrue(waitUntil(() -> {
try {
return indexExists(restoredIndexName);
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
assertBusy(() -> assertThat(explainIndex(client(), restoredIndexName).get("step"), is(PhaseCompleteStep.NAME)), 30,
TimeUnit.SECONDS);
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/54433")
public void testDeleteActionDeletesSearchableSnapshot() throws Exception {
String snapshotRepo = randomAlphaOfLengthBetween(4, 10);
createSnapshotRepo(client(), snapshotRepo, randomBoolean());
// create policy with cold and delete phases
Map<String, LifecycleAction> coldActions =
org.elasticsearch.common.collect.Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo));
Map<String, Phase> phases = new HashMap<>();
phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions));
phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME,
new DeleteAction(true))));
LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases);
// PUT policy
XContentBuilder builder = jsonBuilder();
lifecyclePolicy.toXContent(builder, null);
final StringEntity entity = new StringEntity(
"{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON);
Request createPolicyRequest = new Request("PUT", "_ilm/policy/" + policy);
createPolicyRequest.setEntity(entity);
assertOK(client().performRequest(createPolicyRequest));
createIndexWithSettings(index,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
.put(LifecycleSettings.LIFECYCLE_NAME, policy),
randomBoolean());
String[] snapshotName = new String[1];
String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + this.index;
assertTrue(waitUntil(() -> {
try {
Map<String, Object> explainIndex = explainIndex(client(), index);
if(explainIndex == null) {
// in case we missed the original index and it was deleted
explainIndex = explainIndex(client(), restoredIndexName);
}
snapshotName[0] = (String) explainIndex.get("snapshot_name");
return snapshotName[0] != null;
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
assertBusy(() -> assertFalse(indexExists(restoredIndexName)));
assertTrue("the snapshot we generate in the cold phase should be deleted by the delete phase", waitUntil(() -> {
try {
Request getSnapshotsRequest = new Request("GET", "_snapshot/" + snapshotRepo + "/" + snapshotName[0]);
Response getSnapshotsResponse = client().performRequest(getSnapshotsRequest);
return EntityUtils.toString(getSnapshotsResponse.getEntity()).contains("snapshot_missing_exception");
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
}
@SuppressWarnings("unchecked")
public void testDeleteActionDoesntDeleteSearchableSnapshot() throws Exception {
String snapshotRepo = randomAlphaOfLengthBetween(4, 10);
@ -1664,7 +1583,7 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
createPolicyRequest.setEntity(entity);
assertOK(client().performRequest(createPolicyRequest));
createIndexWithSettings(index,
createIndexWithSettings(client(), index, alias,
Settings.builder()
.put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
.put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
@ -1817,24 +1736,6 @@ public class TimeSeriesLifecycleActionsIT extends ESRestTestCase {
ensureGreen(index);
}
private void createIndexWithSettings(String index, Settings.Builder settings) throws IOException {
createIndexWithSettings(index, settings, randomBoolean());
}
private void createIndexWithSettings(String index, Settings.Builder settings, boolean useWriteIndex) throws IOException {
Request request = new Request("PUT", "/" + index);
String writeIndexSnippet = "";
if (useWriteIndex) {
writeIndexSnippet = "\"is_write_index\": true";
}
request.setJsonEntity("{\n \"settings\": " + Strings.toString(settings.build())
+ ", \"aliases\" : { \"" + alias + "\": { " + writeIndexSnippet + " } } }");
client().performRequest(request);
// wait for the shards to initialize
ensureGreen(index);
}
private static void index(RestClient client, String index, String id, Object... fields) throws IOException {
XContentBuilder document = jsonBuilder().startObject();
for (int i = 0; i < fields.length; i += 2) {


@ -0,0 +1,184 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
package org.elasticsearch.xpack.ilm.actions;
import org.apache.http.entity.ContentType;
import org.apache.http.entity.StringEntity;
import org.apache.http.util.EntityUtils;
import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.cluster.metadata.DataStream;
import org.elasticsearch.cluster.metadata.Template;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.test.rest.ESRestTestCase;
import org.elasticsearch.xpack.core.ilm.DeleteAction;
import org.elasticsearch.xpack.core.ilm.LifecycleAction;
import org.elasticsearch.xpack.core.ilm.LifecyclePolicy;
import org.elasticsearch.xpack.core.ilm.LifecycleSettings;
import org.elasticsearch.xpack.core.ilm.Phase;
import org.elasticsearch.xpack.core.ilm.PhaseCompleteStep;
import org.elasticsearch.xpack.core.ilm.SearchableSnapshotAction;
import org.junit.Before;
import java.io.IOException;
import java.util.HashMap;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import static java.util.Collections.singletonMap;
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createComposableTemplate;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createNewSingletonPolicy;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.createSnapshotRepo;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.explainIndex;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.getNumberOfSegments;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.indexDocument;
import static org.elasticsearch.xpack.TimeSeriesRestDriver.rolloverMaxOneDocCondition;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.is;
public class SearchableSnapshotActionIT extends ESRestTestCase {
private String policy;
private String dataStream;
@Before
public void refreshIndex() {
dataStream = "logs-" + randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
policy = "policy-" + randomAlphaOfLength(5);
}
public void testSearchableSnapshotAction() throws Exception {
String snapshotRepo = randomAlphaOfLengthBetween(4, 10);
createSnapshotRepo(client(), snapshotRepo, randomBoolean());
createNewSingletonPolicy(client(), policy, "cold", new SearchableSnapshotAction(snapshotRepo, true));
createComposableTemplate(client(), "template-name", dataStream,
new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, null));
indexDocument(client(), dataStream, true);
// rolling over the data stream so we can apply the searchable snapshot policy to a backing index that's not the write index
rolloverMaxOneDocCondition(client(), dataStream);
String backingIndexName = DataStream.getDefaultBackingIndexName(dataStream, 1L);
String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + backingIndexName;
assertTrue(waitUntil(() -> {
try {
return indexExists(restoredIndexName);
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
assertBusy(() -> assertThat(explainIndex(client(), restoredIndexName).get("step"), is(PhaseCompleteStep.NAME)), 30,
TimeUnit.SECONDS);
}
public void testSearchableSnapshotForceMergesIndexToOneSegment() throws Exception {
String snapshotRepo = randomAlphaOfLengthBetween(4, 10);
createSnapshotRepo(client(), snapshotRepo, randomBoolean());
createNewSingletonPolicy(client(), policy, "cold", new SearchableSnapshotAction(snapshotRepo, true));
createComposableTemplate(client(), "template-name", dataStream, new Template(null, null, null));
for (int i = 0; i < randomIntBetween(5, 10); i++) {
indexDocument(client(), dataStream, true);
}
String backingIndexName = DataStream.getDefaultBackingIndexName(dataStream, 1L);
assertThat(getNumberOfSegments(client(), backingIndexName), greaterThan(1));
// rolling over the data stream so we can apply the searchable snapshot policy to a backing index that's not the write index
rolloverMaxOneDocCondition(client(), dataStream);
updateIndexSettings(dataStream, Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy));
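// attach the policy only after the rollover so the first backing index (no longer the write index) can move through the searchable_snapshot action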
assertTrue(waitUntil(() -> {
try {
return getNumberOfSegments(client(), backingIndexName) == 1;
} catch (IOException e) {
return false;
}
}, 60, TimeUnit.SECONDS));
String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + backingIndexName;
assertTrue(waitUntil(() -> {
try {
return indexExists(restoredIndexName);
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
assertBusy(() -> assertThat(explainIndex(client(), restoredIndexName).get("step"), is(PhaseCompleteStep.NAME)), 30,
TimeUnit.SECONDS);
}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/pull/54433")
public void testDeleteActionDeletesSearchableSnapshot() throws Exception {
String snapshotRepo = randomAlphaOfLengthBetween(4, 10);
createSnapshotRepo(client(), snapshotRepo, randomBoolean());
// create policy with cold and delete phases
Map<String, LifecycleAction> coldActions =
org.elasticsearch.common.collect.Map.of(SearchableSnapshotAction.NAME, new SearchableSnapshotAction(snapshotRepo));
Map<String, Phase> phases = new HashMap<>();
phases.put("cold", new Phase("cold", TimeValue.ZERO, coldActions));
phases.put("delete", new Phase("delete", TimeValue.timeValueMillis(10000), singletonMap(DeleteAction.NAME,
new DeleteAction(true))));
LifecyclePolicy lifecyclePolicy = new LifecyclePolicy(policy, phases);
// PUT policy
XContentBuilder builder = jsonBuilder();
lifecyclePolicy.toXContent(builder, null);
final StringEntity entity = new StringEntity(
"{ \"policy\":" + Strings.toString(builder) + "}", ContentType.APPLICATION_JSON);
Request createPolicyRequest = new Request("PUT", "_ilm/policy/" + policy);
createPolicyRequest.setEntity(entity);
assertOK(client().performRequest(createPolicyRequest));
createComposableTemplate(client(), "template-name", dataStream,
new Template(Settings.builder().put(LifecycleSettings.LIFECYCLE_NAME, policy).build(), null, null));
indexDocument(client(), dataStream, true);
// rolling over the data stream so we can apply the searchable snapshot policy to a backing index that's not the write index
rolloverMaxOneDocCondition(client(), dataStream);
String[] snapshotName = new String[1];
String backingIndexName = DataStream.getDefaultBackingIndexName(dataStream, 1L);
String restoredIndexName = SearchableSnapshotAction.RESTORED_INDEX_PREFIX + backingIndexName;
assertTrue(waitUntil(() -> {
try {
Map<String, Object> explainIndex = explainIndex(client(), backingIndexName);
if (explainIndex == null) {
// in case we missed the original index and it was deleted
explainIndex = explainIndex(client(), restoredIndexName);
}
snapshotName[0] = (String) explainIndex.get("snapshot_name");
return snapshotName[0] != null;
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
assertBusy(() -> assertFalse(indexExists(restoredIndexName)));
assertTrue("the snapshot we generate in the cold phase should be deleted by the delete phase", waitUntil(() -> {
try {
Request getSnapshotsRequest = new Request("GET", "_snapshot/" + snapshotRepo + "/" + snapshotName[0]);
Response getSnapshotsResponse = client().performRequest(getSnapshotsRequest);
return EntityUtils.toString(getSnapshotsResponse.getEntity()).contains("snapshot_missing_exception");
} catch (IOException e) {
return false;
}
}, 30, TimeUnit.SECONDS));
}
}