mirror of https://github.com/honeymoose/OpenSearch.git, synced 2025-03-25 09:28:27 +00:00
Backport support for replicating closed indices (#39499)

Before this change, closed indices were simply not replicated. It was therefore possible to close an index and then decommission a data node without knowing that this node contained shards of the closed index, potentially leading to data loss. Shards of closed indices were not fully taken into account when balancing shards within the cluster, were not automatically replicated through shard copies, and could not easily be moved from node A to node B using APIs like Cluster Reroute without being fully reopened and closed again.

This commit changes the logic executed when closing an index so that its shards are not just removed and forgotten, but are instead reinitialized and reallocated on data nodes using an engine implementation that does not allow searching or indexing, has a low memory overhead (compared with searchable/indexable open shards), and allows shards to be recovered from a peer or promoted as primaries when needed.

This new closing logic is built on top of the new Close Index API introduced in 6.7.0 (#37359). Some pre-closing sanity checks are executed on the shards before closing them, and closing an index on an 8.0 cluster will reinitialize the index shards and therefore impact the cluster health.

The following APIs have been adapted to work with closed indices:
- Cluster Health API
- Cluster Reroute API
- Cluster Allocation Explain API
- Recovery API
- Cat Indices
- Cat Shards
- Cat Health
- Cat Recovery

This commit contains all the following changes (most recent first):
* c6c42a1 Adapt NoOpEngineTests after #39006
* 3f9993d Wait for shards to be active after closing indices (#38854)
* 5e7a428 Adapt the Cluster Health API to closed indices (#39364)
* 3e61939 Adapt CloseFollowerIndexIT for replicated closed indices (#38767)
* 71f5c34 Recover closed indices after a full cluster restart (#39249)
* 4db7fd9 Adapt the Recovery API for closed indices (#38421)
* 4fd1bb2 Adapt more tests suites to closed indices (#39186)
* 0519016 Add replica to primary promotion test for closed indices (#39110)
* b756f6c Test the Cluster Shard Allocation Explain API with closed indices (#38631)
* c484c66 Remove index routing table of closed indices in mixed versions clusters (#38955)
* 00f1828 Mute CloseFollowerIndexIT.testCloseAndReopenFollowerIndex()
* e845b0a Do not schedule Refresh/Translog/GlobalCheckpoint tasks for closed indices (#38329)
* cf9a015 Adapt testIndexCanChangeCustomDataPath for replicated closed indices (#38327)
* b9becdd Adapt testPendingTasks() for replicated closed indices (#38326)
* 02cc730 Allow shards of closed indices to be replicated as regular shards (#38024)
* e53a9be Fix compilation error in IndexShardIT after merge with master
* cae4155 Relax NoOpEngine constraints (#37413)
* 54d110b [RCI] Adapt NoOpEngine to latest FrozenEngine changes
* c63fd69 [RCI] Add NoOpEngine for closed indices (#33903)

Relates to #33888
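As a quick, hedged illustration of what the API surface changed below enables, a transport-client caller can now ask a close operation to wait for the reinitialized shards to become active before returning. This is a minimal sketch, assuming the usual admin-client wiring (client.admin().indices()) and the CloseIndexResponse/CloseIndexRequestBuilder types introduced in this commit; the index name is illustrative, not from the source.

    import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
    import org.elasticsearch.action.support.ActiveShardCount;
    import org.elasticsearch.client.Client;

    // Minimal sketch: close an index and wait for all copies of its closed shards to start.
    static CloseIndexResponse closeAndWait(Client client) {
        CloseIndexResponse response = client.admin().indices()
            .prepareClose("logs")                         // hypothetical index name
            .setWaitForActiveShards(ActiveShardCount.ALL) // new in this change
            .get();
        assert response.isAcknowledged();
        // shardsAcknowledged is only transported from 7.1.0+ masters (see CloseIndexResponse below)
        assert response.isShardsAcknowledged();
        return response;
    }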
This commit is contained in:
parent 06d0e0efad
commit e005eeb0b3
@@ -27,6 +27,7 @@ import org.elasticsearch.client.ResponseException;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.WarningFailureException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.CheckedFunction;
import org.elasticsearch.common.Strings;
@@ -47,6 +48,7 @@ import org.junit.Before;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Base64;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -65,8 +67,11 @@ import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.greaterThanOrEqualTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;

/**
@@ -1022,8 +1027,98 @@ public class FullClusterRestartIT extends AbstractFullClusterRestartTestCase {
        }
    }

-    private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion)
-            throws IOException {
    /**
     * This test creates an index in the old cluster and then closes it. When the cluster is fully restarted in a newer version,
     * it verifies that the index exists and is replicated if the old version supports replication.
     */
    public void testClosedIndices() throws Exception {
        if (isRunningAgainstOldCluster()) {
            createIndex(index, Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .build());
            ensureGreen(index);

            int numDocs = 0;
            if (randomBoolean()) {
                numDocs = between(1, 100);
                for (int i = 0; i < numDocs; i++) {
                    final Request request = new Request("POST", "/" + index + "/_doc/" + i);
                    request.setJsonEntity(Strings.toString(JsonXContent.contentBuilder().startObject().field("field", "v1").endObject()));
                    assertOK(client().performRequest(request));
                    if (rarely()) {
                        refresh();
                    }
                }
                refresh();
            }

            assertTotalHits(numDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
            saveInfoDocument(index + "_doc_count", Integer.toString(numDocs));
            closeIndex(index);
        }

        if (getOldClusterVersion().onOrAfter(Version.V_7_1_0)) {
            ensureGreenLongWait(index);
            assertClosedIndex(index, true);
        } else {
            assertClosedIndex(index, false);
        }

        if (isRunningAgainstOldCluster() == false) {
            openIndex(index);
            ensureGreen(index);

            final int expectedNumDocs = Integer.parseInt(loadInfoDocument(index + "_doc_count"));
            assertTotalHits(expectedNumDocs, entityAsMap(client().performRequest(new Request("GET", "/" + index + "/_search"))));
        }
    }

    /**
     * Asserts that an index is closed in the cluster state. If `checkRoutingTable` is true, it also asserts
     * that the index has started shards.
     */
    @SuppressWarnings("unchecked")
    private void assertClosedIndex(final String index, final boolean checkRoutingTable) throws IOException {
        final Map<String, ?> state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state")));

        final Map<String, ?> metadata = (Map<String, Object>) XContentMapValues.extractValue("metadata.indices." + index, state);
        assertThat(metadata, notNullValue());
        assertThat(metadata.get("state"), equalTo("close"));

        final Map<String, ?> blocks = (Map<String, Object>) XContentMapValues.extractValue("blocks.indices." + index, state);
        assertThat(blocks, notNullValue());
        assertThat(blocks.containsKey(String.valueOf(MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID)), is(true));

        final Map<String, ?> settings = (Map<String, Object>) XContentMapValues.extractValue("settings", metadata);
        assertThat(settings, notNullValue());

        final Map<String, ?> routingTable = (Map<String, Object>) XContentMapValues.extractValue("routing_table.indices." + index, state);
        if (checkRoutingTable) {
            assertThat(routingTable, notNullValue());
            assertThat(Booleans.parseBoolean((String) XContentMapValues.extractValue("index.verified_before_close", settings)), is(true));
            final String numberOfShards = (String) XContentMapValues.extractValue("index.number_of_shards", settings);
            assertThat(numberOfShards, notNullValue());
            final int nbShards = Integer.parseInt(numberOfShards);
            assertThat(nbShards, greaterThanOrEqualTo(1));

            for (int i = 0; i < nbShards; i++) {
                final Collection<Map<String, ?>> shards =
                    (Collection<Map<String, ?>>) XContentMapValues.extractValue("shards." + i, routingTable);
                assertThat(shards, notNullValue());
                assertThat(shards.size(), equalTo(2));
                for (Map<String, ?> shard : shards) {
                    assertThat(XContentMapValues.extractValue("shard", shard), equalTo(i));
                    assertThat(XContentMapValues.extractValue("state", shard), equalTo("STARTED"));
                    assertThat(XContentMapValues.extractValue("index", shard), equalTo(index));
                }
            }
        } else {
            assertThat(routingTable, nullValue());
            assertThat(XContentMapValues.extractValue("index.verified_before_close", settings), nullValue());
        }
    }

+    private void checkSnapshot(final String snapshotName, final int count, final Version tookOnVersion) throws IOException {
        // Check the snapshot metadata, especially the version
        Request listSnapshotRequest = new Request("GET", "/_snapshot/repo/" + snapshotName);
        Map<String, Object> listSnapshotResponse = entityAsMap(client().performRequest(listSnapshotRequest));
@@ -24,15 +24,21 @@ import org.elasticsearch.client.Request;
import org.elasticsearch.client.Response;
import org.elasticsearch.client.ResponseException;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.xcontent.support.XContentMapValues;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.rest.action.document.RestIndexAction;
import org.elasticsearch.test.rest.yaml.ObjectPath;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.Future;
import java.util.function.Predicate;
@@ -43,7 +49,9 @@ import static org.elasticsearch.cluster.routing.allocation.decider.EnableAllocat
import static org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider.SETTING_ALLOCATION_MAX_RETRY;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasSize;
import static org.hamcrest.Matchers.is;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

/**
 * In depth testing of the recovery mechanism during a rolling restart.
@@ -310,4 +318,148 @@ public class RecoveryIT extends AbstractRollingTestCase {
        }
        ensureGreen(index);
    }

    /**
     * This test creates an index in the non-upgraded cluster and closes it. It then checks that the index
     * is effectively closed and potentially replicated (if the version the index was created on supports
     * the replication of closed indices) during the rolling upgrade.
     */
    public void testRecoveryClosedIndex() throws Exception {
        final String indexName = "closed_index_created_on_old";
        if (CLUSTER_TYPE == ClusterType.OLD) {
            createIndex(indexName, Settings.builder()
                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 1)
                // if the node with the replica is the first to be restarted, while a replica is still recovering
                // then delayed allocation will kick in. When the node comes back, the master will search for a copy
                // but the recovering copy will be seen as invalid and the cluster health won't return to GREEN
                // before timing out
                .put(INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), "100ms")
                .put(SETTING_ALLOCATION_MAX_RETRY.getKey(), "0") // fail faster
                .build());
            ensureGreen(indexName);
            closeIndex(indexName);
        }

        final Version indexVersionCreated = indexVersionCreated(indexName);
        if (indexVersionCreated.onOrAfter(Version.V_7_1_0)) {
            // index was created on a version that supports the replication of closed indices,
            // so we expect the index to be closed and replicated
            ensureGreen(indexName);
            assertClosedIndex(indexName, true);
        } else {
            assertClosedIndex(indexName, false);
        }
    }

    /**
     * This test creates and closes a new index at every stage of the rolling upgrade. It then checks that the index
     * is effectively closed and potentially replicated if the cluster supports replication of closed indices at the
     * time the index was closed.
     */
    public void testCloseIndexDuringRollingUpgrade() throws Exception {
        final Version minimumNodeVersion = minimumNodeVersion();
        final String indexName =
            String.join("_", "index", CLUSTER_TYPE.toString(), Integer.toString(minimumNodeVersion.id)).toLowerCase(Locale.ROOT);

        final Request indexExistsRequest = new Request("HEAD", "/" + indexName);
        indexExistsRequest.setOptions(allowTypesRemovalWarnings());

        final Response indexExistsResponse = client().performRequest(indexExistsRequest);
        if (RestStatus.OK.getStatus() != indexExistsResponse.getStatusLine().getStatusCode()) {
            createIndex(indexName, Settings.builder()
                .put(IndexMetaData.INDEX_NUMBER_OF_SHARDS_SETTING.getKey(), 1)
                .put(IndexMetaData.INDEX_NUMBER_OF_REPLICAS_SETTING.getKey(), 0)
                .build());
            ensureGreen(indexName);
            closeIndex(indexName);
        }

        if (minimumNodeVersion.onOrAfter(Version.V_7_1_0)) {
            // index is created on a version that supports the replication of closed indices,
            // so we expect the index to be closed and replicated
            ensureGreen(indexName);
            assertClosedIndex(indexName, true);
        } else {
            assertClosedIndex(indexName, false);
        }
    }

    /**
     * Returns the version in which the given index has been created
     */
    private static Version indexVersionCreated(final String indexName) throws IOException {
        final Request request = new Request("GET", "/" + indexName + "/_settings");
        final String versionCreatedSetting = indexName + ".settings.index.version.created";
        request.addParameter("filter_path", versionCreatedSetting);

        final Response response = client().performRequest(request);
        return Version.fromId(Integer.parseInt(ObjectPath.createFromResponse(response).evaluate(versionCreatedSetting)));
    }

    /**
     * Returns the minimum node version among all nodes of the cluster
     */
    private static Version minimumNodeVersion() throws IOException {
        final Request request = new Request("GET", "_nodes");
        request.addParameter("filter_path", "nodes.*.version");

        final Response response = client().performRequest(request);
        final Map<String, Object> nodes = ObjectPath.createFromResponse(response).evaluate("nodes");

        Version minVersion = null;
        for (Map.Entry<String, Object> node : nodes.entrySet()) {
            @SuppressWarnings("unchecked")
            Version nodeVersion = Version.fromString((String) ((Map<String, Object>) node.getValue()).get("version"));
            if (minVersion == null || minVersion.after(nodeVersion)) {
                minVersion = nodeVersion;
            }
        }
        assertNotNull(minVersion);
        return minVersion;
    }

    /**
     * Asserts that an index is closed in the cluster state. If `checkRoutingTable` is true, it also asserts
     * that the index has started shards.
     */
    @SuppressWarnings("unchecked")
    private void assertClosedIndex(final String index, final boolean checkRoutingTable) throws IOException {
        final Map<String, ?> state = entityAsMap(client().performRequest(new Request("GET", "/_cluster/state")));

        final Map<String, ?> metadata = (Map<String, Object>) XContentMapValues.extractValue("metadata.indices." + index, state);
        assertThat(metadata, notNullValue());
        assertThat(metadata.get("state"), equalTo("close"));

        final Map<String, ?> blocks = (Map<String, Object>) XContentMapValues.extractValue("blocks.indices." + index, state);
        assertThat(blocks, notNullValue());
        assertThat(blocks.containsKey(String.valueOf(MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID)), is(true));

        final Map<String, ?> settings = (Map<String, Object>) XContentMapValues.extractValue("settings", metadata);
        assertThat(settings, notNullValue());

        final int numberOfShards = Integer.parseInt((String) XContentMapValues.extractValue("index.number_of_shards", settings));
        final int numberOfReplicas = Integer.parseInt((String) XContentMapValues.extractValue("index.number_of_replicas", settings));

        final Map<String, ?> routingTable = (Map<String, Object>) XContentMapValues.extractValue("routing_table.indices." + index, state);
        if (checkRoutingTable) {
            assertThat(routingTable, notNullValue());
            assertThat(Booleans.parseBoolean((String) XContentMapValues.extractValue("index.verified_before_close", settings)), is(true));

            for (int i = 0; i < numberOfShards; i++) {
                final Collection<Map<String, ?>> shards =
                    (Collection<Map<String, ?>>) XContentMapValues.extractValue("shards." + i, routingTable);
                assertThat(shards, notNullValue());
                assertThat(shards.size(), equalTo(numberOfReplicas + 1));
                for (Map<String, ?> shard : shards) {
                    assertThat(XContentMapValues.extractValue("shard", shard), equalTo(i));
                    assertThat(XContentMapValues.extractValue("state", shard), equalTo("STARTED"));
                    assertThat(XContentMapValues.extractValue("index", shard), equalTo(index));
                }
            }
        } else {
            assertThat(routingTable, nullValue());
            assertThat(XContentMapValues.extractValue("index.verified_before_close", settings), nullValue());
        }
    }
}
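Since RecoveryIT drives everything through the low-level REST client, the adapted Recovery API can also be exercised directly. A hedged sketch follows (the index name and the containsKey assertion are illustrative; Request, entityAsMap and client() are the helpers already used throughout this class):

    // Sketch: the Recovery API now reports shards of closed indices, recovered
    // from their existing on-disk store (recovery type EXISTING_STORE).
    Request recoveryRequest = new Request("GET", "/closed_index_created_on_old/_recovery");
    Map<String, Object> recoveryResponse = entityAsMap(client().performRequest(recoveryRequest));
    assertThat(recoveryResponse.containsKey("closed_index_created_on_old"), is(true));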
@@ -12,6 +12,12 @@
      }
    },
    "params": {
      "expand_wildcards": {
        "type" : "enum",
        "options" : ["open","closed","none","all"],
        "default" : "all",
        "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
      },
      "level": {
        "type" : "enum",
        "options" : ["cluster","indices","shards"],
@@ -34,6 +34,10 @@
        "options" : ["open","closed","none","all"],
        "default" : "open",
        "description" : "Whether to expand wildcard expression to concrete indices that are open, closed or both."
      },
      "wait_for_active_shards": {
        "type" : "string",
        "description" : "Sets the number of active shards to wait for before the operation returns."
      }
    }
  },
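For orientation, the new expand_wildcards parameter is backed by the IndicesOptions plumbing added to ClusterHealthRequest further below. A hedged Java sketch of restricting health reporting to closed indices only (the fromOptions flags follow the usual ignoreUnavailable/allowNoIndices/expandOpen/expandClosed order; the index pattern is illustrative):

    import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
    import org.elasticsearch.action.support.IndicesOptions;

    // Sketch: roughly the equivalent of GET _cluster/health/index-*?expand_wildcards=closed
    ClusterHealthRequest request = new ClusterHealthRequest("index-*")
        .indicesOptions(IndicesOptions.fromOptions(true, true, false, true));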
@@ -1,5 +1,5 @@
---
-"Test cat indices output":
+"Test cat indices output (no indices)":

  - do:
      cat.indices: {}
@@ -7,6 +7,8 @@
  - match:
      $body: |
              /^$/
---
"Test cat indices output":

  - do:
      indices.create:
@@ -47,29 +49,88 @@
              (\d\d\d\d\-\d\d\-\d\dT\d\d:\d\d:\d\d.\d\d\dZ) \s*
              )
              $/
---
"Test cat indices output for closed index (pre 7.1.0)":
  - skip:
      version: "7.1.0 - "
      reason: "closed indices are replicated starting version 7.1.0"

  - do:
      indices.create:
        index: index-2
        body:
          settings:
            number_of_shards: 3
            number_of_replicas: 0

  - do:
      indices.close:
-       index: index1
+       index: index-2
  - is_true: acknowledged

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      cat.indices:
-       index: index*
+       index: index-*

  - match:
      $body: |
-             /^( \s+
-                 close \s+
-                 index1 \s+
+             /^( \s+
+                 close \s+
+                 index-2 \s+
                  ([a-zA-Z0-9=/_+]|[\\\-]){22} \s+
                        \s+
                        \s+
                        \s+
                        \s+
                        \s+
                        \s*
                        \s+
                        \s+
                        \s+
                        \s+
                        \s+
                        \s*
              )
              $/
---
"Test cat indices output for closed index":
  - skip:
      version: " - 7.0.99"
      reason: "closed indices are replicated starting version 7.1.0"

  - do:
      indices.create:
        index: index-2
        body:
          settings:
            number_of_shards: 3
            number_of_replicas: 0

  - do:
      indices.close:
        index: index-2
  - is_true: acknowledged

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      cat.indices:
        index: index-*

  - match:
      $body: |
              /^(green \s+
                  close \s+
                  index-2 \s+
                  ([a-zA-Z0-9=/_+]|[\\\-]){22} \s+
                  3 \s+
                  0 \s+
                        \s+
                        \s+
                        \s+
                        \s*
              )
              $/
---
"Test cat indices using health status":
@@ -75,3 +75,60 @@
        \n
      )+
      $/

---
"Test cat recovery output for closed index":
  - skip:
      version: " - 7.0.99"
      reason: closed indices are replicated starting version 7.1.0

  - do:
      indices.create:
        index: index2
        body:
          settings:
            index:
              number_of_replicas: 0

  - do:
      indices.close:
        index: index2
  - is_true: acknowledged

  - do:
      cluster.health:
        index: index2
        wait_for_status: green

  - do:
      cat.recovery:
        index: index2
        h: i,s,t,ty,st,shost,thost,rep,snap,f,fr,fp,tf,b,br,bp,tb,to,tor,top

  - match:
      $body: |
        /^
        (
          index2                    \s+
          \d                        \s+  # shard
          (?:\d+ms|\d+(?:\.\d+)?s)  \s+  # time in ms or seconds
          existing_store            \s+  # source type (always existing_store for closed indices)
          done                      \s+  # stage
          [-\w./]+                  \s+  # source_host
          [-\w./]+                  \s+  # target_host
          [-\w./]+                  \s+  # repository
          [-\w./]+                  \s+  # snapshot
          \d+                       \s+  # files
          \d+                       \s+  # files_recovered
          \d+\.\d+%                 \s+  # files_percent
          \d+                       \s+  # files_total
          \d+                       \s+  # bytes
          \d+                       \s+  # bytes_recovered
          \d+\.\d+%                 \s+  # bytes_percent
          \d+                       \s+  # bytes_total
          0                         \s+  # translog_ops (always 0 for closed indices)
          0                         \s+  # translog_ops_recovered (always 0 for closed indices)
          100\.0%                        # translog_ops_percent (always 100.0% for closed indices)
          \n
        )+
        $/
@@ -53,3 +53,47 @@
  - match: { primary: false }
  - is_true: cluster_info
  - is_true: can_allocate

---
"Cluster shard allocation explanation test with a closed index":
  - skip:
      version: " - 7.0.99"
      reason: closed indices are replicated starting version 7.1.0

  - do:
      indices.create:
        index: test_closed
        body: { "settings": { "index.number_of_shards": 1, "index.number_of_replicas": 0 } }

  - match: { acknowledged: true }

  - do:
      cluster.health:
        index: test_closed
        wait_for_status: green

  - do:
      indices.close:
        index: test_closed

  - match: { acknowledged: true }

  - do:
      cluster.health:
        index: test_closed
        wait_for_status: green

  - do:
      cluster.allocation_explain:
        body: { "index": "test_closed", "shard": 0, "primary": true }

  - match: { current_state: "started" }
  - is_true: current_node.id
  - match: { index: "test_closed" }
  - match: { shard: 0 }
  - match: { primary: true }
  - is_true: can_remain_on_current_node
  - is_true: can_rebalance_cluster
  - is_true: can_rebalance_to_other_node
  - is_true: rebalance_explanation
@@ -132,4 +132,150 @@
  - is_true: indices
  - is_true: indices.test_index.shards

---
"cluster health with closed index (pre 7.1.0)":
  - skip:
      version: "7.1.0 - "
      reason: "closed indices are replicated starting version 7.1.0"

  - do:
      indices.create:
        index: index-1
        body:
          settings:
            index:
              number_of_replicas: 0

  - do:
      cluster.health:
        wait_for_status: green
  - match: { status: green }

  - do:
      indices.create:
        index: index-2
        body:
          settings:
            index:
              number_of_replicas: 50

  - do:
      cluster.health:
        wait_for_status: yellow
        wait_for_no_relocating_shards: true
  - match: { status: yellow }

  - do:
      cluster.health:
        index: index-*
  - match: { status: yellow }

  - do:
      cluster.health:
        index: index-1
  - match: { status: green }

  - do:
      cluster.health:
        index: index-2
  - match: { status: yellow }

  - do:
      indices.close:
        index: index-2
  - is_true: acknowledged

  # closing index-2 turns the cluster health back to green
  - do:
      cluster.health:
        wait_for_status: green
  - match: { status: green }

  - do:
      cluster.health:
        index: index-*
  - match: { status: green }

  - do:
      cluster.health:
        index: index-1
  - match: { status: green }

  - do:
      cluster.health:
        index: index-2
  - match: { status: green }

---
"cluster health with closed index":
  - skip:
      version: " - 7.0.99"
      reason: "closed indices are replicated starting version 7.1.0"

  - do:
      indices.create:
        index: index-1
        body:
          settings:
            index:
              number_of_replicas: 0

  - do:
      cluster.health:
        wait_for_status: green
  - match: { status: green }

  - do:
      indices.create:
        index: index-2
        body:
          settings:
            index:
              number_of_replicas: 50

  - do:
      cluster.health:
        wait_for_status: yellow
        wait_for_no_relocating_shards: true
  - match: { status: yellow }

  - do:
      cluster.health:
        index: index-*
  - match: { status: yellow }

  - do:
      cluster.health:
        index: index-1
  - match: { status: green }

  - do:
      cluster.health:
        index: index-2
  - match: { status: yellow }

  # closing index-2 does not change the cluster health with replicated closed indices
  - do:
      indices.close:
        index: index-2
  - is_true: acknowledged

  - do:
      cluster.health:
        wait_for_status: yellow
  - match: { status: yellow }

  - do:
      cluster.health:
        index: index-*
  - match: { status: yellow }

  - do:
      cluster.health:
        index: index-1
  - match: { status: green }

  - do:
      cluster.health:
        index: index-2
  - match: { status: yellow }
@@ -0,0 +1,79 @@
setup:

  - do:
      indices.create:
        index: index-1
        body:
          settings:
            number_of_shards: 1
            number_of_replicas: 0

  - do:
      indices.create:
        index: index-2
        body:
          settings:
            number_of_shards: 2
            number_of_replicas: 0

  - do:
      cluster.health:
        wait_for_status: green

  - do:
      indices.close:
        index: index-2

  - do:
      cluster.health:
        wait_for_status: green

---
"cluster health with expand_wildcards":
  - skip:
      version: " - 7.0.99"
      reason: "indices options has been introduced in cluster health request starting version 7.1.0"

  - do:
      cluster.health:
        index: "index-*"
        level: indices
        expand_wildcards: open
  - match: { status: green }
  - match: { active_shards: 1 }
  - match: { indices.index-1.status: green }
  - match: { indices.index-1.active_shards: 1 }
  - is_false: indices.index-2

  - do:
      cluster.health:
        index: "index-*"
        level: indices
        expand_wildcards: closed
  - match: { status: green }
  - match: { active_shards: 2 }
  - is_false: indices.index-1
  - match: { indices.index-2.status: green }
  - match: { indices.index-2.active_shards: 2 }

  - do:
      cluster.health:
        index: "index-*"
        level: indices
        expand_wildcards: all
  - match: { status: green }
  - match: { active_shards: 3 }
  - match: { indices.index-1.status: green }
  - match: { indices.index-1.active_shards: 1 }
  - match: { indices.index-2.status: green }
  - match: { indices.index-2.active_shards: 2 }

  - do:
      cluster.health:
        index: "index-*"
        level: indices
        expand_wildcards: none
  - match: { status: green }
  - match: { active_shards: 0 }
  - is_false: indices.index-1
  - is_false: indices.index-2
@@ -14,6 +14,7 @@
  - do:
      indices.close:
        index: test_index
  - is_true: acknowledged

  - do:
      catch: bad_request
@@ -24,6 +25,7 @@
  - do:
      indices.open:
        index: test_index
  - is_true: acknowledged

  - do:
      cluster.health:
@@ -50,11 +52,33 @@
  - do:
      indices.close:
        index: test_index
  - is_true: acknowledged

  - do:
      indices.open:
        index: test_index
        wait_for_active_shards: all

  - is_true: acknowledged
  - match: { acknowledged: true }
  - match: { shards_acknowledged: true }

---
"Close index with wait_for_active_shards set to all":
  - skip:
      version: " - 7.0.99"
      reason: "closed indices are replicated starting version 7.1.0"

  - do:
      indices.create:
        index: test_index
        body:
          settings:
            number_of_replicas: 0

  - do:
      indices.close:
        index: test_index
        wait_for_active_shards: all
  - is_true: acknowledged
  - match: { acknowledged: true }
  - match: { shards_acknowledged: true }
@@ -26,6 +26,7 @@ setup:
  - do:
      indices.close:
        index: _all
  - is_true: acknowledged

  - do:
      catch: bad_request
@@ -36,6 +37,7 @@ setup:
  - do:
      indices.open:
        index: _all
  - is_true: acknowledged

  - do:
      cluster.health:
@@ -51,6 +53,7 @@ setup:
  - do:
      indices.close:
        index: test_*
  - is_true: acknowledged

  - do:
      catch: bad_request
@@ -61,6 +64,7 @@ setup:
  - do:
      indices.open:
        index: test_*
  - is_true: acknowledged

  - do:
      cluster.health:
@@ -76,6 +80,7 @@ setup:
  - do:
      indices.close:
        index: '*'
  - is_true: acknowledged

  - do:
      catch: bad_request
@@ -86,6 +91,7 @@ setup:
  - do:
      indices.open:
        index: '*'
  - is_true: acknowledged

  - do:
      cluster.health:
@@ -40,6 +40,56 @@
  - gte: { test_1.shards.0.verify_index.check_index_time_in_millis: 0 }
  - gte: { test_1.shards.0.verify_index.total_time_in_millis: 0 }
---
"Indices recovery test for closed index":
  - skip:
      version: " - 7.0.99"
      reason: closed indices are replicated starting version 7.1.0

  - do:
      indices.create:
        index: test_2
        body:
          settings:
            index:
              number_of_replicas: 0

  - do:
      indices.close:
        index: test_2
  - is_true: acknowledged

  - do:
      cluster.health:
        index: test_2
        wait_for_status: green

  - do:
      indices.recovery:
        index: [test_2]
        human: true

  - match: { test_2.shards.0.type: "EXISTING_STORE" }
  - match: { test_2.shards.0.stage: "DONE" }
  - match: { test_2.shards.0.primary: true }
  - match: { test_2.shards.0.start_time: /^2\d\d\d-.+/ }
  - match: { test_2.shards.0.target.ip: /^\d+\.\d+\.\d+\.\d+$/ }
  - gte: { test_2.shards.0.index.files.total: 0 }
  - gte: { test_2.shards.0.index.files.reused: 0 }
  - gte: { test_2.shards.0.index.files.recovered: 0 }
  - match: { test_2.shards.0.index.files.percent: /^\d+\.\d\%$/ }
  - gte: { test_2.shards.0.index.size.total_in_bytes: 0 }
  - gte: { test_2.shards.0.index.size.reused_in_bytes: 0 }
  - gte: { test_2.shards.0.index.size.recovered_in_bytes: 0 }
  - match: { test_2.shards.0.index.size.percent: /^\d+\.\d\%$/ }
  - gte: { test_2.shards.0.index.source_throttle_time_in_millis: 0 }
  - gte: { test_2.shards.0.index.target_throttle_time_in_millis: 0 }
  - gte: { test_2.shards.0.translog.recovered: 0 }
  - gte: { test_2.shards.0.translog.total: 0 }
  - gte: { test_2.shards.0.translog.total_on_start: 0 }
  - gte: { test_2.shards.0.translog.total_time_in_millis: 0 }
  - gte: { test_2.shards.0.verify_index.check_index_time_in_millis: 0 }
  - gte: { test_2.shards.0.verify_index.total_time_in_millis: 0 }
---
"Indices recovery test index name not matching":

  - do:
@@ -39,6 +39,7 @@ import java.util.concurrent.TimeUnit;
public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthRequest> implements IndicesRequest.Replaceable {

    private String[] indices;
    private IndicesOptions indicesOptions = IndicesOptions.lenientExpand();
    private TimeValue timeout = new TimeValue(30, TimeUnit.SECONDS);
    private ClusterHealthStatus waitForStatus;
    private boolean waitForNoRelocatingShards = false;
@@ -83,6 +84,11 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthRequest>
        if (in.getVersion().onOrAfter(Version.V_6_2_0)) {
            waitForNoInitializingShards = in.readBoolean();
        }
        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
            indicesOptions = IndicesOptions.readIndicesOptions(in);
        } else {
            indicesOptions = IndicesOptions.lenientExpandOpen();
        }
    }

    @Override
@@ -115,6 +121,9 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthRequest>
        if (out.getVersion().onOrAfter(Version.V_6_2_0)) {
            out.writeBoolean(waitForNoInitializingShards);
        }
        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
            indicesOptions.writeIndicesOptions(out);
        }
    }

    @Override
@@ -130,7 +139,12 @@ public class ClusterHealthRequest extends MasterNodeReadRequest<ClusterHealthRequest>

    @Override
    public IndicesOptions indicesOptions() {
-       return IndicesOptions.lenientExpandOpen();
+       return indicesOptions;
    }

    public ClusterHealthRequest indicesOptions(final IndicesOptions indicesOptions) {
        this.indicesOptions = indicesOptions;
        return this;
    }

    public TimeValue timeout() {
@@ -20,6 +20,7 @@
package org.elasticsearch.action.admin.cluster.health;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.MasterNodeReadOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
@@ -38,6 +39,11 @@ public class ClusterHealthRequestBuilder
        return this;
    }

    public ClusterHealthRequestBuilder setIndicesOptions(final IndicesOptions indicesOptions) {
        request.indicesOptions(indicesOptions);
        return this;
    }

    public ClusterHealthRequestBuilder setTimeout(TimeValue timeout) {
        request.timeout(timeout);
        return this;
@@ -20,9 +20,8 @@
package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.action.Action;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;

-public class CloseIndexAction extends Action<AcknowledgedResponse> {
+public class CloseIndexAction extends Action<CloseIndexResponse> {

    public static final CloseIndexAction INSTANCE = new CloseIndexAction();
    public static final String NAME = "indices:admin/close";
@@ -32,7 +31,7 @@ public class CloseIndexAction extends Action<CloseIndexResponse> {
    }

    @Override
-   public AcknowledgedResponse newResponse() {
-       return new AcknowledgedResponse();
+   public CloseIndexResponse newResponse() {
+       return new CloseIndexResponse();
    }
}
@@ -18,6 +18,7 @@
 */
package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;

/**
@@ -25,7 +26,8 @@ import org.elasticsearch.cluster.ack.IndicesClusterStateUpdateRequest;
 */
public class CloseIndexClusterStateUpdateRequest extends IndicesClusterStateUpdateRequest<CloseIndexClusterStateUpdateRequest> {

-   private final long taskId;
+   private long taskId;
    private ActiveShardCount waitForActiveShards = ActiveShardCount.DEFAULT;

    public CloseIndexClusterStateUpdateRequest(final long taskId) {
        this.taskId = taskId;
@@ -34,4 +36,18 @@ public class CloseIndexClusterStateUpdateRequest extends IndicesClusterStateUpda
    public long taskId() {
        return taskId;
    }

    public CloseIndexClusterStateUpdateRequest taskId(final long taskId) {
        this.taskId = taskId;
        return this;
    }

    public ActiveShardCount waitForActiveShards() {
        return waitForActiveShards;
    }

    public CloseIndexClusterStateUpdateRequest waitForActiveShards(final ActiveShardCount waitForActiveShards) {
        this.waitForActiveShards = waitForActiveShards;
        return this;
    }
}
@@ -19,8 +19,10 @@

package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.Version;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequest;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -38,6 +40,7 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im

    private String[] indices;
    private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpen();
    private ActiveShardCount waitForActiveShards = ActiveShardCount.NONE;

    public CloseIndexRequest() {
    }
@@ -101,11 +104,25 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im
        return this;
    }

    public ActiveShardCount waitForActiveShards() {
        return waitForActiveShards;
    }

    public CloseIndexRequest waitForActiveShards(final ActiveShardCount waitForActiveShards) {
        this.waitForActiveShards = waitForActiveShards;
        return this;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        indices = in.readStringArray();
        indicesOptions = IndicesOptions.readIndicesOptions(in);
        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
            waitForActiveShards = ActiveShardCount.readFrom(in);
        } else {
            waitForActiveShards = ActiveShardCount.NONE;
        }
    }

    @Override
@@ -113,5 +130,8 @@ public class CloseIndexRequest extends AcknowledgedRequest<CloseIndexRequest> im
        super.writeTo(out);
        out.writeStringArray(indices);
        indicesOptions.writeIndicesOptions(out);
        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
            waitForActiveShards.writeTo(out);
        }
    }
}
@@ -19,16 +19,16 @@

package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.client.ElasticsearchClient;

/**
 * Builder for close index request
 */
public class CloseIndexRequestBuilder
-       extends AcknowledgedRequestBuilder<CloseIndexRequest, AcknowledgedResponse, CloseIndexRequestBuilder> {
+       extends AcknowledgedRequestBuilder<CloseIndexRequest, CloseIndexResponse, CloseIndexRequestBuilder> {

    public CloseIndexRequestBuilder(ElasticsearchClient client, CloseIndexAction action) {
        super(client, action, new CloseIndexRequest());
@@ -60,4 +60,31 @@ public class CloseIndexRequestBuilder
        request.indicesOptions(indicesOptions);
        return this;
    }

    /**
     * Sets the number of shard copies that should be active for indices closing to return.
     * Defaults to {@link ActiveShardCount#DEFAULT}, which will wait for one shard copy
     * (the primary) to become active. Set this value to {@link ActiveShardCount#ALL} to
     * wait for all shards (primary and all replicas) to be active before returning.
     * Otherwise, use {@link ActiveShardCount#from(int)} to set this value to any
     * non-negative integer, up to the number of copies per shard (number of replicas + 1),
     * to wait for the desired amount of shard copies to become active before returning.
     * Indices closing will only wait up until the timeout value for the number of shard copies
     * to be active before returning.
     *
     * @param waitForActiveShards number of active shard copies to wait on
     */
    public CloseIndexRequestBuilder setWaitForActiveShards(final ActiveShardCount waitForActiveShards) {
        request.waitForActiveShards(waitForActiveShards);
        return this;
    }

    /**
     * A shortcut for {@link #setWaitForActiveShards(ActiveShardCount)} where the numerical
     * shard count is passed in, instead of having to first call {@link ActiveShardCount#from(int)}
     * to get the ActiveShardCount.
     */
    public CloseIndexRequestBuilder setWaitForActiveShards(final int waitForActiveShards) {
        return setWaitForActiveShards(ActiveShardCount.from(waitForActiveShards));
    }
}
@@ -0,0 +1,52 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.ShardsAcknowledgedResponse;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class CloseIndexResponse extends ShardsAcknowledgedResponse {

    CloseIndexResponse() {
    }

    public CloseIndexResponse(final boolean acknowledged, final boolean shardsAcknowledged) {
        super(acknowledged, shardsAcknowledged);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
            readShardsAcknowledged(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
            writeShardsAcknowledged(out);
        }
    }
}
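To make the version gate above concrete, here is a hedged, test-style round trip through the stream abstractions (BytesStreamOutput/StreamInput and their setVersion calls are the standard Elasticsearch test utilities; same-package access is assumed for the package-private constructor):

    // Sketch: serialize on a 7.1.0 stream and read the response back.
    CloseIndexResponse original = new CloseIndexResponse(true, true);
    BytesStreamOutput out = new BytesStreamOutput();
    out.setVersion(Version.V_7_1_0);
    original.writeTo(out);

    StreamInput in = out.bytes().streamInput();
    in.setVersion(Version.V_7_1_0);
    CloseIndexResponse copy = new CloseIndexResponse();
    copy.readFrom(in);
    // copy.isShardsAcknowledged() is true here; on a pre-7.1.0 stream version
    // the flag is never written, and the reader keeps its default instead.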
@@ -23,7 +23,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DestructiveOperations;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -44,7 +43,7 @@ import org.elasticsearch.transport.TransportService;
/**
 * Close index action
 */
-public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, AcknowledgedResponse> {
+public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIndexRequest, CloseIndexResponse> {

    private final MetaDataIndexStateService indexStateService;
    private final DestructiveOperations destructiveOperations;
@@ -76,12 +75,12 @@ public class TransportCloseIndexAction
    }

    @Override
-   protected AcknowledgedResponse newResponse() {
-       return new AcknowledgedResponse();
+   protected CloseIndexResponse newResponse() {
+       return new CloseIndexResponse();
    }

    @Override
-   protected void doExecute(Task task, CloseIndexRequest request, ActionListener<AcknowledgedResponse> listener) {
+   protected void doExecute(Task task, CloseIndexRequest request, ActionListener<CloseIndexResponse> listener) {
        destructiveOperations.failDestructive(request.indices());
        if (closeIndexEnabled == false) {
            throw new IllegalStateException("closing indices is disabled - set [" + CLUSTER_INDICES_CLOSE_ENABLE_SETTING.getKey() +
@@ -97,29 +96,33 @@ public class TransportCloseIndexAction
    }

    @Override
-   protected void masterOperation(final CloseIndexRequest request, final ClusterState state,
-                                  final ActionListener<AcknowledgedResponse> listener) {
+   protected void masterOperation(final CloseIndexRequest request,
+                                  final ClusterState state,
+                                  final ActionListener<CloseIndexResponse> listener) {
        throw new UnsupportedOperationException("The task parameter is required");
    }

    @Override
-   protected void masterOperation(final Task task, final CloseIndexRequest request, final ClusterState state,
-                                  final ActionListener<AcknowledgedResponse> listener) throws Exception {
+   protected void masterOperation(final Task task,
+                                  final CloseIndexRequest request,
+                                  final ClusterState state,
+                                  final ActionListener<CloseIndexResponse> listener) throws Exception {
        final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
        if (concreteIndices == null || concreteIndices.length == 0) {
-           listener.onResponse(new AcknowledgedResponse(true));
+           listener.onResponse(new CloseIndexResponse(true, false));
            return;
        }

        final CloseIndexClusterStateUpdateRequest closeRequest = new CloseIndexClusterStateUpdateRequest(task.getId())
            .ackTimeout(request.timeout())
            .masterNodeTimeout(request.masterNodeTimeout())
            .waitForActiveShards(request.waitForActiveShards())
            .indices(concreteIndices);

-       indexStateService.closeIndices(closeRequest, new ActionListener<AcknowledgedResponse>() {
+       indexStateService.closeIndices(closeRequest, new ActionListener<CloseIndexResponse>() {

            @Override
-           public void onResponse(final AcknowledgedResponse response) {
+           public void onResponse(final CloseIndexResponse response) {
                listener.onResponse(response);
            }
@@ -19,6 +19,7 @@

package org.elasticsearch.action.admin.indices.recovery;

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@@ -47,7 +48,7 @@ public class RecoveryRequest extends BroadcastRequest<RecoveryRequest> {
     * @param indices Comma-separated list of indices about which to gather recovery information
     */
    public RecoveryRequest(String... indices) {
-       super(indices);
+       super(indices, IndicesOptions.STRICT_EXPAND_OPEN_CLOSED);
    }

    /**
@@ -112,11 +112,11 @@ public class TransportRecoveryAction extends TransportBroadcastByNodeAction<Reco

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, RecoveryRequest request) {
-       return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
+       return state.blocks().globalBlockedException(ClusterBlockLevel.METADATA_READ);
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) {
-       return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
+       return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices);
    }
}
@@ -90,7 +90,11 @@ public class IndicesOptions implements ToXContentFragment {
    public static final IndicesOptions STRICT_EXPAND_OPEN =
        new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES), EnumSet.of(WildcardStates.OPEN));
    public static final IndicesOptions LENIENT_EXPAND_OPEN =
-       new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE), EnumSet.of(WildcardStates.OPEN));
+       new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE),
+           EnumSet.of(WildcardStates.OPEN));
+   public static final IndicesOptions LENIENT_EXPAND_OPEN_CLOSED =
+       new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES, Option.IGNORE_UNAVAILABLE),
+           EnumSet.of(WildcardStates.OPEN, WildcardStates.CLOSED));
    public static final IndicesOptions STRICT_EXPAND_OPEN_CLOSED =
        new IndicesOptions(EnumSet.of(Option.ALLOW_NO_INDICES), EnumSet.of(WildcardStates.OPEN, WildcardStates.CLOSED));
    public static final IndicesOptions STRICT_EXPAND_OPEN_FORBID_CLOSED =
@@ -440,6 +444,14 @@ public class IndicesOptions implements ToXContentFragment {
        return LENIENT_EXPAND_OPEN;
    }

    /**
     * @return indices options that ignore unavailable indices, expand wildcards to both open and closed
     * indices, and allow that no indices are resolved from wildcard expressions (not returning an error).
     */
    public static IndicesOptions lenientExpand() {
        return LENIENT_EXPAND_OPEN_CLOSED;
    }

    @Override
    public boolean equals(Object obj) {
        if (obj == null) {
@@ -40,6 +40,11 @@ public class BroadcastRequest<Request extends BroadcastRequest<Request>> extends
        this.indices = indices;
    }

    protected BroadcastRequest(String[] indices, IndicesOptions indicesOptions) {
        this.indices = indices;
        this.indicesOptions = indicesOptions;
    }

    @Override
    public String[] indices() {
        return indices;
@@ -36,6 +36,7 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheReque
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheResponse;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.create.CreateIndexResponse;
@@ -307,7 +308,7 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     * @return The result future
     * @see org.elasticsearch.client.Requests#closeIndexRequest(String)
     */
-   ActionFuture<AcknowledgedResponse> close(CloseIndexRequest request);
+   ActionFuture<CloseIndexResponse> close(CloseIndexRequest request);

    /**
     * Closes an index based on the index name.
@@ -316,7 +317,7 @@ public interface IndicesAdminClient extends ElasticsearchClient {
     * @param listener A listener to be notified with a result
     * @see org.elasticsearch.client.Requests#closeIndexRequest(String)
     */
-   void close(CloseIndexRequest request, ActionListener<AcknowledgedResponse> listener);
+   void close(CloseIndexRequest request, ActionListener<CloseIndexResponse> listener);

    /**
     * Closes one or more indices based on their index name.
@@ -152,6 +152,7 @@ import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRespo
import org.elasticsearch.action.admin.indices.close.CloseIndexAction;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequestBuilder;
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
import org.elasticsearch.action.admin.indices.create.CreateIndexAction;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequest;
import org.elasticsearch.action.admin.indices.create.CreateIndexRequestBuilder;
@@ -1355,12 +1356,12 @@ public abstract class AbstractClient implements Client {
    }

    @Override
-   public ActionFuture<AcknowledgedResponse> close(final CloseIndexRequest request) {
+   public ActionFuture<CloseIndexResponse> close(final CloseIndexRequest request) {
        return execute(CloseIndexAction.INSTANCE, request);
    }

    @Override
-   public void close(final CloseIndexRequest request, final ActionListener<AcknowledgedResponse> listener) {
+   public void close(final CloseIndexRequest request, final ActionListener<CloseIndexResponse> listener) {
        execute(CloseIndexAction.INSTANCE, request, listener);
    }
@ -28,6 +28,7 @@ import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.NotifyOnceListener;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
|
||||
import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction;
|
||||
import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
|
||||
import org.elasticsearch.action.support.ActiveShardsObserver;
|
||||
@ -52,6 +53,8 @@ import org.elasticsearch.common.UUIDs;
|
||||
import org.elasticsearch.common.ValidationException;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
@ -90,6 +93,8 @@ public class MetaDataIndexStateService {
|
||||
public static final int INDEX_CLOSED_BLOCK_ID = 4;
|
||||
public static final ClusterBlock INDEX_CLOSED_BLOCK = new ClusterBlock(4, "index closed", false,
|
||||
false, false, RestStatus.FORBIDDEN, ClusterBlockLevel.READ_WRITE);
|
||||
public static final Setting<Boolean> VERIFIED_BEFORE_CLOSE_SETTING =
|
||||
Setting.boolSetting("index.verified_before_close", false, Setting.Property.IndexScope, Setting.Property.PrivateIndex);
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final AllocationService allocationService;
|
||||
@@ -119,7 +124,7 @@ public class MetaDataIndexStateService {
      * Closing indices is a 3-step process: it first adds a write block to every index to close, then waits for the operations on shards
      * to be terminated and finally closes the indices by moving their state to CLOSE.
      */
-    public void closeIndices(final CloseIndexClusterStateUpdateRequest request, final ActionListener<AcknowledgedResponse> listener) {
+    public void closeIndices(final CloseIndexClusterStateUpdateRequest request, final ActionListener<CloseIndexResponse> listener) {
         final Index[] concreteIndices = request.indices();
         if (concreteIndices == null || concreteIndices.length == 0) {
             throw new IllegalArgumentException("Index name is required");
@@ -139,7 +144,7 @@ public class MetaDataIndexStateService {
             public void clusterStateProcessed(final String source, final ClusterState oldState, final ClusterState newState) {
                 if (oldState == newState) {
                     assert blockedIndices.isEmpty() : "List of blocked indices is not empty but cluster state wasn't changed";
-                    listener.onResponse(new AcknowledgedResponse(true));
+                    listener.onResponse(new CloseIndexResponse(true, false));
                 } else {
                     assert blockedIndices.isEmpty() == false : "List of blocked indices is empty but cluster state was changed";
                     threadPool.executor(ThreadPool.Names.MANAGEMENT)
@@ -170,7 +175,29 @@ public class MetaDataIndexStateService {
                         @Override
                         public void clusterStateProcessed(final String source,
                                                           final ClusterState oldState, final ClusterState newState) {
-                            listener.onResponse(new AcknowledgedResponse(acknowledged));
+
+                            final String[] indices = results.entrySet().stream()
+                                .filter(result -> result.getValue().isAcknowledged())
+                                .map(result -> result.getKey().getName())
+                                .filter(index -> newState.routingTable().hasIndex(index))
+                                .toArray(String[]::new);
+
+                            if (indices.length > 0) {
+                                activeShardsObserver.waitForActiveShards(indices, request.waitForActiveShards(),
+                                    request.ackTimeout(), shardsAcknowledged -> {
+                                        if (shardsAcknowledged == false) {
+                                            logger.debug("[{}] indices closed, but the operation timed out while waiting " +
+                                                "for enough shards to be started.", Arrays.toString(indices));
+                                        }
+                                        // acknowledged may be false but some indices may have been correctly closed, so
+                                        // we maintain a kind of coherency by overriding the shardsAcknowledged value
+                                        // (see ShardsAcknowledgedResponse constructor)
+                                        boolean shardsAcked = acknowledged ? shardsAcknowledged : false;
+                                        listener.onResponse(new CloseIndexResponse(acknowledged, shardsAcked));
+                                    }, listener::onFailure);
+                            } else {
+                                listener.onResponse(new CloseIndexResponse(acknowledged, false));
+                            }
                         }
                     }),
                     listener::onFailure)
@@ -223,10 +250,6 @@ public class MetaDataIndexStateService {
         // Check if index closing conflicts with any running snapshots
         SnapshotsService.checkIndexClosing(currentState, indicesToClose);

-        // If the cluster is in a mixed version that does not support the shard close action,
-        // we use the previous way to close indices and directly close them without sanity checks
-        final boolean useDirectClose = currentState.nodes().getMinNodeVersion().before(Version.V_6_7_0);
-
         final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
         final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());

@@ -244,19 +267,11 @@ public class MetaDataIndexStateService {
                     }
                 }
             }
-            if (useDirectClose) {
-                logger.debug("closing index {} directly", index);
-                metadata.put(IndexMetaData.builder(indexToClose).state(IndexMetaData.State.CLOSE)); // increment version?
-                blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID);
-                routingTable.remove(index.getName());
-                indexBlock = INDEX_CLOSED_BLOCK;
-            } else {
-                if (indexBlock == null) {
-                    // Create a new index closed block
-                    indexBlock = createIndexClosingBlock();
-                }
-                assert Strings.hasLength(indexBlock.uuid()) : "Closing block should have a UUID";
-            }
+            if (indexBlock == null) {
+                // Create a new index closed block
+                indexBlock = createIndexClosingBlock();
+            }
+            assert Strings.hasLength(indexBlock.uuid()) : "Closing block should have a UUID";
             blocks.addIndexBlock(index.getName(), indexBlock);
             blockedIndices.put(index, indexBlock);
         }
@@ -384,6 +399,11 @@ public class MetaDataIndexStateService {
     static ClusterState closeRoutingTable(final ClusterState currentState,
                                           final Map<Index, ClusterBlock> blockedIndices,
                                           final Map<Index, AcknowledgedResponse> results) {
+
+        // Remove the index routing table of closed indices if the cluster is in a mixed version
+        // that does not support the replication of closed indices
+        final boolean removeRoutingTable = currentState.nodes().getMinNodeVersion().before(Version.V_7_1_0);
+
         final MetaData.Builder metadata = MetaData.builder(currentState.metaData());
         final ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
         final RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
@@ -409,15 +429,28 @@ public class MetaDataIndexStateService {
                     continue;
                 }

+                blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID);
+                blocks.addIndexBlock(index.getName(), INDEX_CLOSED_BLOCK);
+                final IndexMetaData.Builder updatedMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE);
+                if (removeRoutingTable) {
+                    metadata.put(updatedMetaData);
+                    routingTable.remove(index.getName());
+                } else {
+                    metadata.put(updatedMetaData
+                        .settingsVersion(indexMetaData.getSettingsVersion() + 1)
+                        .settings(Settings.builder()
+                            .put(indexMetaData.getSettings())
+                            .put(VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)));
+                    routingTable.addAsFromOpenToClose(metadata.getSafe(index));
+                }
+
                 logger.debug("closing index {} succeeded", index);
-                blocks.removeIndexBlockWithId(index.getName(), INDEX_CLOSED_BLOCK_ID).addIndexBlock(index.getName(), INDEX_CLOSED_BLOCK);
-                metadata.put(IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE));
-                routingTable.remove(index.getName());
                 closedIndices.add(index.getName());
             } catch (final IndexNotFoundException e) {
                 logger.debug("index {} has been deleted since it was blocked before closing, ignoring", index);
             }
         }

         logger.info("completed closing of indices {}", closedIndices);
         return ClusterState.builder(currentState).blocks(blocks).metaData(metadata).routingTable(routingTable.build()).build();
     }
@@ -491,7 +524,15 @@ public class MetaDataIndexStateService {
         for (IndexMetaData indexMetaData : indicesToOpen) {
             final Index index = indexMetaData.getIndex();
             if (indexMetaData.getState() != IndexMetaData.State.OPEN) {
-                IndexMetaData updatedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.OPEN).build();
+                final Settings.Builder updatedSettings = Settings.builder().put(indexMetaData.getSettings());
+                updatedSettings.remove(VERIFIED_BEFORE_CLOSE_SETTING.getKey());
+
+                IndexMetaData updatedIndexMetaData = IndexMetaData.builder(indexMetaData)
+                    .state(IndexMetaData.State.OPEN)
+                    .settingsVersion(indexMetaData.getSettingsVersion() + 1)
+                    .settings(updatedSettings)
+                    .build();
+
                 // The index might be closed because we couldn't import it due to old incompatible version
                 // We need to check that this index can be upgraded to the current version
                 updatedIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(updatedIndexMetaData, minIndexCompatibilityVersion);
@@ -555,4 +596,9 @@ public class MetaDataIndexStateService {
             EnumSet.of(ClusterBlockLevel.WRITE));
     }
+
+    public static boolean isIndexVerifiedBeforeClosed(final IndexMetaData indexMetaData) {
+        return indexMetaData.getState() == IndexMetaData.State.CLOSE
+            && VERIFIED_BEFORE_CLOSE_SETTING.exists(indexMetaData.getSettings())
+            && VERIFIED_BEFORE_CLOSE_SETTING.get(indexMetaData.getSettings());
+    }
 }
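
The isIndexVerifiedBeforeClosed helper added above is what the routing code keys on: only indices that went through the verified close path keep a routing table. A hedged sketch of the contract, with hand-built metadata (the index name, shard counts and SETTING_VERSION_CREATED are assumptions needed to make IndexMetaData buildable):

    IndexMetaData closedAndVerified = IndexMetaData.builder("idx")   // hypothetical index name
        .settings(Settings.builder()
            .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put("index.verified_before_close", true))               // written by closeRoutingTable above
        .numberOfShards(1)
        .numberOfReplicas(0)
        .state(IndexMetaData.State.CLOSE)
        .build();
    assert MetaDataIndexStateService.isIndexVerifiedBeforeClosed(closedAndVerified);
    // an OPEN index, or a CLOSE index lacking the setting, yields false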

@@ -358,6 +358,13 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> implements Iterable<IndexShardRoutingTable> {
         return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, null));
     }

+    /**
+     * Initializes a new empty index, as a result of closing an opened index.
+     */
+    public Builder initializeAsFromOpenToClose(IndexMetaData indexMetaData) {
+        return initializeEmpty(indexMetaData, new UnassignedInfo(UnassignedInfo.Reason.INDEX_CLOSED, null));
+    }
+
     /**
      * Initializes a new empty index, to be restored from a snapshot
      */

@@ -47,6 +47,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.function.Predicate;

+import static org.elasticsearch.cluster.metadata.MetaDataIndexStateService.isIndexVerifiedBeforeClosed;
+
 /**
  * Represents a global cluster-wide routing table for all indices including the
  * version of the current routing state.
@@ -499,9 +501,9 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
     }

     public Builder addAsRecovery(IndexMetaData indexMetaData) {
-        if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
+        if (indexMetaData.getState() == IndexMetaData.State.OPEN || isIndexVerifiedBeforeClosed(indexMetaData)) {
             IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
-                    .initializeAsRecovery(indexMetaData);
+                .initializeAsRecovery(indexMetaData);
             add(indexRoutingBuilder);
         }
         return this;
@@ -525,6 +527,13 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<RoutingTable> {
         return this;
     }

+    public Builder addAsFromOpenToClose(IndexMetaData indexMetaData) {
+        assert isIndexVerifiedBeforeClosed(indexMetaData);
+        IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
+            .initializeAsFromOpenToClose(indexMetaData);
+        return add(indexRoutingBuilder);
+    }
+
     public Builder addAsRestore(IndexMetaData indexMetaData, SnapshotRecoverySource recoverySource) {
         IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
             .initializeAsRestore(indexMetaData, recoverySource);
@@ -118,7 +118,11 @@ public final class UnassignedInfo implements ToXContentFragment, Writeable {
         /**
          * Forced manually to allocate
          */
-        MANUAL_ALLOCATION
+        MANUAL_ALLOCATION,
+        /**
+         * Unassigned as a result of closing an index.
+         */
+        INDEX_CLOSED
     }

     /**
@@ -264,6 +268,8 @@ public final class UnassignedInfo implements ToXContentFragment, Writeable {
     public void writeTo(StreamOutput out) throws IOException {
         if (out.getVersion().before(Version.V_6_0_0_beta2) && reason == Reason.MANUAL_ALLOCATION) {
             out.writeByte((byte) Reason.ALLOCATION_FAILED.ordinal());
+        } else if (out.getVersion().before(Version.V_7_0_0) && reason == Reason.INDEX_CLOSED) {
+            out.writeByte((byte) Reason.REINITIALIZED.ordinal());
         } else {
             out.writeByte((byte) reason.ordinal());
         }
@@ -19,6 +19,7 @@
 package org.elasticsearch.common.settings;

 import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.cluster.routing.allocation.decider.MaxRetryAllocationDecider;
@@ -162,6 +163,7 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
         EngineConfig.INDEX_OPTIMIZE_AUTO_GENERATED_IDS,
         IndexMetaData.SETTING_WAIT_FOR_ACTIVE_SHARDS,
         IndexSettings.DEFAULT_PIPELINE,
+        MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING,

         // validate that built-in similarities don't get redefined
         Setting.groupSetting("index.similarity.", (s) -> {

@@ -673,7 +673,7 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
         // once we change the refresh interval we schedule yet another refresh
         // to ensure we are in a clean and predictable state.
         // it doesn't matter if we move from or to <code>-1</code> in both cases we want
-        // docs to become visible immediately. This also flushes all pending indexing / search reqeusts
+        // docs to become visible immediately. This also flushes all pending indexing / search requests
         // that are waiting for a refresh.
         threadPool.executor(ThreadPool.Names.REFRESH).execute(new AbstractRunnable() {
             @Override
@@ -830,17 +830,20 @@ public class IndexService extends AbstractIndexComponent implements IndicesClusterStateService.AllocatedIndex<IndexShard> {
     }

     abstract static class BaseAsyncTask extends AbstractAsyncTask {

         protected final IndexService indexService;

-        BaseAsyncTask(IndexService indexService, TimeValue interval) {
+        BaseAsyncTask(final IndexService indexService, final TimeValue interval) {
             super(indexService.logger, indexService.threadPool, interval, true);
             this.indexService = indexService;
             rescheduleIfNecessary();
         }

         @Override
         protected boolean mustReschedule() {
-            // don't re-schedule if its closed or if we don't have a single shard here..., we are done
-            return indexService.closed.get() == false;
+            // don't re-schedule if the IndexService instance is closed or if the index is closed
+            return indexService.closed.get() == false
+                && indexService.indexSettings.getIndexMetaData().getState() == IndexMetaData.State.OPEN;
         }
     }
@@ -0,0 +1,89 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.engine;
+
+import org.apache.lucene.index.DirectoryReader;
+import org.apache.lucene.index.IndexCommit;
+import org.apache.lucene.index.IndexWriter;
+import org.apache.lucene.index.LeafReader;
+import org.apache.lucene.store.Directory;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.function.Function;
+
+/**
+ * NoOpEngine is an engine implementation that does nothing but the bare minimum
+ * required in order to have an engine. All attempts to do something (search,
+ * index, get) throw {@link UnsupportedOperationException}.
+ */
+public final class NoOpEngine extends ReadOnlyEngine {
+
+    public NoOpEngine(EngineConfig config) {
+        super(config, null, null, true, Function.identity());
+    }
+
+    @Override
+    protected DirectoryReader open(final IndexCommit commit) throws IOException {
+        final Directory directory = commit.getDirectory();
+        final List<IndexCommit> indexCommits = DirectoryReader.listCommits(directory);
+        final IndexCommit indexCommit = indexCommits.get(indexCommits.size() - 1);
+        return new DirectoryReader(directory, new LeafReader[0]) {
+            @Override
+            protected DirectoryReader doOpenIfChanged() throws IOException {
+                return null;
+            }
+
+            @Override
+            protected DirectoryReader doOpenIfChanged(IndexCommit commit) throws IOException {
+                return null;
+            }
+
+            @Override
+            protected DirectoryReader doOpenIfChanged(IndexWriter writer, boolean applyAllDeletes) throws IOException {
+                return null;
+            }
+
+            @Override
+            public long getVersion() {
+                return 0;
+            }
+
+            @Override
+            public boolean isCurrent() throws IOException {
+                return true;
+            }
+
+            @Override
+            public IndexCommit getIndexCommit() throws IOException {
+                return indexCommit;
+            }
+
+            @Override
+            protected void doClose() throws IOException {
+            }
+
+            @Override
+            public CacheHelper getReaderCacheHelper() {
+                return null;
+            }
+        };
+    }
+}
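
A test-style sketch of what the new engine guarantees, not part of the diff (config stands for an EngineConfig assembled by the shard, indexOp for an Engine.Index operation, and expectThrows comes from ESTestCase):

    try (NoOpEngine engine = new NoOpEngine(config)) {
        // per the class javadoc, regular operations are rejected outright
        expectThrows(UnsupportedOperationException.class, () -> engine.index(indexOp));
    }

Because it extends ReadOnlyEngine, the engine still exposes the last commit point, which is what lets closed shards be recovered on other nodes or promoted to primary.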

@@ -87,6 +87,7 @@ import org.elasticsearch.index.engine.CommitStats;
 import org.elasticsearch.index.engine.EngineConfig;
 import org.elasticsearch.index.engine.EngineFactory;
 import org.elasticsearch.index.engine.InternalEngineFactory;
+import org.elasticsearch.index.engine.NoOpEngine;
 import org.elasticsearch.index.fielddata.IndexFieldDataCache;
 import org.elasticsearch.index.flush.FlushStats;
 import org.elasticsearch.index.get.GetStats;
@@ -558,6 +559,12 @@ public class IndicesService extends AbstractLifecycleComponent
     }

     private EngineFactory getEngineFactory(final IndexSettings idxSettings) {
+        final IndexMetaData indexMetaData = idxSettings.getIndexMetaData();
+        if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
+            // NoOpEngine takes precedence as long as the index is closed
+            return NoOpEngine::new;
+        }
+
         final List<Optional<EngineFactory>> engineFactories =
             engineFactoryProviders
                 .stream()
@@ -101,6 +101,7 @@ import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.CLOSED;
 import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.DELETED;
 import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.FAILURE;
 import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.NO_LONGER_ASSIGNED;
+import static org.elasticsearch.indices.cluster.IndicesClusterStateService.AllocatedIndices.IndexRemovalReason.REOPENED;

 public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
     private static final Logger logger = LogManager.getLogger(IndicesClusterStateService.class);
@@ -257,7 +258,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {

         deleteIndices(event); // also deletes shards of deleted indices

-        removeUnallocatedIndices(event); // also removes shards of removed indices
+        removeIndices(event); // also removes shards of removed indices

         failMissingShards(state);
@@ -369,17 +370,18 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
     }

     /**
-     * Removes indices that have no shards allocated to this node. This does not delete the shard data as we wait for enough
-     * shard copies to exist in the cluster before deleting shard data (triggered by {@link org.elasticsearch.indices.store.IndicesStore}).
+     * Removes indices that have no shards allocated to this node or indices whose state has changed. This does not delete the shard data
+     * as we wait for enough shard copies to exist in the cluster before deleting shard data (triggered by
+     * {@link org.elasticsearch.indices.store.IndicesStore}).
      *
      * @param event the cluster changed event
      */
-    private void removeUnallocatedIndices(final ClusterChangedEvent event) {
+    private void removeIndices(final ClusterChangedEvent event) {
         final ClusterState state = event.state();
         final String localNodeId = state.nodes().getLocalNodeId();
         assert localNodeId != null;

-        Set<Index> indicesWithShards = new HashSet<>();
+        final Set<Index> indicesWithShards = new HashSet<>();
         RoutingNode localRoutingNode = state.getRoutingNodes().node(localNodeId);
         if (localRoutingNode != null) { // null e.g. if we are not a data node
             for (ShardRouting shardRouting : localRoutingNode) {
@@ -388,20 +390,27 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
         }

         for (AllocatedIndex<? extends Shard> indexService : indicesService) {
-            Index index = indexService.index();
-            if (indicesWithShards.contains(index) == false) {
+            final Index index = indexService.index();
+            final IndexMetaData indexMetaData = state.metaData().index(index);
+            final IndexMetaData existingMetaData = indexService.getIndexSettings().getIndexMetaData();
+
+            AllocatedIndices.IndexRemovalReason reason = null;
+            if (indexMetaData != null && indexMetaData.getState() != existingMetaData.getState()) {
+                reason = indexMetaData.getState() == IndexMetaData.State.CLOSE ? CLOSED : REOPENED;
+            } else if (indicesWithShards.contains(index) == false) {
                 // if the cluster change indicates a brand new cluster, we only want
                 // to remove the in-memory structures for the index and not delete the
                 // contents on disk because the index will later be re-imported as a
                 // dangling index
-                final IndexMetaData indexMetaData = state.metaData().index(index);
                 assert indexMetaData != null || event.isNewCluster() :
                     "index " + index + " does not exist in the cluster state, it should either " +
                     "have been deleted or the cluster must be new";
-                final AllocatedIndices.IndexRemovalReason reason =
-                    indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE ? CLOSED : NO_LONGER_ASSIGNED;
-                logger.debug("{} removing index, [{}]", index, reason);
-                indicesService.removeIndex(index, reason, "removing index (no shards allocated)");
+                reason = indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE ? CLOSED : NO_LONGER_ASSIGNED;
+            }
+
+            if (reason != null) {
+                logger.debug("{} removing index ({})", index, reason);
+                indicesService.removeIndex(index, reason, "removing index (" + reason + ")");
             }
         }
     }
@@ -612,7 +621,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
                                ClusterState clusterState) {
         final ShardRouting currentRoutingEntry = shard.routingEntry();
         assert currentRoutingEntry.isSameAllocation(shardRouting) :
-            "local shard has a different allocation id but wasn't cleaning by removeShards. "
+            "local shard has a different allocation id but wasn't cleaned by removeShards. "
                 + "cluster state: " + shardRouting + " local: " + currentRoutingEntry;

         final long primaryTerm;
@@ -747,7 +756,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
     private void sendFailShard(ShardRouting shardRouting, String message, @Nullable Exception failure, ClusterState state) {
         try {
             logger.warn(() -> new ParameterizedMessage(
-                "[{}] marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
+                "{} marking and sending shard failed due to [{}]", shardRouting.shardId(), message), failure);
             failedShardsCache.put(shardRouting.shardId(), shardRouting);
             shardStateAction.localShardFailed(shardRouting, message, failure, SHARD_STATE_ACTION_LISTENER, state);
         } catch (Exception inner) {
@@ -948,7 +957,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
         DELETED,

         /**
-         * The index have been closed. The index should be removed and all associated resources released. Persistent parts of the index
+         * The index has been closed. The index should be removed and all associated resources released. Persistent parts of the index
          * like the shards files, state and transaction logs are kept around in the case of a disaster recovery.
          */
         CLOSED,
@@ -958,7 +967,13 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent implements ClusterStateApplier {
          * Persistent parts of the index like the shards files, state and transaction logs are kept around in the
          * case of a disaster recovery.
          */
-        FAILURE
+        FAILURE,
+
+        /**
+         * The index has been reopened. The index should be removed and all associated resources released. Persistent parts of the index
+         * like the shards files, state and transaction logs are kept around in the case of a disaster recovery.
+         */
+        REOPENED,
     }
 }
@@ -21,6 +21,7 @@ package org.elasticsearch.rest.action.admin.cluster;

 import org.elasticsearch.action.admin.cluster.health.ClusterHealthRequest;
 import org.elasticsearch.action.support.ActiveShardCount;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.Priority;
@@ -39,9 +40,9 @@ import java.util.Set;

 import static org.elasticsearch.client.Requests.clusterHealthRequest;

 public class RestClusterHealthAction extends BaseRestHandler {

     public RestClusterHealthAction(Settings settings, RestController controller) {
         super(settings);

         controller.registerHandler(RestRequest.Method.GET, "/_cluster/health", this);
         controller.registerHandler(RestRequest.Method.GET, "/_cluster/health/{index}", this);
     }
@@ -53,7 +54,8 @@ public class RestClusterHealthAction extends BaseRestHandler {

     @Override
     public RestChannelConsumer prepareRequest(final RestRequest request, final NodeClient client) throws IOException {
-        ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index")));
+        final ClusterHealthRequest clusterHealthRequest = clusterHealthRequest(Strings.splitStringByCommaToArray(request.param("index")));
+        clusterHealthRequest.indicesOptions(IndicesOptions.fromRequest(request, clusterHealthRequest.indicesOptions()));
         clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local()));
         clusterHealthRequest.masterNodeTimeout(request.paramAsTime("master_timeout", clusterHealthRequest.masterNodeTimeout()));
         clusterHealthRequest.timeout(request.paramAsTime("timeout", clusterHealthRequest.timeout()));
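
Since closed indices now keep a routing table and report health, an index-level health check against one behaves like any other. A hedged REST example (hypothetical index name):

    GET /_cluster/health/my-closed-index?level=indices&wait_for_status=yellow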

@@ -20,6 +20,7 @@
 package org.elasticsearch.rest.action.admin.indices;

 import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.client.node.NodeClient;
 import org.elasticsearch.common.Strings;
@@ -49,6 +50,10 @@ public class RestCloseIndexAction extends BaseRestHandler {
         closeIndexRequest.masterNodeTimeout(request.paramAsTime("master_timeout", closeIndexRequest.masterNodeTimeout()));
         closeIndexRequest.timeout(request.paramAsTime("timeout", closeIndexRequest.timeout()));
         closeIndexRequest.indicesOptions(IndicesOptions.fromRequest(request, closeIndexRequest.indicesOptions()));
+        String waitForActiveShards = request.param("wait_for_active_shards");
+        if (waitForActiveShards != null) {
+            closeIndexRequest.waitForActiveShards(ActiveShardCount.parseString(waitForActiveShards));
+        }
         return channel -> client.admin().indices().close(closeIndexRequest, new RestToXContentListener<>(channel));
     }
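
With the wait_for_active_shards parameter wired in above, a REST close can block until the reinitialized shards are active. A hedged example (hypothetical index name):

    POST /my-index/_close?wait_for_active_shards=1&timeout=30s

The response mirrors the Open Index API, reporting both acknowledged and shards_acknowledged.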

@@ -104,9 +104,9 @@ public class RestIndicesAction extends AbstractCatAction {
         // 2) the deleted index was resolved as part of a wildcard or _all. In this case, we want the subsequent requests not to
         // fail on the deleted index (as we want to ignore wildcards that cannot be resolved).
         // This behavior can be ensured by letting the cluster health and indices stats requests re-resolve the index names with the
-        // same indices options that we used for the initial cluster state request (strictExpand). Unfortunately cluster health
-        // requests hard-code their indices options and the best we can do is apply strictExpand to the indices stats request.
+        // same indices options that we used for the initial cluster state request (strictExpand).
         final ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(indices);
+        clusterHealthRequest.indicesOptions(strictExpandIndicesOptions);
         clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local()));

         client.admin().cluster().health(clusterHealthRequest, new RestActionListener<ClusterHealthResponse>(channel) {
@@ -383,34 +383,37 @@ public class RestIndicesAction extends AbstractCatAction {
     }

     // package private for testing
-    Table buildTable(RestRequest request, IndexMetaData[] indicesMetaData, ClusterHealthResponse response, IndicesStatsResponse stats) {
+    Table buildTable(final RestRequest request,
+                     final IndexMetaData[] indicesMetaData,
+                     final ClusterHealthResponse clusterHealthResponse,
+                     final IndicesStatsResponse indicesStatsResponse) {
         final String healthParam = request.param("health");
-        final ClusterHealthStatus status;
-        if (healthParam != null) {
-            status = ClusterHealthStatus.fromString(healthParam);
-        } else {
-            status = null;
-        }
-
-        Table table = getTableWithHeader(request);
-
+        final Table table = getTableWithHeader(request);
         for (IndexMetaData indexMetaData : indicesMetaData) {
             final String indexName = indexMetaData.getIndex().getName();
-            ClusterIndexHealth indexHealth = response.getIndices().get(indexName);
-            IndexStats indexStats = stats.getIndices().get(indexName);
-            IndexMetaData.State state = indexMetaData.getState();
-            boolean searchThrottled = IndexSettings.INDEX_SEARCH_THROTTLED.get(indexMetaData.getSettings());
+            final ClusterIndexHealth indexHealth = clusterHealthResponse.getIndices().get(indexName);
+            final IndexStats indexStats = indicesStatsResponse.getIndices().get(indexName);
+            final IndexMetaData.State indexState = indexMetaData.getState();
+            final boolean searchThrottled = IndexSettings.INDEX_SEARCH_THROTTLED.get(indexMetaData.getSettings());

-            if (status != null) {
-                if (state == IndexMetaData.State.CLOSE ||
-                    (indexHealth == null && false == ClusterHealthStatus.RED.equals(status)) ||
-                    false == indexHealth.getStatus().equals(status)) {
+            if (healthParam != null) {
+                final ClusterHealthStatus healthStatusFilter = ClusterHealthStatus.fromString(healthParam);
+                boolean skip;
+                if (indexHealth != null) {
+                    // index health is known but does not match the one requested
+                    skip = indexHealth.getStatus() != healthStatusFilter;
+                } else {
+                    // index health is unknown, skip if we don't explicitly request RED health or if the index is closed but not replicated
+                    skip = ClusterHealthStatus.RED != healthStatusFilter || indexState == IndexMetaData.State.CLOSE;
+                }
+                if (skip) {
                     continue;
                 }
             }

             // the open index is present in the cluster state but is not returned in the indices stats API
-            if (indexStats == null && state != IndexMetaData.State.CLOSE) {
+            if (indexStats == null && indexState != IndexMetaData.State.CLOSE) {
                 // the index stats API is called last, after cluster state and cluster health. If the index stats
                 // has not resolved the same open indices as the initial cluster state call, then the indices might
                 // have been removed in the meantime or, more likely, are unauthorized. This is because the cluster
@@ -422,9 +425,8 @@ public class RestIndicesAction extends AbstractCatAction {
             final CommonStats primaryStats;
             final CommonStats totalStats;

-            if (state == IndexMetaData.State.CLOSE) {
+            if (indexState == IndexMetaData.State.CLOSE) {
                 // empty stats for closed indices, but their names are displayed
-                assert indexStats == null;
                 primaryStats = new CommonStats();
                 totalStats = new CommonStats();
             } else {
@@ -433,9 +435,15 @@ public class RestIndicesAction extends AbstractCatAction {
             }

             table.startRow();
-            table.addCell(state == IndexMetaData.State.OPEN ?
-                (indexHealth == null ? "red*" : indexHealth.getStatus().toString().toLowerCase(Locale.ROOT)) : null);
-            table.addCell(state.toString().toLowerCase(Locale.ROOT));
+
+            String health = null;
+            if (indexHealth != null) {
+                health = indexHealth.getStatus().toString().toLowerCase(Locale.ROOT);
+            } else if (indexStats != null) {
+                health = "red*";
+            }
+            table.addCell(health);
+            table.addCell(indexState.toString().toLowerCase(Locale.ROOT));
             table.addCell(indexName);
             table.addCell(indexMetaData.getIndexUUID());
             table.addCell(indexHealth == null ? null : indexHealth.getNumberOfShards());
@@ -268,7 +268,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEventListener {
         // it's fine to keep the contexts open if the index is still "alive"
         // unfortunately we don't have a clear way to signal today why an index is closed.
         // to release memory and let references to the filesystem go etc.
-        if (reason == IndexRemovalReason.DELETED || reason == IndexRemovalReason.CLOSED) {
+        if (reason == IndexRemovalReason.DELETED || reason == IndexRemovalReason.CLOSED || reason == IndexRemovalReason.REOPENED) {
            freeAllContextForIndex(index);
        }
@@ -19,9 +19,12 @@

 package org.elasticsearch.action.admin.cluster.allocation;

+import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
 import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.cluster.ClusterInfo;
 import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.ShardRoutingState;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
@@ -32,8 +35,10 @@ import org.elasticsearch.cluster.routing.allocation.AllocationDecision;
 import org.elasticsearch.cluster.routing.allocation.MoveDecision;
 import org.elasticsearch.cluster.routing.allocation.NodeAllocationResult;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
@@ -52,6 +57,7 @@ import java.util.Locale;
 import java.util.Map;
 import java.util.Set;

+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.greaterThan;
@@ -70,8 +76,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         logger.info("--> starting 2 nodes");
         internalCluster().startNodes(2);

-        logger.info("--> creating an index with 1 primary, 0 replicas");
-        createIndexAndIndexData(1, 0);
+        prepareIndex(1, 0);

         logger.info("--> stopping the node with the primary");
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNodeName()));
@@ -149,8 +154,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         logger.info("--> starting 3 nodes");
         internalCluster().startNodes(3);

-        logger.info("--> creating an index with 1 primary, 1 replica");
-        createIndexAndIndexData(1, 1);
+        prepareIndex(1, 1);
         logger.info("--> stopping the node with the replica");
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode().getName()));
         ensureStableCluster(2);
@@ -268,8 +272,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         logger.info("--> starting 3 nodes");
         List<String> nodes = internalCluster().startNodes(3);

-        logger.info("--> creating an index with 1 primary and 1 replica");
-        createIndexAndIndexData(1, 1);
+        prepareIndex(1, 1);
         String primaryNodeName = primaryNodeName();
         nodes.remove(primaryNodeName);

@@ -390,7 +393,8 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         internalCluster().startNodes(2);

         logger.info("--> creating an index with 1 primary, 0 replicas, with allocation filtering so the primary can't be assigned");
-        createIndexAndIndexData(1, 0, Settings.builder().put("index.routing.allocation.include._name", "non_existent_node").build(),
+        prepareIndex(IndexMetaData.State.OPEN, 1, 0,
+            Settings.builder().put("index.routing.allocation.include._name", "non_existent_node").build(),
             ActiveShardCount.NONE);

         boolean includeYesDecisions = randomBoolean();
@@ -481,8 +485,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         logger.info("--> starting 2 nodes");
         internalCluster().startNodes(2);

-        logger.info("--> creating an index with 1 primary and 0 replicas");
-        createIndexAndIndexData(1, 0);
+        prepareIndex(1, 0);

         logger.info("--> setting up allocation filtering to prevent allocation to both nodes");
         client().admin().indices().prepareUpdateSettings("idx").setSettings(
@@ -591,8 +594,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         internalCluster().startNode();
         ensureStableCluster(1);

-        logger.info("--> creating an index with 5 shards, all allocated to the single node");
-        createIndexAndIndexData(5, 0);
+        prepareIndex(5, 0);

         logger.info("--> disabling rebalancing on the index");
         client().admin().indices().prepareUpdateSettings("idx").setSettings(
@@ -704,8 +706,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         internalCluster().startNode();
         ensureStableCluster(1);

-        logger.info("--> creating an index with 5 shards, all allocated to the single node");
-        createIndexAndIndexData(5, 0);
+        prepareIndex(5, 0);

         logger.info("--> setting balancing threshold really high, so it won't be met");
         client().admin().cluster().prepareUpdateSettings().setTransientSettings(
@@ -808,8 +809,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         String firstNode = internalCluster().startNode();
         ensureStableCluster(1);

-        logger.info("--> creating an index with 5 shards, all allocated to the single node");
-        createIndexAndIndexData(5, 0);
+        prepareIndex(5, 0);

         logger.info("--> setting up allocation filtering to only allow allocation to the current node");
         client().admin().indices().prepareUpdateSettings("idx").setSettings(
@@ -918,9 +918,9 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         logger.info("--> starting 3 nodes");
         List<String> nodes = internalCluster().startNodes(3);

-        logger.info("--> creating an index with 1 primary and 2 replicas");
         String excludedNode = nodes.get(randomIntBetween(0, 2));
-        createIndexAndIndexData(1, 2, Settings.builder().put("index.routing.allocation.exclude._name", excludedNode).build(),
+        prepareIndex(randomIndexState(), 1, 2,
+            Settings.builder().put("index.routing.allocation.exclude._name", excludedNode).build(),
             ActiveShardCount.from(2));

         boolean includeYesDecisions = randomBoolean();
@@ -1019,8 +1019,7 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         final String replicaNode = internalCluster().startNode();
         final String primaryNode = internalCluster().startNode();

-        logger.info("--> creating an index with 1 primary and 1 replica");
-        createIndexAndIndexData(1, 1,
+        prepareIndex(IndexMetaData.State.OPEN, 1, 1,
             Settings.builder()
                 .put("index.routing.allocation.include._name", primaryNode)
                 .put("index.routing.allocation.exclude._name", masterNode)
@@ -1037,8 +1036,22 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         logger.info("--> stop node with the replica shard");
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(replicaNode));

-        logger.info("--> index more data, now the replica is stale");
-        indexData();
+        final IndexMetaData.State indexState = randomIndexState();
+        if (indexState == IndexMetaData.State.OPEN) {
+            logger.info("--> index more data, now the replica is stale");
+            indexData();
+        } else {
+            logger.info("--> close the index, now the replica is stale");
+            assertAcked(client().admin().indices().prepareClose("idx"));
+
+            final ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth("idx")
+                .setTimeout(TimeValue.timeValueSeconds(30))
+                .setWaitForActiveShards(ActiveShardCount.ONE)
+                .setWaitForNoInitializingShards(true)
+                .setWaitForEvents(Priority.LANGUID)
+                .get();
+            assertThat(clusterHealthResponse.getStatus().value(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW.value()));
+        }

         logger.info("--> stop the node with the primary");
         internalCluster().stopRandomNode(InternalTestCluster.nameFilter(primaryNode));
@@ -1147,21 +1160,39 @@ public final class ClusterAllocationExplainIT extends ESIntegTestCase {
         return explanation;
     }

-    private void createIndexAndIndexData(int numPrimaries, int numReplicas) {
-        createIndexAndIndexData(numPrimaries, numReplicas, Settings.EMPTY, ActiveShardCount.ALL);
+    private void prepareIndex(final int numPrimaries, final int numReplicas) {
+        prepareIndex(randomIndexState(), numPrimaries, numReplicas, Settings.EMPTY, ActiveShardCount.ALL);
     }

-    private void createIndexAndIndexData(int numPrimaries, int numReplicas, Settings settings, ActiveShardCount activeShardCount) {
-        client().admin().indices().prepareCreate("idx")
+    private void prepareIndex(final IndexMetaData.State state, final int numPrimaries, final int numReplicas,
+                              final Settings settings, final ActiveShardCount activeShardCount) {
+
+        logger.info("--> creating a {} index with {} primary, {} replicas", state, numPrimaries, numReplicas);
+        assertAcked(client().admin().indices().prepareCreate("idx")
             .setSettings(Settings.builder()
                 .put("index.number_of_shards", numPrimaries)
                 .put("index.number_of_replicas", numReplicas)
                 .put(settings))
             .setWaitForActiveShards(activeShardCount)
-            .get();
+            .get());

-        indexData();
+        if (activeShardCount != ActiveShardCount.NONE) {
+            indexData();
+        }
+        if (state == IndexMetaData.State.CLOSE) {
+            assertAcked(client().admin().indices().prepareClose("idx"));
+
+            final ClusterHealthResponse clusterHealthResponse = client().admin().cluster().prepareHealth("idx")
+                .setTimeout(TimeValue.timeValueSeconds(30))
+                .setWaitForActiveShards(activeShardCount)
+                .setWaitForEvents(Priority.LANGUID)
+                .get();
+            assertThat(clusterHealthResponse.getStatus().value(), lessThanOrEqualTo(ClusterHealthStatus.YELLOW.value()));
+        }
+    }
+
+    private static IndexMetaData.State randomIndexState() {
+        return randomFrom(IndexMetaData.State.values());
+    }

     private void indexData() {
@@ -19,15 +19,23 @@

 package org.elasticsearch.action.admin.cluster.health;

+import org.elasticsearch.Version;
+import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.test.ESTestCase;

+import java.util.Locale;
+
+import static org.elasticsearch.test.VersionUtils.getPreviousVersion;
+import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
 import static org.hamcrest.core.IsEqual.equalTo;

 public class ClusterHealthRequestTests extends ESTestCase {

     public void testSerialize() throws Exception {
         final ClusterHealthRequest originalRequest = randomRequest();
         final ClusterHealthRequest cloneRequest;
@@ -43,9 +51,89 @@ public class ClusterHealthRequestTests extends ESTestCase {
         assertThat(cloneRequest.waitForNoRelocatingShards(), equalTo(originalRequest.waitForNoRelocatingShards()));
         assertThat(cloneRequest.waitForActiveShards(), equalTo(originalRequest.waitForActiveShards()));
         assertThat(cloneRequest.waitForEvents(), equalTo(originalRequest.waitForEvents()));
+        assertIndicesEquals(cloneRequest.indices(), originalRequest.indices());
+        assertThat(cloneRequest.indicesOptions(), equalTo(originalRequest.indicesOptions()));
     }

-    ClusterHealthRequest randomRequest() {
+    public void testBwcSerialization() throws Exception {
+        for (int runs = 0; runs < randomIntBetween(5, 20); runs++) {
+            // Generate a random cluster health request in version < 7.1.0 and serialize it
+            final BytesStreamOutput out = new BytesStreamOutput();
+            out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_1_0)));
+
+            final ClusterHealthRequest expected = randomRequest();
+            {
+                expected.getParentTask().writeTo(out);
+                out.writeTimeValue(expected.masterNodeTimeout());
+                out.writeBoolean(expected.local());
+                if (expected.indices() == null) {
+                    out.writeVInt(0);
+                } else {
+                    out.writeVInt(expected.indices().length);
+                    for (String index : expected.indices()) {
+                        out.writeString(index);
+                    }
+                }
+                out.writeTimeValue(expected.timeout());
+                if (expected.waitForStatus() == null) {
+                    out.writeBoolean(false);
+                } else {
+                    out.writeBoolean(true);
+                    out.writeByte(expected.waitForStatus().value());
+                }
+                out.writeBoolean(expected.waitForNoRelocatingShards());
+                expected.waitForActiveShards().writeTo(out);
+                out.writeString(expected.waitForNodes());
+                if (expected.waitForEvents() == null) {
+                    out.writeBoolean(false);
+                } else {
+                    out.writeBoolean(true);
+                    Priority.writeTo(expected.waitForEvents(), out);
+                }
+                out.writeBoolean(expected.waitForNoInitializingShards());
+            }
+
+            // Deserialize and check the cluster health request
+            final StreamInput in = out.bytes().streamInput();
+            in.setVersion(out.getVersion());
+            final ClusterHealthRequest actual = new ClusterHealthRequest(in);
+
+            assertThat(actual.waitForStatus(), equalTo(expected.waitForStatus()));
+            assertThat(actual.waitForNodes(), equalTo(expected.waitForNodes()));
+            assertThat(actual.waitForNoInitializingShards(), equalTo(expected.waitForNoInitializingShards()));
+            assertThat(actual.waitForNoRelocatingShards(), equalTo(expected.waitForNoRelocatingShards()));
+            assertThat(actual.waitForActiveShards(), equalTo(expected.waitForActiveShards()));
+            assertThat(actual.waitForEvents(), equalTo(expected.waitForEvents()));
+            assertIndicesEquals(actual.indices(), expected.indices());
+            assertThat(actual.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen()));
+        }
+
+        for (int runs = 0; runs < randomIntBetween(5, 20); runs++) {
+            // Generate a random cluster health request in current version
+            final ClusterHealthRequest expected = randomRequest();
+
+            // Serialize to node in version < 7.1.0
+            final BytesStreamOutput out = new BytesStreamOutput();
+            out.setVersion(randomVersionBetween(random(), Version.V_6_3_0, getPreviousVersion(Version.V_7_1_0)));
+            expected.writeTo(out);
+
+            // Deserialize and check the cluster health request
+            final StreamInput in = out.bytes().streamInput();
+            in.setVersion(out.getVersion());
+            final ClusterHealthRequest actual = new ClusterHealthRequest(in);
+
+            assertThat(actual.waitForStatus(), equalTo(expected.waitForStatus()));
+            assertThat(actual.waitForNodes(), equalTo(expected.waitForNodes()));
+            assertThat(actual.waitForNoInitializingShards(), equalTo(expected.waitForNoInitializingShards()));
+            assertThat(actual.waitForNoRelocatingShards(), equalTo(expected.waitForNoRelocatingShards()));
+            assertThat(actual.waitForActiveShards(), equalTo(expected.waitForActiveShards()));
+            assertThat(actual.waitForEvents(), equalTo(expected.waitForEvents()));
+            assertIndicesEquals(actual.indices(), expected.indices());
+            assertThat(actual.indicesOptions(), equalTo(IndicesOptions.lenientExpandOpen()));
+        }
+    }
+
+    private ClusterHealthRequest randomRequest() {
         ClusterHealthRequest request = new ClusterHealthRequest();
         request.waitForStatus(randomFrom(ClusterHealthStatus.values()));
         request.waitForNodes(randomFrom("", "<", "<=", ">", ">=") + between(0, 1000));
@@ -53,7 +141,21 @@ public class ClusterHealthRequestTests extends ESTestCase {
         request.waitForNoRelocatingShards(randomBoolean());
         request.waitForActiveShards(randomIntBetween(0, 10));
         request.waitForEvents(randomFrom(Priority.values()));
+        if (randomBoolean()) {
+            final String[] indices = new String[randomIntBetween(1, 10)];
+            for (int i = 0; i < indices.length; i++) {
+                indices[i] = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+            }
+            request.indices(indices);
+        }
+        if (randomBoolean()) {
+            request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
+        }
         return request;
     }
+
+    private static void assertIndicesEquals(final String[] actual, final String[] expected) {
+        // null indices in ClusterHealthRequest are deserialized as an empty string array
+        assertArrayEquals(expected != null ? expected : Strings.EMPTY_ARRAY, actual);
+    }
 }
@ -0,0 +1,114 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.ActiveShardCount;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.tasks.TaskId;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

import static org.elasticsearch.test.VersionUtils.randomVersionBetween;

public class CloseIndexRequestTests extends ESTestCase {

    public void testSerialization() throws Exception {
        final CloseIndexRequest request = randomRequest();
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            request.writeTo(out);

            final CloseIndexRequest deserializedRequest = new CloseIndexRequest();
            try (StreamInput in = out.bytes().streamInput()) {
                deserializedRequest.readFrom(in);
            }
            assertEquals(request.timeout(), deserializedRequest.timeout());
            assertEquals(request.masterNodeTimeout(), deserializedRequest.masterNodeTimeout());
            assertEquals(request.indicesOptions(), deserializedRequest.indicesOptions());
            assertEquals(request.getParentTask(), deserializedRequest.getParentTask());
            assertEquals(request.waitForActiveShards(), deserializedRequest.waitForActiveShards());
            assertArrayEquals(request.indices(), deserializedRequest.indices());
        }
    }

    public void testBwcSerialization() throws Exception {
        {
            final CloseIndexRequest request = randomRequest();
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
                request.writeTo(out);

                try (StreamInput in = out.bytes().streamInput()) {
                    assertEquals(request.getParentTask(), TaskId.readFromStream(in));
                    assertEquals(request.masterNodeTimeout(), in.readTimeValue());
                    assertEquals(request.timeout(), in.readTimeValue());
                    assertArrayEquals(request.indices(), in.readStringArray());
                    assertEquals(request.indicesOptions(), IndicesOptions.readIndicesOptions(in));
                }
            }
        }
        {
            final CloseIndexRequest sample = randomRequest();
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                sample.getParentTask().writeTo(out);
                out.writeTimeValue(sample.masterNodeTimeout());
                out.writeTimeValue(sample.timeout());
                out.writeStringArray(sample.indices());
                sample.indicesOptions().writeIndicesOptions(out);

                final CloseIndexRequest deserializedRequest = new CloseIndexRequest();
                try (StreamInput in = out.bytes().streamInput()) {
                    in.setVersion(randomVersionBetween(random(), Version.V_6_4_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
                    deserializedRequest.readFrom(in);
                }
                assertEquals(sample.getParentTask(), deserializedRequest.getParentTask());
                assertEquals(sample.masterNodeTimeout(), deserializedRequest.masterNodeTimeout());
                assertEquals(sample.timeout(), deserializedRequest.timeout());
                assertArrayEquals(sample.indices(), deserializedRequest.indices());
                assertEquals(sample.indicesOptions(), deserializedRequest.indicesOptions());
                assertEquals(ActiveShardCount.NONE, deserializedRequest.waitForActiveShards());
            }
        }
    }

    private CloseIndexRequest randomRequest() {
        CloseIndexRequest request = new CloseIndexRequest();
        request.indices(generateRandomStringArray(10, 5, false, false));
        if (randomBoolean()) {
            request.indicesOptions(IndicesOptions.fromOptions(randomBoolean(), randomBoolean(), randomBoolean(), randomBoolean()));
        }
        if (randomBoolean()) {
            request.timeout(randomPositiveTimeValue());
        }
        if (randomBoolean()) {
            request.masterNodeTimeout(randomPositiveTimeValue());
        }
        if (randomBoolean()) {
            request.setParentTask(randomAlphaOfLength(5), randomNonNegativeLong());
        }
        if (randomBoolean()) {
            request.waitForActiveShards(randomFrom(ActiveShardCount.DEFAULT, ActiveShardCount.NONE, ActiveShardCount.ONE,
                ActiveShardCount.ALL));
        }
        return request;
    }
}
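The BWC test above pins down the pre-7.1.0 wire format of the close request:
parent task id, master node timeout, timeout, indices, then indices options,
with no waitForActiveShards field at all (hence the ActiveShardCount.NONE
default when reading from older nodes). A hypothetical sketch of the
version-gated writeTo this implies; the actual CloseIndexRequest code may be
organized differently:

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);                          // parent task id and timeouts
        out.writeStringArray(indices);
        indicesOptions.writeIndicesOptions(out);
        if (out.getVersion().onOrAfter(Version.V_7_1_0)) {
            waitForActiveShards.writeTo(out);        // never sent to pre-7.1.0 nodes
        }
    }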
@ -0,0 +1,86 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.indices.close;

import org.elasticsearch.Version;
import org.elasticsearch.action.support.master.AcknowledgedResponse;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.VersionUtils;

import static org.elasticsearch.test.VersionUtils.randomVersionBetween;
import static org.hamcrest.Matchers.equalTo;

public class CloseIndexResponseTests extends ESTestCase {

    public void testSerialization() throws Exception {
        final CloseIndexResponse response = randomResponse();
        try (BytesStreamOutput out = new BytesStreamOutput()) {
            response.writeTo(out);

            final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
            try (StreamInput in = out.bytes().streamInput()) {
                deserializedResponse.readFrom(in);
            }
            assertCloseIndexResponse(deserializedResponse, response);
        }
    }

    public void testBwcSerialization() throws Exception {
        {
            final CloseIndexResponse response = randomResponse();
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                out.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
                response.writeTo(out);

                final AcknowledgedResponse deserializedResponse = new AcknowledgedResponse();
                try (StreamInput in = out.bytes().streamInput()) {
                    deserializedResponse.readFrom(in);
                }
                assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
            }
        }
        {
            final AcknowledgedResponse response = new AcknowledgedResponse(randomBoolean());
            try (BytesStreamOutput out = new BytesStreamOutput()) {
                response.writeTo(out);

                final CloseIndexResponse deserializedResponse = new CloseIndexResponse();
                try (StreamInput in = out.bytes().streamInput()) {
                    in.setVersion(randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_1_0)));
                    deserializedResponse.readFrom(in);
                }
                assertThat(deserializedResponse.isAcknowledged(), equalTo(response.isAcknowledged()));
            }
        }
    }

    private CloseIndexResponse randomResponse() {
        final boolean acknowledged = randomBoolean();
        final boolean shardsAcknowledged = acknowledged ? randomBoolean() : false;
        return new CloseIndexResponse(acknowledged, shardsAcknowledged);
    }

    private static void assertCloseIndexResponse(final CloseIndexResponse actual, final CloseIndexResponse expected) {
        assertThat(actual.isAcknowledged(), equalTo(expected.isAcknowledged()));
        assertThat(actual.isShardsAcknowledged(), equalTo(expected.isShardsAcknowledged()));
    }
}
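Before 7.1.0 the close response was a plain AcknowledgedResponse, so the
shardsAcknowledged flag cannot survive a round trip through an older node;
both directions of the test above check exactly that. A hypothetical sketch of
the version-aware read side consistent with it (the real CloseIndexResponse
may differ):

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);                              // acknowledged flag, all versions
        if (in.getVersion().onOrAfter(Version.V_7_1_0)) {
            shardsAcknowledged = in.readBoolean();       // absent from the old wire format
        }
    }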
@ -20,13 +20,19 @@
package org.elasticsearch.cluster;

import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;

import java.util.concurrent.atomic.AtomicBoolean;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.not;
import static org.hamcrest.Matchers.nullValue;

public class ClusterHealthIT extends ESIntegTestCase {

@ -76,6 +82,159 @@ public class ClusterHealthIT extends ESIntegTestCase {
        assertThat(healthResponse.getIndices().size(), equalTo(1));
    }

    public void testHealthWithClosedIndices() {
        createIndex("index-1");
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth().setWaitForGreenStatus().get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }

        createIndex("index-2");
        assertAcked(client().admin().indices().prepareClose("index-2"));

        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth()
                .setWaitForGreenStatus()
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(2));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-1").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-2").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-*").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(2));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-*")
                .setIndicesOptions(IndicesOptions.lenientExpandOpen())
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2"), nullValue());
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-*")
                .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true))
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-1"), nullValue());
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }

        createIndex("index-3", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 50)
            .build());
        assertAcked(client().admin().indices().prepareClose("index-3"));

        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth()
                .setWaitForNoRelocatingShards(true)
                .setWaitForNoInitializingShards(true)
                .setWaitForYellowStatus()
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(3));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-1").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-2").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-3").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-*").get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(3));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-*")
                .setIndicesOptions(IndicesOptions.lenientExpandOpen())
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(1));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2"), nullValue());
            assertThat(response.getIndices().get("index-3"), nullValue());
        }
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth("index-*")
                .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true))
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.YELLOW));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(2));
            assertThat(response.getIndices().get("index-1"), nullValue());
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.YELLOW));
        }

        assertAcked(client().admin().indices().prepareUpdateSettings("index-3")
            .setSettings(Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, numberOfReplicas())
                .build()));
        {
            ClusterHealthResponse response = client().admin().cluster().prepareHealth()
                .setWaitForGreenStatus()
                .get();
            assertThat(response.getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.isTimedOut(), equalTo(false));
            assertThat(response.getIndices().size(), equalTo(3));
            assertThat(response.getIndices().get("index-1").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-2").getStatus(), equalTo(ClusterHealthStatus.GREEN));
            assertThat(response.getIndices().get("index-3").getStatus(), equalTo(ClusterHealthStatus.GREEN));
        }
    }
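    // Note on the options used above (a gloss, not part of the original test):
    // the booleans passed to IndicesOptions.fromOptions() are, in order,
    // ignoreUnavailable, allowNoIndices, expandWildcardsOpen and
    // expandWildcardsClosed. So fromOptions(true, true, false, true) resolves
    // "index-*" to closed indices only, which is why index-1 drops out of the
    // response while the closed index-2 and index-3 remain, for example:
    //
    //     client().admin().cluster().prepareHealth("index-*")
    //         .setIndicesOptions(IndicesOptions.fromOptions(true, true, false, true))
    //         .get();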

    public void testHealthOnIndexCreation() throws Exception {
        final AtomicBoolean finished = new AtomicBoolean(false);
        Thread clusterHealthThread = new Thread() {
@ -24,6 +24,8 @@ import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData.State;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -33,9 +35,11 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.anyOf;
import static org.hamcrest.Matchers.equalTo;

@ -54,7 +58,6 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
            .put("cluster.routing.allocation.awareness.attributes", "rack_id")
            .build();

        logger.info("--> starting 2 nodes on the same rack");
        internalCluster().startNodes(2, Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_1").build());

@ -68,6 +71,9 @@ public class AwarenessAllocationIT extends ESIntegTestCase {

        ensureGreen();

        final List<String> indicesToClose = randomSubsetOf(Arrays.asList("test1", "test2"));
        indicesToClose.forEach(indexToClose -> assertAcked(client().admin().indices().prepareClose(indexToClose).get()));

        logger.info("--> starting 1 node on a different rack");
        final String node3 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.rack_id", "rack_2").build());

@ -75,14 +81,23 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        assertThat(awaitBusy(
            () -> {
                logger.info("--> waiting for no relocation");
                ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
                    .setWaitForGreenStatus().setWaitForNodes("3").setWaitForNoRelocatingShards(true).get();
                ClusterHealthResponse clusterHealth = client().admin().cluster().prepareHealth()
                    .setIndices("test1", "test2")
                    .setWaitForEvents(Priority.LANGUID)
                    .setWaitForGreenStatus()
                    .setWaitForNodes("3")
                    .setWaitForNoRelocatingShards(true)
                    .get();
                if (clusterHealth.isTimedOut()) {
                    return false;
                }

                logger.info("--> checking current state");
                ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
                // check that closed indices are effectively closed
                if (indicesToClose.stream().anyMatch(index -> clusterState.metaData().index(index).getState() != State.CLOSE)) {
                    return false;
                }
                // verify that we have all the primaries on node3
                ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
                for (IndexRoutingTable indexRoutingTable : clusterState.routingTable()) {
@ -99,7 +114,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        ), equalTo(true));
    }

    public void testAwarenessZones() throws Exception {
    public void testAwarenessZones() {
        Settings commonSettings = Settings.builder()
            .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING.getKey() + "zone.values", "a,b")
            .put(AwarenessAllocationDecider.CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING.getKey(), "zone")
@ -121,12 +136,20 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForNodes("4").execute().actionGet();
        assertThat(health.isTimedOut(), equalTo(false));

        client().admin().indices().prepareCreate("test")
            .setSettings(Settings.builder().put("index.number_of_shards", 5)
                .put("index.number_of_replicas", 1)).execute().actionGet();
        createIndex("test", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .build());

        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test"));
        }

        logger.info("--> waiting for shards to be allocated");
        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
        health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNoRelocatingShards(true).execute().actionGet();
        assertThat(health.isTimedOut(), equalTo(false));

@ -146,7 +169,7 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        assertThat(counts.get(B_0), anyOf(equalTo(2),equalTo(3)));
    }

    public void testAwarenessZonesIncrementalNodes() throws Exception {
    public void testAwarenessZonesIncrementalNodes() {
        Settings commonSettings = Settings.builder()
            .put("cluster.routing.allocation.awareness.force.zone.values", "a,b")
            .put("cluster.routing.allocation.awareness.attributes", "zone")
@ -159,11 +182,23 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        );
        String A_0 = nodes.get(0);
        String B_0 = nodes.get(1);
        client().admin().indices().prepareCreate("test")
            .setSettings(Settings.builder().put("index.number_of_shards", 5)
                .put("index.number_of_replicas", 1)).execute().actionGet();
        ClusterHealthResponse health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus().setWaitForNodes("2").setWaitForNoRelocatingShards(true).execute().actionGet();

        createIndex("test", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
            .build());

        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test"));
        }

        ClusterHealthResponse health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("2")
            .setWaitForNoRelocatingShards(true)
            .execute().actionGet();
        assertThat(health.isTimedOut(), equalTo(false));
        ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
        ObjectIntHashMap<String> counts = new ObjectIntHashMap<>();
@ -180,12 +215,22 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        logger.info("--> starting another node in zone 'b'");

        String B_1 = internalCluster().startNode(Settings.builder().put(commonSettings).put("node.attr.zone", "b").build());
        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
            .setWaitForNodes("3").execute().actionGet();
        health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("3")
            .execute().actionGet();
        assertThat(health.isTimedOut(), equalTo(false));
        client().admin().cluster().prepareReroute().get();
        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
            .setWaitForNodes("3").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
        health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("3")
            .setWaitForActiveShards(10)
            .setWaitForNoRelocatingShards(true)
            .execute().actionGet();

        assertThat(health.isTimedOut(), equalTo(false));
        clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@ -204,12 +249,22 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        assertThat(counts.get(B_1), equalTo(2));

        String noZoneNode = internalCluster().startNode();
        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
            .setWaitForNodes("4").execute().actionGet();
        health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("4")
            .execute().actionGet();
        assertThat(health.isTimedOut(), equalTo(false));
        client().admin().cluster().prepareReroute().get();
        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
            .setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
        health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("4")
            .setWaitForActiveShards(10)
            .setWaitForNoRelocatingShards(true)
            .execute().actionGet();

        assertThat(health.isTimedOut(), equalTo(false));
        clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@ -231,8 +286,14 @@ public class AwarenessAllocationIT extends ESIntegTestCase {
        client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put("cluster.routing.allocation.awareness.attributes", "").build()).get();

        health = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus()
            .setWaitForNodes("4").setWaitForActiveShards(10).setWaitForNoRelocatingShards(true).execute().actionGet();
        health = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForGreenStatus()
            .setWaitForNodes("4")
            .setWaitForActiveShards(10)
            .setWaitForNoRelocatingShards(true)
            .execute().actionGet();

        assertThat(health.isTimedOut(), equalTo(false));
        clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
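A recurring change in this file: every health check gained an explicit
.setIndices(...) scope. Now that closed indices keep allocated shards, an
unscoped prepareHealth() also waits on whatever closed indices exist in the
cluster, so scoping the request keeps the waits deterministic. The scoped form
used above, in isolation:

    ClusterHealthResponse health = client().admin().cluster().prepareHealth()
        .setIndices("test")
        .setWaitForEvents(Priority.LANGUID)
        .setWaitForGreenStatus()
        .get();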
@ -22,7 +22,6 @@ package org.elasticsearch.cluster.allocation;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.action.admin.cluster.health.ClusterHealthResponse;
import org.elasticsearch.action.admin.cluster.reroute.ClusterRerouteResponse;
import org.elasticsearch.action.admin.cluster.reroute.TransportClusterRerouteAction;
@ -34,6 +33,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.RerouteExplanation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.command.AllocateEmptyPrimaryAllocationCommand;
@ -48,6 +48,7 @@ import org.elasticsearch.common.io.FileSystemUtils;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
@ -102,6 +103,10 @@ public class ClusterRerouteIT extends ESIntegTestCase {
            .setSettings(Settings.builder().put("index.number_of_shards", 1))
            .execute().actionGet();

        if (randomBoolean()) {
            client().admin().indices().prepareClose("test").get();
        }

        ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
        assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));

@ -128,8 +133,11 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.INITIALIZING));

        ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus().execute().actionGet();
        ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus()
            .execute().actionGet();
        assertThat(healthResponse.isTimedOut(), equalTo(false));

        logger.info("--> get the state, verify shard 1 primary allocated");
@ -149,9 +157,12 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_2).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.INITIALIZING));


        healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForYellowStatus()
            .setWaitForNoRelocatingShards(true).execute().actionGet();
        healthResponse = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus()
            .setWaitForNoRelocatingShards(true)
            .execute().actionGet();
        assertThat(healthResponse.isTimedOut(), equalTo(false));

        logger.info("--> get the state, verify shard 1 primary moved from node1 to node2");
@ -193,11 +204,15 @@ public class ClusterRerouteIT extends ESIntegTestCase {

        logger.info("--> create indices");
        for (int i = 0; i < 25; i++) {
            client().admin().indices().prepareCreate("test" + i)
                .setSettings(Settings.builder()
                    .put("index.number_of_shards", 5).put("index.number_of_replicas", 1)
                    .put("index.unassigned.node_left.delayed_timeout", randomIntBetween(250, 1000) + "ms"))
                .execute().actionGet();
            final String indexName = "test" + i;
            createIndex(indexName, Settings.builder()
                .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 5)
                .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
                .put(UnassignedInfo.INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING.getKey(), randomIntBetween(250, 1000) + "ms")
                .build());
            if (randomBoolean()) {
                assertAcked(client().admin().indices().prepareClose(indexName));
            }
        }

        ensureGreen(TimeValue.timeValueMinutes(1));
@ -222,6 +237,11 @@ public class ClusterRerouteIT extends ESIntegTestCase {
            .setSettings(Settings.builder().put("index.number_of_shards", 1))
            .execute().actionGet();

        final boolean closed = randomBoolean();
        if (closed) {
            client().admin().indices().prepareClose("test").get();
        }

        ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
        assertThat(state.getRoutingNodes().unassigned().size(), equalTo(2));

@ -234,8 +254,11 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.INITIALIZING));

        healthResponse = client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus().execute().actionGet();
        healthResponse = client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForEvents(Priority.LANGUID)
            .setWaitForYellowStatus()
            .execute().actionGet();
        assertThat(healthResponse.isTimedOut(), equalTo(false));

        logger.info("--> get the state, verify shard 1 primary allocated");
@ -244,8 +267,10 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        assertThat(state.getRoutingNodes().node(state.nodes().resolveNode(node_1).getId()).iterator().next().state(),
            equalTo(ShardRoutingState.STARTED));

        client().prepareIndex("test", "type", "1").setSource("field", "value")
            .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
        if (closed == false) {
            client().prepareIndex("test", "type", "1").setSource("field", "value")
                .setRefreshPolicy(RefreshPolicy.IMMEDIATE).get();
        }
        final Index index = resolveIndex("test");

        logger.info("--> closing all nodes");
@ -263,7 +288,10 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        // wait a bit for the cluster to realize that the shard is not there...
        // TODO can we get around this? the cluster is RED, so what do we wait for?
        client().admin().cluster().prepareReroute().get();
        assertThat(client().admin().cluster().prepareHealth().setWaitForNodes("2").execute().actionGet().getStatus(),
        assertThat(client().admin().cluster().prepareHealth()
            .setIndices("test")
            .setWaitForNodes("2")
            .execute().actionGet().getStatus(),
            equalTo(ClusterHealthStatus.RED));
        logger.info("--> explicitly allocate primary");
        state = client().admin().cluster().prepareReroute()
@ -294,10 +322,14 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        assertThat(healthResponse.isTimedOut(), equalTo(false));

        logger.info("--> create an index with 1 shard");
        client().admin().indices().prepareCreate("test")
            .setSettings(Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0))
            .execute().actionGet();
        createIndex("test", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .build());

        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test"));
        }
        ensureGreen("test");

        logger.info("--> disable allocation");
@ -403,12 +435,18 @@ public class ClusterRerouteIT extends ESIntegTestCase {
        Loggers.removeAppender(actionLogger, allocateMockLog);
    }

    public void testClusterRerouteWithBlocks() throws Exception {
    public void testClusterRerouteWithBlocks() {
        List<String> nodesIds = internalCluster().startNodes(2);

        logger.info("--> create an index with 1 shard and 0 replicas");
        assertAcked(prepareCreate("test-blocks").setSettings(Settings.builder().put("index.number_of_shards", 1)
            .put("index.number_of_replicas", 0)));
        createIndex("test-blocks", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .build());

        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test-blocks"));
        }
        ensureGreen("test-blocks");

        logger.info("--> check that the index has 1 shard");
@ -432,11 +470,14 @@ public class ClusterRerouteIT extends ESIntegTestCase {
            SETTING_READ_ONLY_ALLOW_DELETE)) {
            try {
                enableIndexBlock("test-blocks", blockSetting);
                assertAcked(client().admin().cluster().prepareReroute().add(new MoveAllocationCommand("test-blocks", 0,
                    nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))));
                assertAcked(client().admin().cluster().prepareReroute()
                    .add(new MoveAllocationCommand("test-blocks", 0, nodesIds.get(toggle % 2), nodesIds.get(++toggle % 2))));

                ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth().setWaitForYellowStatus()
                    .setWaitForNoRelocatingShards(true).execute().actionGet();
                ClusterHealthResponse healthResponse = client().admin().cluster().prepareHealth()
                    .setIndices("test-blocks")
                    .setWaitForYellowStatus()
                    .setWaitForNoRelocatingShards(true)
                    .execute().actionGet();
                assertThat(healthResponse.isTimedOut(), equalTo(false));
            } finally {
                disableIndexBlock("test-blocks", blockSetting);
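The randomized prepareClose() calls in these reroute tests work because shards
of closed indices stay allocated and movable: reroute commands such as
MoveAllocationCommand apply to them unchanged. A sketch, with the node names
being placeholders:

    client().admin().cluster().prepareReroute()
        .add(new MoveAllocationCommand("test-blocks", 0, "node-a", "node-b"))
        .get();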
@ -19,10 +19,9 @@

package org.elasticsearch.cluster.allocation;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.health.ClusterHealthStatus;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
@ -41,14 +40,13 @@ import java.util.HashSet;
import java.util.List;
import java.util.Set;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.Matchers.equalTo;

@ClusterScope(scope= Scope.TEST, numDataNodes =0)
public class FilteringAllocationIT extends ESIntegTestCase {

    private final Logger logger = LogManager.getLogger(FilteringAllocationIT.class);

    public void testDecommissionNodeNoReplicas() throws Exception {
    public void testDecommissionNodeNoReplicas() {
        logger.info("--> starting 2 nodes");
        List<String> nodesIds = internalCluster().startNodes(2);
        final String node_0 = nodesIds.get(0);
@ -56,10 +54,10 @@ public class FilteringAllocationIT extends ESIntegTestCase {
        assertThat(cluster().size(), equalTo(2));

        logger.info("--> creating an index with no replicas");
        client().admin().indices().prepareCreate("test")
            .setSettings(Settings.builder().put("index.number_of_replicas", 0))
            .execute().actionGet();
        ensureGreen();
        createIndex("test", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .build());
        ensureGreen("test");
        logger.info("--> index some data");
        for (int i = 0; i < 100; i++) {
            client().prepareIndex("test", "type", Integer.toString(i)).setSource("field", "value" + i).execute().actionGet();
@ -68,11 +66,17 @@ public class FilteringAllocationIT extends ESIntegTestCase {
        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery()).execute().actionGet()
            .getHits().getTotalHits().value, equalTo(100L));

        final boolean closed = randomBoolean();
        if (closed) {
            assertAcked(client().admin().indices().prepareClose("test"));
            ensureGreen("test");
        }

        logger.info("--> decommission the second node");
        client().admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._name", node_1))
            .execute().actionGet();
        waitForRelocation();
        ensureGreen("test");

        logger.info("--> verify all are allocated on node1 now");
        ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@ -84,12 +88,16 @@ public class FilteringAllocationIT extends ESIntegTestCase {
            }
        }

        if (closed) {
            assertAcked(client().admin().indices().prepareOpen("test"));
        }

        client().admin().indices().prepareRefresh().execute().actionGet();
        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery())
            .execute().actionGet().getHits().getTotalHits().value, equalTo(100L));
    }

    public void testDisablingAllocationFiltering() throws Exception {
    public void testDisablingAllocationFiltering() {
        logger.info("--> starting 2 nodes");
        List<String> nodesIds = internalCluster().startNodes(2);
        final String node_0 = nodesIds.get(0);
@ -97,11 +105,11 @@ public class FilteringAllocationIT extends ESIntegTestCase {
        assertThat(cluster().size(), equalTo(2));

        logger.info("--> creating an index with no replicas");
        client().admin().indices().prepareCreate("test")
            .setSettings(Settings.builder().put("index.number_of_shards", 2).put("index.number_of_replicas", 0))
            .execute().actionGet();

        ensureGreen();
        createIndex("test", Settings.builder()
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .build());
        ensureGreen("test");

        logger.info("--> index some data");
        for (int i = 0; i < 100; i++) {
@ -110,6 +118,13 @@ public class FilteringAllocationIT extends ESIntegTestCase {
        client().admin().indices().prepareRefresh().execute().actionGet();
        assertThat(client().prepareSearch().setSize(0).setQuery(QueryBuilders.matchAllQuery())
            .execute().actionGet().getHits().getTotalHits().value, equalTo(100L));

        final boolean closed = randomBoolean();
        if (closed) {
            assertAcked(client().admin().indices().prepareClose("test"));
            ensureGreen("test");
        }

        ClusterState clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
        IndexRoutingTable indexRoutingTable = clusterState.routingTable().index("test");
        int numShardsOnNode1 = 0;
@ -133,7 +148,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
            .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", node_0))
            .execute().actionGet();
        client().admin().cluster().prepareReroute().get();
        ensureGreen();
        ensureGreen("test");

        logger.info("--> verify all shards are allocated on node_1 now");
        clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@ -149,7 +164,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
            .setSettings(Settings.builder().put("index.routing.allocation.exclude._name", ""))
            .execute().actionGet();
        client().admin().cluster().prepareReroute().get();
        ensureGreen();
        ensureGreen("test");

        logger.info("--> verify that there are shards allocated on both nodes now");
        clusterState = client().admin().cluster().prepareState().execute().actionGet().getState();
@ -166,7 +181,7 @@ public class FilteringAllocationIT extends ESIntegTestCase {
        assertEquals("invalid IP address [192.168.1.1.] for [" + filterSetting.getKey() + ipKey + "]", e.getMessage());
    }

    public void testTransientSettingsStillApplied() throws Exception {
    public void testTransientSettingsStillApplied() {
        List<String> nodes = internalCluster().startNodes(6);
        Set<String> excludeNodes = new HashSet<>(nodes.subList(0, 3));
        Set<String> includeNodes = new HashSet<>(nodes.subList(3, 6));
@ -177,6 +192,10 @@ public class FilteringAllocationIT extends ESIntegTestCase {
        client().admin().indices().prepareCreate("test").get();
        ensureGreen("test");

        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test"));
        }

        Settings exclude = Settings.builder().put("cluster.routing.allocation.exclude._name",
            Strings.collectionToCommaDelimitedString(excludeNodes)).build();
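testDecommissionNodeNoReplicas above now covers the closed-index variant end to
end: close the index, exclude a node, wait for the closed shards to relocate,
reopen and verify that all 100 documents survived. In outline, with the node
name a placeholder:

    assertAcked(client().admin().indices().prepareClose("test"));
    client().admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.builder().put("cluster.routing.allocation.exclude._name", "node-1"))
        .get();
    ensureGreen("test");
    assertAcked(client().admin().indices().prepareOpen("test"));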
@ -39,13 +39,12 @@ public class SimpleAllocationIT extends ESIntegTestCase {
        return 1;
    }

    /**
     * Test for
     * https://groups.google.com/d/msg/elasticsearch/y-SY_HyoB-8/EZdfNt9VO44J
     */
    public void testSaneAllocation() {
        assertAcked(prepareCreate("test", 3));
        ensureGreen();
        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test"));
        }
        ensureGreen("test");

        ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
        assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
@ -56,7 +55,7 @@ public class SimpleAllocationIT extends ESIntegTestCase {
        }
        client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 0)).execute().actionGet();
        ensureGreen();
        ensureGreen("test");
        state = client().admin().cluster().prepareState().execute().actionGet().getState();

        assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
@ -68,11 +67,14 @@ public class SimpleAllocationIT extends ESIntegTestCase {

        // create another index
        assertAcked(prepareCreate("test2", 3));
        ensureGreen();
        if (randomBoolean()) {
            assertAcked(client().admin().indices().prepareClose("test2"));
        }
        ensureGreen("test2");

        client().admin().indices().prepareUpdateSettings("test")
            .setSettings(Settings.builder().put(SETTING_NUMBER_OF_REPLICAS, 1)).execute().actionGet();
        ensureGreen();
        ensureGreen("test");
        state = client().admin().cluster().prepareState().execute().actionGet().getState();

        assertThat(state.getRoutingNodes().unassigned().size(), equalTo(0));
@ -155,8 +155,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
            .put(indexBuilder("foofoo").putAlias(AliasMetaData.builder("barbaz")));
        ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();

        IndicesOptions lenientExpand = IndicesOptions.fromOptions(true, true, true, true);
        IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.lenientExpandOpen(), lenientExpand};
        IndicesOptions[] indicesOptions = new IndicesOptions[]{IndicesOptions.lenientExpandOpen(), IndicesOptions.lenientExpand()};
        for (IndicesOptions options : indicesOptions) {
            IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
            String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo");
@ -199,7 +198,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
        String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
        assertEquals(3, results.length);

        context = new IndexNameExpressionResolver.Context(state, lenientExpand);
        context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpand());
        results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
        assertEquals(Arrays.toString(results), 4, results.length);

@ -208,7 +207,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
        assertEquals(3, results.length);
        assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo"));

        context = new IndexNameExpressionResolver.Context(state, lenientExpand);
        context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpand());
        results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*");
        assertEquals(4, results.length);
        assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed"));
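The IndicesOptions.lenientExpand() helper used above replaces the locally
built constant that the old lines defined; per the removed line, the two forms
are equivalent, with the fromOptions booleans meaning ignoreUnavailable,
allowNoIndices, expandWildcardsOpen and expandWildcardsClosed:

    IndicesOptions lenientExpand = IndicesOptions.fromOptions(true, true, true, true);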
@ -35,6 +35,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.shards.ClusterShardLimitIT;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.ValidationException;
@ -114,6 +115,60 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
        }
    }

    public void testCloseRoutingTableRemovesRoutingTable() {
        final Set<Index> nonBlockedIndices = new HashSet<>();
        final Map<Index, ClusterBlock> blockedIndices = new HashMap<>();
        final Map<Index, AcknowledgedResponse> results = new HashMap<>();
        final ClusterBlock closingBlock = MetaDataIndexStateService.createIndexClosingBlock();

        ClusterState state = ClusterState.builder(new ClusterName("testCloseRoutingTableRemovesRoutingTable")).build();
        for (int i = 0; i < randomIntBetween(1, 25); i++) {
            final String indexName = "index-" + i;

            if (randomBoolean()) {
                state = addOpenedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state);
                nonBlockedIndices.add(state.metaData().index(indexName).getIndex());
            } else {
                state = addBlockedIndex(indexName, randomIntBetween(1, 5), randomIntBetween(0, 5), state, closingBlock);
                blockedIndices.put(state.metaData().index(indexName).getIndex(), closingBlock);
                results.put(state.metaData().index(indexName).getIndex(), new AcknowledgedResponse(randomBoolean()));
            }
        }

        state = ClusterState.builder(state)
            .nodes(DiscoveryNodes.builder(state.nodes())
                .add(new DiscoveryNode("old_node", buildNewFakeTransportAddress(), emptyMap(),
                    new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_0_0))
                .add(new DiscoveryNode("new_node", buildNewFakeTransportAddress(), emptyMap(),
                    new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_7_1_0)))
            .build();

        state = MetaDataIndexStateService.closeRoutingTable(state, blockedIndices, results);
        assertThat(state.metaData().indices().size(), equalTo(nonBlockedIndices.size() + blockedIndices.size()));

        for (Index nonBlockedIndex : nonBlockedIndices) {
            assertIsOpened(nonBlockedIndex.getName(), state);
            assertThat(state.blocks().hasIndexBlockWithId(nonBlockedIndex.getName(), INDEX_CLOSED_BLOCK_ID), is(false));
        }
        for (Index blockedIndex : blockedIndices.keySet()) {
            if (results.get(blockedIndex).isAcknowledged()) {
                IndexMetaData indexMetaData = state.metaData().index(blockedIndex);
                assertThat(indexMetaData.getState(), is(IndexMetaData.State.CLOSE));
                Settings indexSettings = indexMetaData.getSettings();
                assertThat(indexSettings.hasValue(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false));
                assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK), is(true));
                assertThat("Index must have only 1 block with [id=" + MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]",
                    state.blocks().indices().getOrDefault(blockedIndex.getName(), emptySet()).stream()
                        .filter(clusterBlock -> clusterBlock.id() == MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID).count(), equalTo(1L));
                assertThat("Index routing table should have been removed when closing the index on mixed cluster version",
                    state.routingTable().index(blockedIndex), nullValue());
            } else {
                assertIsOpened(blockedIndex.getName(), state);
                assertThat(state.blocks().hasIndexBlock(blockedIndex.getName(), closingBlock), is(true));
            }
        }
    }
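    // The nullValue() assertion above captures the mixed-version behaviour: as
    // long as a pre-7.1.0 node (here old_node on 7.0.0) is in the cluster, the
    // routing table of a closed index is removed rather than reinitialized. A
    // hypothetical sketch of the version gate this implies (the real
    // MetaDataIndexStateService check may differ):
    //
    //     boolean removeRoutingTable = state.nodes().getMinNodeVersion().before(Version.V_7_1_0);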

    public void testAddIndexClosedBlocks() {
        final ClusterState initialState = ClusterState.builder(new ClusterName("testAddIndexClosedBlocks")).build();
        {
@ -139,7 +194,6 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
            ClusterState updatedState = MetaDataIndexStateService.addIndexClosedBlocks(indices, blockedIndices, state);
            assertSame(state, updatedState);
            assertTrue(blockedIndices.isEmpty());

        }
        {
            final Map<Index, ClusterBlock> blockedIndices = new HashMap<>();
@ -190,14 +244,6 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
            ClusterState state = addOpenedIndex("index-1", randomIntBetween(1, 3), randomIntBetween(0, 3), initialState);
            state = addOpenedIndex("index-2", randomIntBetween(1, 3), randomIntBetween(0, 3), state);
            state = addOpenedIndex("index-3", randomIntBetween(1, 3), randomIntBetween(0, 3), state);
            final boolean mixedVersions = randomBoolean();
            if (mixedVersions) {
                state = ClusterState.builder(state)
                    .nodes(DiscoveryNodes.builder(state.nodes())
                        .add(new DiscoveryNode("old_node", buildNewFakeTransportAddress(), emptyMap(),
                            new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.V_6_0_0)))
                    .build();
            }

            Index index1 = state.metaData().index("index-1").getIndex();
            Index index2 = state.metaData().index("index-2").getIndex();
@ -209,11 +255,7 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {

            for (Index index : indices) {
                assertTrue(blockedIndices.containsKey(index));
                if (mixedVersions) {
                    assertIsClosed(index.getName(), updatedState);
                } else {
                    assertHasBlock(index.getName(), updatedState, blockedIndices.get(index));
                }
                assertHasBlock(index.getName(), updatedState, blockedIndices.get(index));
            }
        }
    }
@ -262,6 +304,32 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
            currentShards + "]/[" + maxShards + "] maximum shards open;", exception.getMessage());
    }

    public void testIsIndexVerifiedBeforeClosed() {
        final ClusterState initialState = ClusterState.builder(new ClusterName("testIsIndexMetaDataClosed")).build();
        {
            String indexName = "open";
            ClusterState state = addOpenedIndex(indexName, randomIntBetween(1, 3), randomIntBetween(0, 3), initialState);
            assertFalse(MetaDataIndexStateService.isIndexVerifiedBeforeClosed(state.getMetaData().index(indexName)));
        }
        {
            String indexName = "closed";
            ClusterState state = addClosedIndex(indexName, randomIntBetween(1, 3), randomIntBetween(0, 3), initialState);
            assertTrue(MetaDataIndexStateService.isIndexVerifiedBeforeClosed(state.getMetaData().index(indexName)));
        }
        {
            String indexName = "closed-no-setting";
            IndexMetaData indexMetaData = IndexMetaData.builder(indexName)
                .state(IndexMetaData.State.CLOSE)
                .creationDate(randomNonNegativeLong())
                .settings(Settings.builder()
                    .put(SETTING_VERSION_CREATED, Version.CURRENT)
                    .put(SETTING_NUMBER_OF_SHARDS, randomIntBetween(1, 3))
                    .put(SETTING_NUMBER_OF_REPLICAS, randomIntBetween(0, 3)))
                .build();
            assertFalse(MetaDataIndexStateService.isIndexVerifiedBeforeClosed(indexMetaData));
        }
    }

    public static ClusterState createClusterForShardLimitTest(int nodesInCluster, int openIndexShards, int openIndexReplicas,
                                                              int closedIndexShards, int closedIndexReplicas, Settings clusterSettings) {
        ImmutableOpenMap.Builder<String, DiscoveryNode> dataNodes = ImmutableOpenMap.builder();
@ -334,31 +402,35 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
                                             final int numReplicas,
                                             final IndexMetaData.State state,
                                             @Nullable final ClusterBlock block) {

        final Settings.Builder settings = Settings.builder()
            .put(SETTING_VERSION_CREATED, Version.CURRENT)
            .put(SETTING_NUMBER_OF_SHARDS, numShards)
            .put(SETTING_NUMBER_OF_REPLICAS, numReplicas);
        if (state == IndexMetaData.State.CLOSE) {
            settings.put(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true);
        }
        final IndexMetaData indexMetaData = IndexMetaData.builder(index)
            .state(state)
            .creationDate(randomNonNegativeLong())
            .settings(Settings.builder()
                .put(SETTING_VERSION_CREATED, Version.CURRENT)
                .put(SETTING_NUMBER_OF_SHARDS, numShards)
                .put(SETTING_NUMBER_OF_REPLICAS, numReplicas))
            .settings(settings)
            .build();

        final ClusterState.Builder clusterStateBuilder = ClusterState.builder(currentState);
        clusterStateBuilder.metaData(MetaData.builder(currentState.metaData()).put(indexMetaData, true));

        if (state == IndexMetaData.State.OPEN) {
            final IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex());
            for (int j = 0; j < indexMetaData.getNumberOfShards(); j++) {
                ShardId shardId = new ShardId(indexMetaData.getIndex(), j);
                IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
                indexShardRoutingBuilder.addShard(newShardRouting(shardId, randomAlphaOfLength(10), true, ShardRoutingState.STARTED));
                for (int k = 0; k < indexMetaData.getNumberOfReplicas(); k++) {
                    indexShardRoutingBuilder.addShard(newShardRouting(shardId, randomAlphaOfLength(10), false, ShardRoutingState.STARTED));
                }
                indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build());
        final IndexRoutingTable.Builder indexRoutingTable = IndexRoutingTable.builder(indexMetaData.getIndex());
        for (int j = 0; j < indexMetaData.getNumberOfShards(); j++) {
            ShardId shardId = new ShardId(indexMetaData.getIndex(), j);
            IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(shardId);
            indexShardRoutingBuilder.addShard(newShardRouting(shardId, randomAlphaOfLength(10), true, ShardRoutingState.STARTED));
            for (int k = 0; k < indexMetaData.getNumberOfReplicas(); k++) {
                indexShardRoutingBuilder.addShard(newShardRouting(shardId, randomAlphaOfLength(10), false, ShardRoutingState.STARTED));
            }
            clusterStateBuilder.routingTable(RoutingTable.builder(currentState.routingTable()).add(indexRoutingTable).build());
            indexRoutingTable.addIndexShard(indexShardRoutingBuilder.build());
        }
        clusterStateBuilder.routingTable(RoutingTable.builder(currentState.routingTable()).add(indexRoutingTable).build());

        if (block != null) {
            clusterStateBuilder.blocks(ClusterBlocks.builder().blocks(currentState.blocks()).addIndexBlock(index, block));
        }
@ -366,17 +438,33 @@ public class MetaDataIndexStateServiceTests extends ESTestCase {
    }

    private static void assertIsOpened(final String indexName, final ClusterState clusterState) {
        assertThat(clusterState.metaData().index(indexName).getState(), is(IndexMetaData.State.OPEN));
        final IndexMetaData indexMetaData = clusterState.metaData().indices().get(indexName);
        assertThat(indexMetaData.getState(), is(IndexMetaData.State.OPEN));
        assertThat(indexMetaData.getSettings().hasValue(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false));
        assertThat(clusterState.routingTable().index(indexName), notNullValue());
|
||||
assertThat(clusterState.blocks().hasIndexBlock(indexName, MetaDataIndexStateService.INDEX_CLOSED_BLOCK), is(false));
|
||||
assertThat(clusterState.routingTable().index(indexName), notNullValue());
|
||||
}
|
||||
|
||||
private static void assertIsClosed(final String indexName, final ClusterState clusterState) {
|
||||
assertThat(clusterState.metaData().index(indexName).getState(), is(IndexMetaData.State.CLOSE));
|
||||
assertThat(clusterState.routingTable().index(indexName), nullValue());
|
||||
final IndexMetaData indexMetaData = clusterState.metaData().indices().get(indexName);
|
||||
assertThat(indexMetaData.getState(), is(IndexMetaData.State.CLOSE));
|
||||
final Settings indexSettings = indexMetaData.getSettings();
|
||||
assertThat(indexSettings.hasValue(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(true));
|
||||
assertThat(indexSettings.getAsBoolean(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey(), false), is(true));
|
||||
assertThat(clusterState.blocks().hasIndexBlock(indexName, MetaDataIndexStateService.INDEX_CLOSED_BLOCK), is(true));
|
||||
assertThat("Index " + indexName + " must have only 1 block with [id=" + MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]",
|
||||
clusterState.blocks().indices().getOrDefault(indexName, emptySet()).stream()
|
||||
.filter(clusterBlock -> clusterBlock.id() == MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID).count(), equalTo(1L));
|
||||
|
||||
final IndexRoutingTable indexRoutingTable = clusterState.routingTable().index(indexName);
|
||||
assertThat(indexRoutingTable, notNullValue());
|
||||
|
||||
for(IndexShardRoutingTable shardRoutingTable : indexRoutingTable) {
|
||||
assertThat(shardRoutingTable.shards().stream().allMatch(ShardRouting::unassigned), is(true));
|
||||
assertThat(shardRoutingTable.shards().stream().map(ShardRouting::unassignedInfo).map(UnassignedInfo::getReason)
|
||||
.allMatch(info -> info == UnassignedInfo.Reason.INDEX_CLOSED), is(true));
|
||||
}
|
||||
}
|
||||
|
||||
private static void assertHasBlock(final String indexName, final ClusterState clusterState, final ClusterBlock closingBlock) {
|
||||
|
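A note on the `VERIFIED_BEFORE_CLOSE_SETTING` exercised throughout the tests above: closing an index now runs pre-closing sanity checks first, and only then stamps a private index setting that marks the index as safe to reinitialize. A minimal sketch of the predicate the assertions imply (inferred from the tests, not copied from the production MetaDataIndexStateService):

    // Sketch only: an index counts as verified-before-close when it is CLOSE
    // and carries the marker setting written by the new close logic.
    static boolean verifiedBeforeClose(final IndexMetaData indexMetaData) {
        return indexMetaData.getState() == IndexMetaData.State.CLOSE
            && indexMetaData.getSettings()
                   .hasValue(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey());
    }

This is why the "closed-no-setting" case above expects false: the CLOSE state alone is not enough, the marker has to be present too.
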
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ESAllocationTestCase;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.node.DiscoveryNodes.Builder;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
@@ -38,6 +39,7 @@ import java.util.Set;
import java.util.stream.Collectors;

import static org.elasticsearch.cluster.routing.ShardRoutingState.INITIALIZING;
import static org.elasticsearch.cluster.routing.ShardRoutingState.UNASSIGNED;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.is;
@@ -373,6 +375,36 @@ public class RoutingTableTests extends ESAllocationTestCase {
        assertFalse(IndexShardRoutingTable.Builder.distinctNodes(Arrays.asList(routing2, routing4)));
    }

    public void testAddAsRecovery() {
        {
            final IndexMetaData indexMetaData = createIndexMetaData(TEST_INDEX_1).state(IndexMetaData.State.OPEN).build();
            final RoutingTable routingTable = new RoutingTable.Builder().addAsRecovery(indexMetaData).build();
            assertThat(routingTable.hasIndex(TEST_INDEX_1), is(true));
            assertThat(routingTable.allShards(TEST_INDEX_1).size(), is(this.shardsPerIndex));
            assertThat(routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size(), is(this.shardsPerIndex));
        }
        {
            final IndexMetaData indexMetaData = createIndexMetaData(TEST_INDEX_1).state(IndexMetaData.State.CLOSE).build();
            final RoutingTable routingTable = new RoutingTable.Builder().addAsRecovery(indexMetaData).build();
            assertThat(routingTable.hasIndex(TEST_INDEX_1), is(false));
            expectThrows(IndexNotFoundException.class, () -> routingTable.allShards(TEST_INDEX_1));
        }
        {
            final IndexMetaData indexMetaData = createIndexMetaData(TEST_INDEX_1).build();
            final IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData)
                .state(IndexMetaData.State.CLOSE)
                .settings(Settings.builder()
                    .put(indexMetaData.getSettings())
                    .put(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)
                    .build())
                .settingsVersion(indexMetaData.getSettingsVersion() + 1);
            final RoutingTable routingTable = new RoutingTable.Builder().addAsRecovery(indexMetaDataBuilder.build()).build();
            assertThat(routingTable.hasIndex(TEST_INDEX_1), is(true));
            assertThat(routingTable.allShards(TEST_INDEX_1).size(), is(this.shardsPerIndex));
            assertThat(routingTable.index(TEST_INDEX_1).shardsWithState(UNASSIGNED).size(), is(this.shardsPerIndex));
        }
    }

    /** reverse engineer the in sync aid based on the given indexRoutingTable **/
    public static IndexMetaData updateActiveAllocations(IndexRoutingTable indexRoutingTable, IndexMetaData indexMetaData) {
        IndexMetaData.Builder imdBuilder = IndexMetaData.builder(indexMetaData);

@@ -40,6 +40,7 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.VersionUtils;

import java.io.IOException;
import java.nio.ByteBuffer;
@@ -54,6 +55,7 @@ import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;

public class UnassignedInfoTests extends ESAllocationTestCase {

    public void testReasonOrdinalOrder() {
        UnassignedInfo.Reason[] order = new UnassignedInfo.Reason[]{
            UnassignedInfo.Reason.INDEX_CREATED,
@@ -70,7 +72,8 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
            UnassignedInfo.Reason.REALLOCATED_REPLICA,
            UnassignedInfo.Reason.PRIMARY_FAILED,
            UnassignedInfo.Reason.FORCED_EMPTY_PRIMARY,
            UnassignedInfo.Reason.MANUAL_ALLOCATION,};
            UnassignedInfo.Reason.MANUAL_ALLOCATION,
            UnassignedInfo.Reason.INDEX_CLOSED,};
        for (int i = 0; i < order.length; i++) {
            assertThat(order[i].ordinal(), equalTo(i));
        }
@@ -95,6 +98,21 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
        assertThat(read.getNumFailedAllocations(), equalTo(meta.getNumFailedAllocations()));
    }

    public void testBwcSerialization() throws Exception {
        final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.INDEX_CLOSED, "message");
        BytesStreamOutput out = new BytesStreamOutput();
        out.setVersion(VersionUtils.randomVersionBetween(random(), Version.V_6_0_0, VersionUtils.getPreviousVersion(Version.V_7_0_0)));
        unassignedInfo.writeTo(out);
        out.close();

        UnassignedInfo read = new UnassignedInfo(out.bytes().streamInput());
        assertThat(read.getReason(), equalTo(UnassignedInfo.Reason.REINITIALIZED));
        assertThat(read.getUnassignedTimeInMillis(), equalTo(unassignedInfo.getUnassignedTimeInMillis()));
        assertThat(read.getMessage(), equalTo(unassignedInfo.getMessage()));
        assertThat(read.getDetails(), equalTo(unassignedInfo.getDetails()));
        assertThat(read.getNumFailedAllocations(), equalTo(unassignedInfo.getNumFailedAllocations()));
    }

    public void testIndexCreated() {
        MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder("test").settings(settings(Version.CURRENT))

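testBwcSerialization() above pins down the wire-compatibility story for the new reason: nodes before 7.0 have no INDEX_CLOSED ordinal, so the writer has to substitute a reason they understand. A hedged sketch of that mapping (the real logic lives in UnassignedInfo's serialization; this standalone helper is illustrative only):

    import org.elasticsearch.Version;
    import org.elasticsearch.cluster.routing.UnassignedInfo;

    // Assumed shape of the downgrade asserted above: INDEX_CLOSED becomes
    // REINITIALIZED when talking to a pre-7.0 node; other reasons pass through.
    static UnassignedInfo.Reason reasonForWire(final UnassignedInfo.Reason reason, final Version wireVersion) {
        if (reason == UnassignedInfo.Reason.INDEX_CLOSED && wireVersion.before(Version.V_7_0_0)) {
            return UnassignedInfo.Reason.REINITIALIZED;
        }
        return reason;
    }
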
@@ -24,6 +24,7 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.coordination.CoordinationMetaData;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.UUIDs;
@@ -210,11 +211,36 @@ public class ClusterStateUpdatersTests extends ESTestCase {
            .build();
        assertFalse(initialState.routingTable().hasIndex(index));

        final ClusterState newState = updateRoutingTable(initialState);

        assertTrue(newState.routingTable().hasIndex(index));
        assertThat(newState.routingTable().version(), is(0L));
        assertThat(newState.routingTable().allShards(index.getName()).size(), is(numOfShards));
        {
            final ClusterState newState = updateRoutingTable(initialState);
            assertTrue(newState.routingTable().hasIndex(index));
            assertThat(newState.routingTable().version(), is(0L));
            assertThat(newState.routingTable().allShards(index.getName()).size(), is(numOfShards));
        }
        {
            final ClusterState newState = updateRoutingTable(ClusterState.builder(initialState)
                .metaData(MetaData.builder(initialState.metaData())
                    .put(IndexMetaData.builder(initialState.metaData().index("test"))
                        .state(IndexMetaData.State.CLOSE))
                    .build())
                .build());
            assertFalse(newState.routingTable().hasIndex(index));
        }
        {
            final ClusterState newState = updateRoutingTable(ClusterState.builder(initialState)
                .metaData(MetaData.builder(initialState.metaData())
                    .put(IndexMetaData.builder(initialState.metaData().index("test"))
                        .state(IndexMetaData.State.CLOSE)
                        .settings(Settings.builder()
                            .put(initialState.metaData().index("test").getSettings())
                            .put(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey(), true)
                            .build())
                    ).build())
                .build());
            assertTrue(newState.routingTable().hasIndex(index));
            assertThat(newState.routingTable().version(), is(0L));
            assertThat(newState.routingTable().allShards(index.getName()).size(), is(numOfShards));
        }
    }

    public void testMixCurrentAndRecoveredState() {

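The three blocks in the test above encode the recovery-time rule in one place: an open index gets routing entries, a closed index without the marker setting is skipped, and a closed index that was verified before close is recovered like any other. A sketch of that per-index decision while rebuilding the routing table (an assumed shape, not the literal production loop):

    // Sketch: which indices re-enter the routing table on state recovery.
    static void addAllAsRecovery(final MetaData metaData, final RoutingTable.Builder builder) {
        for (final IndexMetaData indexMetaData : metaData) {
            if (indexMetaData.getState() == IndexMetaData.State.OPEN
                || MetaDataIndexStateService.isIndexVerifiedBeforeClosed(indexMetaData)) {
                builder.addAsRecovery(indexMetaData);
            }
        }
    }
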
@@ -59,11 +59,12 @@ import java.util.concurrent.TimeUnit;

import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.notNullValue;

@ClusterScope(scope = Scope.TEST, numDataNodes = 0)
public class GatewayIndexStateIT extends ESIntegTestCase {
@@ -119,11 +120,11 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
        client().prepareIndex("test", "type1", "1").setSource("field1", "value1").get();

        logger.info("--> closing test index...");
        client().admin().indices().prepareClose("test").get();
        assertAcked(client().admin().indices().prepareClose("test"));

        stateResponse = client().admin().cluster().prepareState().execute().actionGet();
        assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
        assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
        assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());

        logger.info("--> verifying that the state is green");
        ensureGreen();
@@ -142,7 +143,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
        ensureGreen();

        logger.info("--> opening the first index again...");
        client().admin().indices().prepareOpen("test").execute().actionGet();
        assertAcked(client().admin().indices().prepareOpen("test"));

        logger.info("--> verifying that the state is green");
        ensureGreen();
@@ -158,10 +159,10 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
        assertThat(getResponse.isExists(), equalTo(true));

        logger.info("--> closing test index...");
        client().admin().indices().prepareClose("test").execute().actionGet();
        assertAcked(client().admin().indices().prepareClose("test"));
        stateResponse = client().admin().cluster().prepareState().execute().actionGet();
        assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
        assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
        assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());

        logger.info("--> restarting nodes...");
        internalCluster().fullRestart();
@@ -170,7 +171,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {

        stateResponse = client().admin().cluster().prepareState().execute().actionGet();
        assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
        assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
        assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());

        logger.info("--> trying to index into a closed index ...");
        try {
@@ -258,11 +259,11 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
        }

        logger.info("--> closing test index...");
        client().admin().indices().prepareClose("test").execute().actionGet();
        assertAcked(client().admin().indices().prepareClose("test"));

        ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
        assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
        assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
        assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());

        logger.info("--> opening the index...");
        client().admin().indices().prepareOpen("test").execute().actionGet();

@@ -34,6 +34,7 @@ import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.InternalSettingsPlugin;
@@ -47,6 +48,7 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

import static org.elasticsearch.test.InternalSettingsPlugin.TRANSLOG_RETENTION_CHECK_INTERVAL_SETTING;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.hamcrest.core.IsEqual.equalTo;

/** Unit test(s) for IndexService */
@@ -109,7 +111,6 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
        latch2.get().countDown();
        assertEquals(2, count.get());

        task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueMillis(1000000)) {
            @Override
            protected void runInternal() {
@@ -117,6 +118,34 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
            }
        };
        assertTrue(task.mustReschedule());

        // now close the index
        final Index index = indexService.index();
        assertAcked(client().admin().indices().prepareClose(index.getName()));
        awaitBusy(() -> getInstanceFromNode(IndicesService.class).hasIndex(index));

        final IndexService closedIndexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index);
        assertNotSame(indexService, closedIndexService);
        assertFalse(task.mustReschedule());
        assertFalse(task.isClosed());
        assertEquals(1000000, task.getInterval().millis());

        // now reopen the index
        assertAcked(client().admin().indices().prepareOpen(index.getName()));
        awaitBusy(() -> getInstanceFromNode(IndicesService.class).hasIndex(index));
        indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index);
        assertNotSame(closedIndexService, indexService);

        task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueMillis(100000)) {
            @Override
            protected void runInternal() {

            }
        };
        assertTrue(task.mustReschedule());
        assertFalse(task.isClosed());
        assertTrue(task.isScheduled());

        indexService.close("simon says", false);
        assertFalse("no shards left", task.mustReschedule());
        assertTrue(task.isScheduled());
@@ -124,7 +153,7 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
        assertFalse(task.isScheduled());
    }

    public void testRefreshTaskIsUpdated() throws IOException {
    public void testRefreshTaskIsUpdated() throws Exception {
        IndexService indexService = createIndex("test", Settings.EMPTY);
        IndexService.AsyncRefreshTask refreshTask = indexService.getRefreshTask();
        assertEquals(1000, refreshTask.getInterval().millis());
@@ -167,12 +196,35 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
        assertTrue(refreshTask.isScheduled());
        assertFalse(refreshTask.isClosed());
        assertEquals(200, refreshTask.getInterval().millis());

        // now close the index
        final Index index = indexService.index();
        assertAcked(client().admin().indices().prepareClose(index.getName()));
        awaitBusy(() -> getInstanceFromNode(IndicesService.class).hasIndex(index));

        final IndexService closedIndexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index);
        assertNotSame(indexService, closedIndexService);
        assertNotSame(refreshTask, closedIndexService.getRefreshTask());
        assertFalse(closedIndexService.getRefreshTask().mustReschedule());
        assertFalse(closedIndexService.getRefreshTask().isClosed());
        assertEquals(200, closedIndexService.getRefreshTask().getInterval().millis());

        // now reopen the index
        assertAcked(client().admin().indices().prepareOpen(index.getName()));
        awaitBusy(() -> getInstanceFromNode(IndicesService.class).hasIndex(index));
        indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index);
        assertNotSame(closedIndexService, indexService);
        refreshTask = indexService.getRefreshTask();
        assertTrue(indexService.getRefreshTask().mustReschedule());
        assertTrue(refreshTask.isScheduled());
        assertFalse(refreshTask.isClosed());

        indexService.close("simon says", false);
        assertFalse(refreshTask.isScheduled());
        assertTrue(refreshTask.isClosed());
    }

    public void testFsyncTaskIsRunning() throws IOException {
    public void testFsyncTaskIsRunning() throws Exception {
        Settings settings = Settings.builder()
            .put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.ASYNC).build();
        IndexService indexService = createIndex("test", settings);
@@ -182,6 +234,28 @@ public class IndexServiceTests extends ESSingleNodeTestCase {
        assertTrue(fsyncTask.mustReschedule());
        assertTrue(fsyncTask.isScheduled());

        // now close the index
        final Index index = indexService.index();
        assertAcked(client().admin().indices().prepareClose(index.getName()));
        awaitBusy(() -> getInstanceFromNode(IndicesService.class).hasIndex(index));

        final IndexService closedIndexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index);
        assertNotSame(indexService, closedIndexService);
        assertNotSame(fsyncTask, closedIndexService.getFsyncTask());
        assertFalse(closedIndexService.getFsyncTask().mustReschedule());
        assertFalse(closedIndexService.getFsyncTask().isClosed());
        assertEquals(5000, closedIndexService.getFsyncTask().getInterval().millis());

        // now reopen the index
        assertAcked(client().admin().indices().prepareOpen(index.getName()));
        awaitBusy(() -> getInstanceFromNode(IndicesService.class).hasIndex(index));
        indexService = getInstanceFromNode(IndicesService.class).indexServiceSafe(index);
        assertNotSame(closedIndexService, indexService);
        fsyncTask = indexService.getFsyncTask();
        assertTrue(indexService.getRefreshTask().mustReschedule());
        assertTrue(fsyncTask.isScheduled());
        assertFalse(fsyncTask.isClosed());

        indexService.close("simon says", false);
        assertFalse(fsyncTask.isScheduled());
        assertTrue(fsyncTask.isClosed());

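The three task tests above share one pattern: after a close, the new IndexService hands back tasks whose mustReschedule() is false while isClosed() stays false, so background refresh/fsync/global-checkpoint work is parked rather than torn down, and it resumes once the index is reopened into a fresh IndexService. Spelled out with the same API the tests use (names taken from the diff; the empty body is a placeholder):

    // A BaseAsyncTask reschedules itself every interval while mustReschedule()
    // holds; for an index backed by a NoOpEngine it simply stops firing.
    IndexService.BaseAsyncTask task = new IndexService.BaseAsyncTask(indexService, TimeValue.timeValueSeconds(5)) {
        @Override
        protected void runInternal() {
            // periodic work, e.g. refresh or translog fsync; idle once closed
        }
    };
    assertTrue(task.mustReschedule()); // true only while the index is open
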
@@ -0,0 +1,54 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.engine;

import org.elasticsearch.cluster.routing.RecoverySource.ExistingStoreRecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexShardTestCase;

import java.io.IOException;

import static org.elasticsearch.cluster.routing.ShardRoutingHelper.initWithSameId;

public class NoOpEngineRecoveryTests extends IndexShardTestCase {

    public void testRecoverFromNoOp() throws IOException {
        final int nbDocs = scaledRandomIntBetween(1, 100);

        final IndexShard indexShard = newStartedShard(true);
        for (int i = 0; i < nbDocs; i++) {
            indexDoc(indexShard, "_doc", String.valueOf(i));
        }
        indexShard.close("test", true);

        final ShardRouting shardRouting = indexShard.routingEntry();
        IndexShard primary = reinitShard(indexShard, initWithSameId(shardRouting, ExistingStoreRecoverySource.INSTANCE), NoOpEngine::new);
        recoverShardFromStore(primary);
        assertEquals(primary.seqNoStats().getMaxSeqNo(), primary.getMaxSeqNoOfUpdatesOrDeletes());
        assertEquals(nbDocs, primary.docStats().getCount());

        IndexShard replica = newShard(false, Settings.EMPTY, NoOpEngine::new);
        recoverReplica(replica, primary, true);
        assertEquals(replica.seqNoStats().getMaxSeqNo(), replica.getMaxSeqNoOfUpdatesOrDeletes());
        assertEquals(nbDocs, replica.docStats().getCount());
        closeShards(primary, replica);
    }
}

@@ -0,0 +1,158 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.engine;

import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.NoMergePolicy;
import org.apache.lucene.store.LockObtainFailedException;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.core.internal.io.IOUtils;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.seqno.ReplicationTracker;
import org.elasticsearch.index.seqno.SequenceNumbers;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.index.translog.TranslogDeletionPolicy;
import org.elasticsearch.test.IndexSettingsModule;

import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Path;
import java.util.Collections;
import java.util.concurrent.atomic.AtomicLong;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;

public class NoOpEngineTests extends EngineTestCase {
    private static final IndexSettings INDEX_SETTINGS = IndexSettingsModule.newIndexSettings("index", Settings.EMPTY);

    public void testNoopEngine() throws IOException {
        engine.close();
        final NoOpEngine engine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir));
        expectThrows(UnsupportedOperationException.class, () -> engine.syncFlush(null, null));
        assertThat(engine.refreshNeeded(), equalTo(false));
        assertThat(engine.shouldPeriodicallyFlush(), equalTo(false));
        engine.close();
    }

    public void testTwoNoopEngines() throws IOException {
        engine.close();
        // Ensure that we can't open two noop engines for the same store
        final EngineConfig engineConfig = noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir);
        try (NoOpEngine ignored = new NoOpEngine(engineConfig)) {
            UncheckedIOException e = expectThrows(UncheckedIOException.class, () -> new NoOpEngine(engineConfig));
            assertThat(e.getCause(), instanceOf(LockObtainFailedException.class));
        }
    }

    public void testNoopAfterRegularEngine() throws IOException {
        int docs = randomIntBetween(1, 10);
        ReplicationTracker tracker = (ReplicationTracker) engine.config().getGlobalCheckpointSupplier();
        ShardRouting routing = TestShardRouting.newShardRouting("test", shardId.id(), "node",
            null, true, ShardRoutingState.STARTED, allocationId);
        IndexShardRoutingTable table = new IndexShardRoutingTable.Builder(shardId).addShard(routing).build();
        tracker.updateFromMaster(1L, Collections.singleton(allocationId.getId()), table, Collections.emptySet());
        tracker.activatePrimaryMode(SequenceNumbers.NO_OPS_PERFORMED);
        for (int i = 0; i < docs; i++) {
            ParsedDocument doc = testParsedDocument("" + i, null, testDocumentWithTextField(), B_1, null);
            engine.index(indexForDoc(doc));
            tracker.updateLocalCheckpoint(allocationId.getId(), i);
        }

        flushAndTrimTranslog(engine);

        long localCheckpoint = engine.getLocalCheckpoint();
        long maxSeqNo = engine.getSeqNoStats(100L).getMaxSeqNo();
        engine.close();

        final NoOpEngine noOpEngine = new NoOpEngine(noOpConfig(INDEX_SETTINGS, store, primaryTranslogDir, tracker));
        assertThat(noOpEngine.getLocalCheckpoint(), equalTo(localCheckpoint));
        assertThat(noOpEngine.getSeqNoStats(100L).getMaxSeqNo(), equalTo(maxSeqNo));
        try (Engine.IndexCommitRef ref = noOpEngine.acquireLastIndexCommit(false)) {
            try (IndexReader reader = DirectoryReader.open(ref.getIndexCommit())) {
                assertThat(reader.numDocs(), equalTo(docs));
            }
        }
        noOpEngine.close();
    }

    public void testNoOpEngineDocStats() throws Exception {
        IOUtils.close(engine, store);
        final AtomicLong globalCheckpoint = new AtomicLong(SequenceNumbers.NO_OPS_PERFORMED);
        try (Store store = createStore()) {
            Path translogPath = createTempDir();
            EngineConfig config = config(defaultSettings, store, translogPath, NoMergePolicy.INSTANCE, null, null, globalCheckpoint::get);
            final int numDocs = scaledRandomIntBetween(10, 3000);
            int deletions = 0;
            try (InternalEngine engine = createEngine(config)) {
                for (int i = 0; i < numDocs; i++) {
                    engine.index(indexForDoc(createParsedDoc(Integer.toString(i), null)));
                    if (rarely()) {
                        engine.flush();
                    }
                    globalCheckpoint.set(engine.getLocalCheckpoint());
                }

                for (int i = 0; i < numDocs; i++) {
                    if (randomBoolean()) {
                        String delId = Integer.toString(i);
                        Engine.DeleteResult result = engine.delete(new Engine.Delete("test", delId, newUid(delId), primaryTerm.get()));
                        assertTrue(result.isFound());
                        globalCheckpoint.set(engine.getLocalCheckpoint());
                        deletions += 1;
                    }
                }
                engine.getLocalCheckpointTracker().waitForOpsToComplete(numDocs + deletions - 1);
                flushAndTrimTranslog(engine);
            }

            final DocsStats expectedDocStats;
            try (InternalEngine engine = createEngine(config)) {
                expectedDocStats = engine.docStats();
            }

            try (NoOpEngine noOpEngine = new NoOpEngine(config)) {
                assertEquals(expectedDocStats.getCount(), noOpEngine.docStats().getCount());
                assertEquals(expectedDocStats.getDeleted(), noOpEngine.docStats().getDeleted());
                assertEquals(expectedDocStats.getTotalSizeInBytes(), noOpEngine.docStats().getTotalSizeInBytes());
                assertEquals(expectedDocStats.getAverageSizeInBytes(), noOpEngine.docStats().getAverageSizeInBytes());
            } catch (AssertionError e) {
                logger.error(config.getMergePolicy());
                throw e;
            }
        }
    }

    private void flushAndTrimTranslog(final InternalEngine engine) {
        engine.flush(true, true);
        final TranslogDeletionPolicy deletionPolicy = engine.getTranslog().getDeletionPolicy();
        deletionPolicy.setRetentionSizeInBytes(-1);
        deletionPolicy.setRetentionAgeInMillis(-1);
        deletionPolicy.setMinTranslogGenerationForRecovery(engine.getTranslog().getGeneration().translogFileGeneration);
        engine.flush(true, true);
    }
}

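What testNoOpEngineDocStats() above relies on is that a NoOpEngine can report document statistics without ever opening an IndexWriter: the numbers come from the last commit on disk. A self-contained sketch of that idea in plain Lucene (assumptions: reading straight from the store's directory; the production engine may cache or derive these differently, and sizes are omitted here):

    // Sketch: doc stats for a closed shard, read straight from the last commit.
    static DocsStats docStatsFromLastCommit(final Store store) throws IOException {
        try (DirectoryReader reader = DirectoryReader.open(store.directory())) {
            final long count = reader.numDocs();
            final long deleted = reader.numDeletedDocs();
            return new DocsStats(count, deleted, 0L); // size fields omitted in this sketch
        }
    }
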
@@ -38,7 +38,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.CheckedRunnable;
@@ -62,6 +61,7 @@ import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.CommitStats;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.engine.NoOpEngine;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.mapper.SourceToParse;
@@ -87,7 +87,6 @@ import java.io.IOException;
import java.io.UncheckedIOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
@@ -103,14 +102,16 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.atomic.AtomicReference;
import java.util.function.Predicate;
import java.util.stream.Stream;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomAsciiLettersOfLength;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.cluster.routing.TestShardRouting.newShardRouting;
import static org.elasticsearch.index.seqno.SequenceNumbers.NO_OPS_PERFORMED;
import static org.elasticsearch.index.seqno.SequenceNumbers.UNASSIGNED_SEQ_NO;
import static org.elasticsearch.index.shard.IndexShardTestCase.getTranslog;
@@ -279,73 +280,63 @@ public class IndexShardIT extends ESSingleNodeTestCase {
    }

    public void testIndexCanChangeCustomDataPath() throws Exception {
        Environment env = getInstanceFromNode(Environment.class);
        Path idxPath = env.sharedDataFile().resolve(randomAlphaOfLength(10));
        final String INDEX = "idx";
        Path startDir = idxPath.resolve("start-" + randomAlphaOfLength(10));
        Path endDir = idxPath.resolve("end-" + randomAlphaOfLength(10));
        logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString());
        logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString());
        // temp dirs are automatically created, but the end dir is what
        // startDir is going to be renamed as, so it needs to be deleted
        // otherwise we get all sorts of errors about the directory
        // already existing
        IOUtils.rm(endDir);
        final String index = "test-custom-data-path";
        final Path sharedDataPath = getInstanceFromNode(Environment.class).sharedDataFile().resolve(randomAsciiLettersOfLength(10));
        final Path indexDataPath = sharedDataPath.resolve("start-" + randomAsciiLettersOfLength(10));

        Settings sb = Settings.builder()
            .put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
            .build();
        Settings sb2 = Settings.builder()
            .put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
            .build();
        logger.info("--> creating index [{}] with data_path [{}]", index, indexDataPath);
        createIndex(index, Settings.builder().put(IndexMetaData.SETTING_DATA_PATH, indexDataPath.toAbsolutePath().toString()).build());
        client().prepareIndex(index, "bar", "1").setSource("foo", "bar").setRefreshPolicy(IMMEDIATE).get();
        ensureGreen(index);

        logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
        createIndex(INDEX, sb);
        ensureGreen(INDEX);
        client().prepareIndex(INDEX, "bar", "1").setSource("{}", XContentType.JSON).setRefreshPolicy(IMMEDIATE).get();
        assertHitCount(client().prepareSearch(index).setSize(0).get(), 1L);

        SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
        assertThat("found the hit", resp.getHits().getTotalHits().value, equalTo(1L));

        logger.info("--> closing the index [{}]", INDEX);
        client().admin().indices().prepareClose(INDEX).get();
        logger.info("--> closing the index [{}]", index);
        assertAcked(client().admin().indices().prepareClose(index));
        logger.info("--> index closed, re-opening...");
        client().admin().indices().prepareOpen(INDEX).get();
        assertAcked(client().admin().indices().prepareOpen(index));
        logger.info("--> index re-opened");
        ensureGreen(INDEX);
        ensureGreen(index);

        resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
        assertThat("found the hit", resp.getHits().getTotalHits().value, equalTo(1L));
        assertHitCount(client().prepareSearch(index).setSize(0).get(), 1L);

        // Now, try closing and changing the settings
        logger.info("--> closing the index [{}] before updating data_path", index);
        assertAcked(client().admin().indices().prepareClose(index));

        logger.info("--> closing the index [{}]", INDEX);
        client().admin().indices().prepareClose(INDEX).get();
        final Path newIndexDataPath = sharedDataPath.resolve("end-" + randomAlphaOfLength(10));
        IOUtils.rm(newIndexDataPath);

        logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName());
        assert Files.exists(endDir) == false : "end directory should not exist!";
        Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING);
        logger.info("--> copying data on disk from [{}] to [{}]", indexDataPath, newIndexDataPath);
        assert Files.exists(newIndexDataPath) == false : "new index data path directory should not exist!";
        try (Stream<Path> stream = Files.walk(indexDataPath)) {
            stream.forEach(path -> {
                try {
                    if (path.endsWith(".lock") == false) {
                        Files.copy(path, newIndexDataPath.resolve(indexDataPath.relativize(path)));
                    }
                } catch (final Exception e) {
                    logger.error("Failed to copy data path directory", e);
                    fail();
                }
            });
        }

        logger.info("--> updating settings...");
        client().admin().indices().prepareUpdateSettings(INDEX)
            .setSettings(sb2)
            .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
            .get();

        assert Files.exists(startDir) == false : "start dir shouldn't exist";
        logger.info("--> updating data_path to [{}] for index [{}]", newIndexDataPath, index);
        assertAcked(client().admin().indices().prepareUpdateSettings(index)
            .setSettings(Settings.builder().put(IndexMetaData.SETTING_DATA_PATH, newIndexDataPath.toAbsolutePath().toString()).build())
            .setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true)));

        logger.info("--> settings updated and files moved, re-opening index");
        client().admin().indices().prepareOpen(INDEX).get();
        assertAcked(client().admin().indices().prepareOpen(index));
        logger.info("--> index re-opened");
        ensureGreen(INDEX);
        ensureGreen(index);

        resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
        assertThat("found the hit", resp.getHits().getTotalHits().value, equalTo(1L));
        assertHitCount(client().prepareSearch(index).setSize(0).get(), 1L);

        assertAcked(client().admin().indices().prepareDelete(INDEX));
        assertAcked(client().admin().indices().prepareDelete(index));
        assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
        assertPathHasBeenCleared(startDir.toAbsolutePath());
        assertPathHasBeenCleared(endDir.toAbsolutePath());
        assertPathHasBeenCleared(newIndexDataPath.toAbsolutePath());
    }

    public void testMaybeFlush() throws Exception {
@@ -682,7 +673,7 @@ public class IndexShardIT extends ESSingleNodeTestCase {
    }

    private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) {
        ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(),
        ShardRouting shardRouting = newShardRouting(existingShardRouting.shardId(),
            existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING,
            existingShardRouting.allocationId());
        shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"),
@@ -919,4 +910,28 @@ public class IndexShardIT extends ESSingleNodeTestCase {
        assertThat(opsFromLucene, equalTo(opsFromTranslog));
    }
}

    /**
     * Test that the {@link org.elasticsearch.index.engine.NoOpEngine} takes precedence over other
     * engine factories if the index is closed.
     */
    public void testNoOpEngineFactoryTakesPrecedence() {
        final String indexName = "closed-index";
        createIndex(indexName, Settings.builder().put("index.number_of_shards", 1).put("index.number_of_replicas", 0).build());
        ensureGreen();

        assertAcked(client().admin().indices().prepareClose(indexName));
        ensureGreen();

        final ClusterService clusterService = getInstanceFromNode(ClusterService.class);
        final ClusterState clusterState = clusterService.state();

        final IndexMetaData indexMetaData = clusterState.metaData().index(indexName);
        final IndicesService indicesService = getInstanceFromNode(IndicesService.class);
        final IndexService indexService = indicesService.indexServiceSafe(indexMetaData.getIndex());

        for (IndexShard indexShard : indexService) {
            assertThat(indexShard.getEngine(), instanceOf(NoOpEngine.class));
        }
    }
}

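testNoOpEngineFactoryTakesPrecedence() above documents the selection rule rather than just exercising it: once an index is closed, its shards must be wired to NoOpEngine even when a plugin contributes its own engine factory. A hypothetical reduction of that rule (the actual wiring sits elsewhere in the node; this is not its code):

    // Assumed precedence illustrated by the test: closed index => NoOpEngine,
    // otherwise whatever factory the plugin (or the default) provides.
    static EngineFactory engineFactoryFor(final IndexMetaData indexMetaData, final EngineFactory configured) {
        if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
            return NoOpEngine::new;
        }
        return configured;
    }
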
@@ -19,6 +19,7 @@
package org.elasticsearch.indices;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -211,8 +212,13 @@ public class IndicesLifecycleListenerIT extends ESIntegTestCase {
        assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1), equalTo(6));
        assertThat(stateChangeListenerNode1.afterCloseSettings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1), equalTo(1));

        assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED);
        assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED);
        if (Version.CURRENT.onOrAfter(Version.V_7_1_0)) {
            assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED);
            assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED, CREATED, RECOVERING, POST_RECOVERY, STARTED);
        } else {
            assertShardStatesMatch(stateChangeListenerNode1, 6, CLOSED);
            assertShardStatesMatch(stateChangeListenerNode2, 6, CLOSED);
        }
    }

    private static void assertShardStatesMatch(final IndexShardStateChangeListener stateChangeListener,

@@ -80,7 +80,6 @@ import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;
import java.util.stream.Stream;

@@ -272,57 +271,66 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
    }

    public void testPendingTasks() throws Exception {
        IndicesService indicesService = getIndicesService();
        IndexService test = createIndex("test");
        final IndexService indexService = createIndex("test");
        final Index index = indexService.index();
        final IndexSettings indexSettings = indexService.getIndexSettings();

        assertTrue(test.hasShard(0));
        ShardPath path = test.getShardOrNull(0).shardPath();
        assertTrue(test.getShardOrNull(0).routingEntry().started());
        ShardPath shardPath = ShardPath.loadShardPath(logger, getNodeEnvironment(), new ShardId(test.index(), 0), test.getIndexSettings());
        assertEquals(shardPath, path);
        try {
            indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));
            fail("can't get lock");
        } catch (ShardLockObtainFailedException ex) {
        final IndexShard indexShard = indexService.getShardOrNull(0);
        assertNotNull(indexShard);
        assertTrue(indexShard.routingEntry().started());

        }
        assertTrue(path.exists());
        final ShardPath shardPath = indexShard.shardPath();
        assertEquals(ShardPath.loadShardPath(logger, getNodeEnvironment(), indexShard.shardId(), indexSettings), shardPath);

        final IndicesService indicesService = getIndicesService();
        expectThrows(ShardLockObtainFailedException.class, () ->
            indicesService.processPendingDeletes(index, indexSettings, TimeValue.timeValueMillis(0)));
        assertTrue(shardPath.exists());

        int numPending = 1;
        if (randomBoolean()) {
            indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());
            indicesService.addPendingDelete(indexShard.shardId(), indexSettings);
        } else {
            if (randomBoolean()) {
                numPending++;
                indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());
                indicesService.addPendingDelete(indexShard.shardId(), indexSettings);
            }
            indicesService.addPendingDelete(test.index(), test.getIndexSettings());
            indicesService.addPendingDelete(index, indexSettings);
        }
        assertAcked(client().admin().indices().prepareClose("test"));
        assertTrue(path.exists());

        assertEquals(indicesService.numPendingDeletes(test.index()), numPending);
        assertAcked(client().admin().indices().prepareClose("test"));
        assertTrue(shardPath.exists());
        ensureGreen("test");

        assertEquals(indicesService.numPendingDeletes(index), numPending);
        assertTrue(indicesService.hasUncompletedPendingDeletes());

        // shard lock released... we can now delete
        indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));
        assertEquals(indicesService.numPendingDeletes(test.index()), 0);
        assertFalse(indicesService.hasUncompletedPendingDeletes());
        assertFalse(path.exists());
        expectThrows(ShardLockObtainFailedException.class, () ->
            indicesService.processPendingDeletes(index, indexSettings, TimeValue.timeValueMillis(0)));

        if (randomBoolean()) {
            indicesService.addPendingDelete(new ShardId(test.index(), 0), test.getIndexSettings());
            indicesService.addPendingDelete(new ShardId(test.index(), 1), test.getIndexSettings());
            indicesService.addPendingDelete(new ShardId("bogus", "_na_", 1), test.getIndexSettings());
            assertEquals(indicesService.numPendingDeletes(test.index()), 2);
        assertEquals(indicesService.numPendingDeletes(index), numPending);
        assertTrue(indicesService.hasUncompletedPendingDeletes());

        final boolean hasBogus = randomBoolean();
        if (hasBogus) {
            indicesService.addPendingDelete(new ShardId(index, 0), indexSettings);
            indicesService.addPendingDelete(new ShardId(index, 1), indexSettings);
            indicesService.addPendingDelete(new ShardId("bogus", "_na_", 1), indexSettings);
            assertEquals(indicesService.numPendingDeletes(index), numPending + 2);
            assertTrue(indicesService.hasUncompletedPendingDeletes());
            // shard lock released... we can now delete
            indicesService.processPendingDeletes(test.index(), test.getIndexSettings(), new TimeValue(0, TimeUnit.MILLISECONDS));
            assertEquals(indicesService.numPendingDeletes(test.index()), 0);
            assertTrue(indicesService.hasUncompletedPendingDeletes()); // "bogus" index has not been removed
        }
        assertAcked(client().admin().indices().prepareOpen("test").setTimeout(TimeValue.timeValueSeconds(1)));

        assertAcked(client().admin().indices().prepareDelete("test"));
        assertBusy(() -> {
            try {
                indicesService.processPendingDeletes(index, indexSettings, TimeValue.timeValueMillis(0));
                assertEquals(indicesService.numPendingDeletes(index), 0);
            } catch (final Exception e) {
                fail(e.getMessage());
            }
        });
        assertThat(indicesService.hasUncompletedPendingDeletes(), equalTo(hasBogus)); // "bogus" index has not been removed
        assertFalse(shardPath.exists());
    }

    public void testVerifyIfIndexContentDeleted() throws Exception {
@@ -551,7 +559,7 @@ public class IndicesServiceTests extends ESSingleNodeTestCase {
        }
    }

    public void testConflictingEngineFactories() throws IOException {
    public void testConflictingEngineFactories() {
        final String indexName = "foobar";
        final Index index = new Index(indexName, UUIDs.randomBase64UUID());
        final Settings settings = Settings.builder()

@@ -54,6 +54,7 @@ import org.elasticsearch.node.RecoverySettingsChunkSizePlugin;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.snapshots.Snapshot;
 import org.elasticsearch.snapshots.SnapshotState;
+import org.elasticsearch.test.BackgroundIndexer;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
 import org.elasticsearch.test.ESIntegTestCase.Scope;
@@ -209,24 +210,34 @@ public class IndexRecoveryIT extends ESIntegTestCase {
     }

     public void testReplicaRecovery() throws Exception {
-        logger.info("--> start node A");
-        String nodeA = internalCluster().startNode();
+        final String nodeA = internalCluster().startNode();
+        createIndex(INDEX_NAME, Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, SHARD_COUNT)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, REPLICA_COUNT)
+            .build());
+        ensureGreen(INDEX_NAME);

-        logger.info("--> create index on node: {}", nodeA);
-        createAndPopulateIndex(INDEX_NAME, 1, SHARD_COUNT, REPLICA_COUNT);
+        final int numOfDocs = scaledRandomIntBetween(0, 200);
+        try (BackgroundIndexer indexer = new BackgroundIndexer(INDEX_NAME, "_doc", client(), numOfDocs)) {
+            waitForDocs(numOfDocs, indexer);
+        }

-        logger.info("--> start node B");
-        String nodeB = internalCluster().startNode();
-        ensureGreen();
+        refresh(INDEX_NAME);
+        assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numOfDocs);

+        final boolean closedIndex = randomBoolean();
+        if (closedIndex) {
+            assertAcked(client().admin().indices().prepareClose(INDEX_NAME));
+            ensureGreen(INDEX_NAME);
+        }

         // force a shard recovery from nodeA to nodeB
-        logger.info("--> bump replica count");
-        client().admin().indices().prepareUpdateSettings(INDEX_NAME)
-            .setSettings(Settings.builder().put("number_of_replicas", 1)).execute().actionGet();
-        ensureGreen();
+        final String nodeB = internalCluster().startNode();
+        assertAcked(client().admin().indices().prepareUpdateSettings(INDEX_NAME)
+            .setSettings(Settings.builder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)));
+        ensureGreen(INDEX_NAME);

         logger.info("--> request recoveries");
-        RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();
+        final RecoveryResponse response = client().admin().indices().prepareRecoveries(INDEX_NAME).execute().actionGet();

         // we should now have two total shards, one primary and one replica
         List<RecoveryState> recoveryStates = response.shardRecoveryStates().get(INDEX_NAME);
@@ -238,14 +249,27 @@ public class IndexRecoveryIT extends ESIntegTestCase {
         assertThat(nodeBResponses.size(), equalTo(1));

         // validate node A recovery
-        RecoveryState nodeARecoveryState = nodeAResponses.get(0);
-        assertRecoveryState(nodeARecoveryState, 0, RecoverySource.EmptyStoreRecoverySource.INSTANCE, true, Stage.DONE, null, nodeA);
+        final RecoveryState nodeARecoveryState = nodeAResponses.get(0);
+        final RecoverySource expectedRecoverySource;
+        if (closedIndex == false) {
+            expectedRecoverySource = RecoverySource.EmptyStoreRecoverySource.INSTANCE;
+        } else {
+            expectedRecoverySource = RecoverySource.ExistingStoreRecoverySource.INSTANCE;
+        }
+        assertRecoveryState(nodeARecoveryState, 0, expectedRecoverySource, true, Stage.DONE, null, nodeA);
         validateIndexRecoveryState(nodeARecoveryState.getIndex());

         // validate node B recovery
-        RecoveryState nodeBRecoveryState = nodeBResponses.get(0);
+        final RecoveryState nodeBRecoveryState = nodeBResponses.get(0);
         assertRecoveryState(nodeBRecoveryState, 0, PeerRecoverySource.INSTANCE, false, Stage.DONE, nodeA, nodeB);
         validateIndexRecoveryState(nodeBRecoveryState.getIndex());
+
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(nodeA));
+
+        if (closedIndex) {
+            assertAcked(client().admin().indices().prepareOpen(INDEX_NAME));
+        }
+        assertHitCount(client().prepareSearch(INDEX_NAME).setSize(0).get(), numOfDocs);
     }

     @TestLogging(

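The testReplicaRecovery change above captures the core of the new behavior: when the index was closed before the replica bump, the Recovery API is expected to report the primary as recovered from its existing on-disk store rather than from an empty store. A minimal sketch of that expectation, using only the RecoverySource types asserted in the hunk (the helper name is illustrative, not part of the commit):

    // Illustrative helper: which recovery source the test expects for the
    // primary shard, depending on whether the index was closed first.
    // Requires org.elasticsearch.cluster.routing.RecoverySource.
    static RecoverySource expectedPrimaryRecoverySource(boolean closedIndex) {
        // shards of a closed index keep their data on disk, so the reinitialized
        // primary recovers from the existing store rather than an empty one
        return closedIndex
            ? RecoverySource.ExistingStoreRecoverySource.INSTANCE
            : RecoverySource.EmptyStoreRecoverySource.INSTANCE;
    }
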
@@ -0,0 +1,87 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.indices.recovery;
+
+import org.elasticsearch.cluster.ClusterState;
+import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.test.BackgroundIndexer;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.InternalTestCluster;
+
+import java.util.Locale;
+
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
+import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
+import static org.hamcrest.Matchers.is;
+
+@ESIntegTestCase.ClusterScope(numDataNodes = 2)
+public class ReplicaToPrimaryPromotionIT extends ESIntegTestCase {
+
+    @Override
+    protected int numberOfReplicas() {
+        return 1;
+    }
+
+    public void testPromoteReplicaToPrimary() throws Exception {
+        final String indexName = randomAlphaOfLength(5).toLowerCase(Locale.ROOT);
+        createIndex(indexName);
+
+        final int numOfDocs = scaledRandomIntBetween(0, 200);
+        try (BackgroundIndexer indexer = new BackgroundIndexer(indexName, "_doc", client(), numOfDocs)) {
+            waitForDocs(numOfDocs, indexer);
+        }
+        refresh(indexName);
+
+        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs);
+        ensureGreen(indexName);
+
+        // sometimes test with a closed index
+        final IndexMetaData.State indexState = randomFrom(IndexMetaData.State.OPEN, IndexMetaData.State.CLOSE);
+        if (indexState == IndexMetaData.State.CLOSE) {
+            assertAcked(client().admin().indices().prepareClose(indexName));
+            ensureGreen(indexName);
+        }
+
+        // pick up a data node that contains a random primary shard
+        ClusterState state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState();
+        final int numShards = state.metaData().index(indexName).getNumberOfShards();
+        final ShardRouting primaryShard = state.routingTable().index(indexName).shard(randomIntBetween(0, numShards - 1)).primaryShard();
+        final DiscoveryNode randomNode = state.nodes().resolveNode(primaryShard.currentNodeId());
+
+        // stop the random data node, all remaining shards are promoted to primaries
+        internalCluster().stopRandomNode(InternalTestCluster.nameFilter(randomNode.getName()));
+        ensureYellowAndNoInitializingShards(indexName);
+
+        state = client(internalCluster().getMasterName()).admin().cluster().prepareState().get().getState();
+        for (IndexShardRoutingTable shardRoutingTable : state.routingTable().index(indexName)) {
+            for (ShardRouting shardRouting : shardRoutingTable.activeShards()) {
+                assertThat(shardRouting + " should be promoted as a primary", shardRouting.primary(), is(true));
+            }
+        }
+
+        if (indexState == IndexMetaData.State.CLOSE) {
+            assertAcked(client().admin().indices().prepareOpen(indexName));
+            ensureYellowAndNoInitializingShards(indexName);
+        }
+        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), numOfDocs);
+    }
+}

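Because closed indices are now replicated like regular shards, the new ReplicaToPrimaryPromotionIT can stop the node holding a primary and expect an ordinary replica-to-primary promotion whether the index is open or closed. A condensed sketch of the promotion check, assuming an ESIntegTestCase-style client() and Hamcrest matchers, with a hypothetical index name:

    // Condensed sketch of the promotion assertion: once the node holding a
    // primary is gone, every shard copy that is still active must be a primary.
    ClusterState state = client().admin().cluster().prepareState().get().getState();
    for (IndexShardRoutingTable shardRoutingTable : state.routingTable().index("my-index")) {
        for (ShardRouting shardRouting : shardRoutingTable.activeShards()) {
            assertThat(shardRouting + " should be a primary", shardRouting.primary(), is(true));
        }
    }
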
@@ -20,9 +20,11 @@ package org.elasticsearch.indices.state;

 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.action.ActionRequestValidationException;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
+import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.cluster.routing.ShardRouting;
@@ -49,7 +51,6 @@ import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.is;
 import static org.hamcrest.Matchers.notNullValue;
-import static org.hamcrest.Matchers.nullValue;

 public class CloseIndexIT extends ESIntegTestCase {

@@ -113,7 +114,8 @@ public class CloseIndexIT extends ESIntegTestCase {
         assertIndexIsClosed(indexName);

         // Second close should be acked too
-        assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName)));
+        final ActiveShardCount activeShardCount = randomFrom(ActiveShardCount.NONE, ActiveShardCount.DEFAULT, ActiveShardCount.ALL);
+        assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(activeShardCount)));
         assertIndexIsClosed(indexName);
     }

@@ -127,7 +129,7 @@ public class CloseIndexIT extends ESIntegTestCase {
         assertThat(clusterState.metaData().indices().get(indexName).getState(), is(IndexMetaData.State.OPEN));
         assertThat(clusterState.routingTable().allShards().stream().allMatch(ShardRouting::unassigned), is(true));

-        assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName)));
+        assertBusy(() -> assertAcked(client().admin().indices().prepareClose(indexName).setWaitForActiveShards(ActiveShardCount.NONE)));
         assertIndexIsClosed(indexName);
     }

@@ -306,11 +308,37 @@ public class CloseIndexIT extends ESIntegTestCase {
             indexer.totalIndexedDocs());
     }

+    public void testCloseIndexWaitForActiveShards() throws Exception {
+        final String indexName = randomAlphaOfLength(10).toLowerCase(Locale.ROOT);
+        createIndex(indexName, Settings.builder()
+            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
+            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0) // no replicas to avoid recoveries that could fail the index closing
+            .build());
+
+        final int nbDocs = randomIntBetween(0, 50);
+        indexRandom(randomBoolean(), false, randomBoolean(), IntStream.range(0, nbDocs)
+            .mapToObj(i -> client().prepareIndex(indexName, "_doc", String.valueOf(i)).setSource("num", i)).collect(toList()));
+        ensureGreen(indexName);
+
+        final CloseIndexResponse closeIndexResponse = client().admin().indices()
+            .prepareClose(indexName)
+            .setWaitForActiveShards(ActiveShardCount.DEFAULT)
+            .get();
+        assertThat(client().admin().cluster().prepareHealth(indexName).get().getStatus(), is(ClusterHealthStatus.GREEN));
+        assertTrue(closeIndexResponse.isAcknowledged());
+        assertTrue(closeIndexResponse.isShardsAcknowledged());
+        assertIndexIsClosed(indexName);
+    }
+
     static void assertIndexIsClosed(final String... indices) {
         final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
         for (String index : indices) {
-            assertThat(clusterState.metaData().indices().get(index).getState(), is(IndexMetaData.State.CLOSE));
-            assertThat(clusterState.routingTable().index(index), nullValue());
+            final IndexMetaData indexMetaData = clusterState.metaData().indices().get(index);
+            assertThat(indexMetaData.getState(), is(IndexMetaData.State.CLOSE));
+            final Settings indexSettings = indexMetaData.getSettings();
+            assertThat(indexSettings.hasValue(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(true));
+            assertThat(indexSettings.getAsBoolean(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey(), false), is(true));
+            assertThat(clusterState.routingTable().index(index), notNullValue());
             assertThat(clusterState.blocks().hasIndexBlock(index, MetaDataIndexStateService.INDEX_CLOSED_BLOCK), is(true));
             assertThat("Index " + index + " must have only 1 block with [id=" + MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID + "]",
                 clusterState.blocks().indices().getOrDefault(index, emptySet()).stream()
@@ -321,7 +349,9 @@ public class CloseIndexIT extends ESIntegTestCase {
     static void assertIndexIsOpened(final String... indices) {
         final ClusterState clusterState = client().admin().cluster().prepareState().get().getState();
         for (String index : indices) {
-            assertThat(clusterState.metaData().indices().get(index).getState(), is(IndexMetaData.State.OPEN));
+            final IndexMetaData indexMetaData = clusterState.metaData().indices().get(index);
+            assertThat(indexMetaData.getState(), is(IndexMetaData.State.OPEN));
+            assertThat(indexMetaData.getSettings().hasValue(MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING.getKey()), is(false));
             assertThat(clusterState.routingTable().index(index), notNullValue());
             assertThat(clusterState.blocks().hasIndexBlock(index, MetaDataIndexStateService.INDEX_CLOSED_BLOCK), is(false));
         }

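The reworked assertions above reflect what closing an index now leaves behind: the index keeps a routing table (notNullValue instead of nullValue), and its settings carry the MetaDataIndexStateService.VERIFIED_BEFORE_CLOSE_SETTING marker once the pre-close sanity checks have passed. Callers can also ask the Close Index API to wait for the closed shards to become active again. A minimal usage sketch, assuming a Client named client and a hypothetical index name:

    // Close an index and wait for its (now replicated) closed shards to start.
    CloseIndexResponse response = client.admin().indices()
        .prepareClose("my-index")
        .setWaitForActiveShards(ActiveShardCount.DEFAULT)
        .get();
    // acknowledged by the master...
    boolean acked = response.isAcknowledged();
    // ...and the requested number of shard copies became active before the timeout
    boolean shardsAcked = response.isShardsAcknowledged();
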
@@ -20,8 +20,8 @@
 package org.elasticsearch.indices.state;

 import org.elasticsearch.action.ActionFuture;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.action.admin.indices.close.TransportVerifyShardBeforeCloseAction;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -72,7 +72,7 @@
         final CountDownLatch block = new CountDownLatch(1);
         final Releasable releaseBlock = interceptVerifyShardBeforeCloseActions(indexName, block::countDown);

-        ActionFuture<AcknowledgedResponse> closeIndexResponse = client().admin().indices().prepareClose(indexName).execute();
+        ActionFuture<CloseIndexResponse> closeIndexResponse = client().admin().indices().prepareClose(indexName).execute();
         assertTrue("Waiting for index to have a closing blocked", block.await(60, TimeUnit.SECONDS));
         assertIndexIsBlocked(indexName);
         assertFalse(closeIndexResponse.isDone());
@@ -96,7 +96,7 @@
         final CountDownLatch block = new CountDownLatch(1);
         final Releasable releaseBlock = interceptVerifyShardBeforeCloseActions(randomFrom(indices), block::countDown);

-        ActionFuture<AcknowledgedResponse> closeIndexResponse = client().admin().indices().prepareClose("index-*").execute();
+        ActionFuture<CloseIndexResponse> closeIndexResponse = client().admin().indices().prepareClose("index-*").execute();
         assertTrue("Waiting for index to have a closing blocked", block.await(60, TimeUnit.SECONDS));
         assertFalse(closeIndexResponse.isDone());
         indices.forEach(ReopenWhileClosingIT::assertIndexIsBlocked);

@@ -36,7 +36,7 @@ import org.elasticsearch.test.ESIntegTestCase;

 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.hamcrest.Matchers.equalTo;
-import static org.hamcrest.Matchers.nullValue;
+import static org.hamcrest.Matchers.notNullValue;

 @ESIntegTestCase.ClusterScope(minNumDataNodes = 2)
 public class SimpleIndexStateIT extends ESIntegTestCase {
@@ -65,7 +65,7 @@

         stateResponse = client().admin().cluster().prepareState().get();
         assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
-        assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
+        assertThat(stateResponse.getState().routingTable().index("test"), notNullValue());

         logger.info("--> trying to index into a closed index ...");
         try {
@@ -102,7 +102,7 @@
         assertThat(health.isTimedOut(), equalTo(false));
         assertThat(health.getStatus(), equalTo(ClusterHealthStatus.RED));

-        assertAcked(client().admin().indices().prepareClose("test"));
+        assertAcked(client().admin().indices().prepareClose("test").setWaitForActiveShards(ActiveShardCount.NONE));

         logger.info("--> updating test index settings to allow allocation");
         client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()

@@ -521,11 +521,10 @@
         assertThat(response.getHits().getHits().length, equalTo(0));
     }

-    public void testCloseAndReopenOrDeleteWithActiveScroll() throws IOException {
+    public void testCloseAndReopenOrDeleteWithActiveScroll() {
         createIndex("test");
         for (int i = 0; i < 100; i++) {
-            client().prepareIndex("test", "type1", Integer.toString(i)).setSource(jsonBuilder().startObject().field("field", i).endObject())
-                .get();
+            client().prepareIndex("test", "type1", Integer.toString(i)).setSource("field", i).get();
         }
         refresh();
         SearchResponse searchResponse = client().prepareSearch()
@@ -541,11 +540,11 @@
             assertThat(((Number) hit.getSortValues()[0]).longValue(), equalTo(counter++));
         }
         if (randomBoolean()) {
-            client().admin().indices().prepareClose("test").get();
-            client().admin().indices().prepareOpen("test").get();
+            assertAcked(client().admin().indices().prepareClose("test"));
+            assertAcked(client().admin().indices().prepareOpen("test"));
+            ensureGreen("test");
         } else {
-            client().admin().indices().prepareDelete("test").get();
+            assertAcked(client().admin().indices().prepareDelete("test"));
         }
     }

@@ -1556,7 +1556,7 @@
         assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
         ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
         assertThat(stateResponse.getState().metaData().index("test-idx-closed").getState(), equalTo(IndexMetaData.State.CLOSE));
-        assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
+        assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), notNullValue());

         logger.info("--> snapshot");
         CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
@@ -3752,7 +3752,7 @@
         final IndexMetaData restoredIndexMetaData = client().admin().cluster().prepareState().clear().setIndices(indexName)
             .setMetaData(true).get().getState().metaData().index(indexName);
         for (int shardId = 0; shardId < numPrimaries; shardId++) {
-            assertThat(restoredIndexMetaData.primaryTerm(shardId), equalTo(primaryTerms.get(shardId) + 1));
+            assertThat(restoredIndexMetaData.primaryTerm(shardId), greaterThan(primaryTerms.get(shardId)));
         }
     }

@@ -697,6 +697,14 @@
             tombstoneDocSupplier());
     }

+    protected EngineConfig noOpConfig(IndexSettings indexSettings, Store store, Path translogPath) {
+        return noOpConfig(indexSettings, store, translogPath, null);
+    }
+
+    protected EngineConfig noOpConfig(IndexSettings indexSettings, Store store, Path translogPath, LongSupplier globalCheckpointSupplier) {
+        return config(indexSettings, store, translogPath, newMergePolicy(), null, null, globalCheckpointSupplier);
+    }
+
     protected static final BytesReference B_1 = new BytesArray(new byte[]{1});
     protected static final BytesReference B_2 = new BytesArray(new byte[]{2});
     protected static final BytesReference B_3 = new BytesArray(new byte[]{3});

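The two noOpConfig overloads above let engine tests build an EngineConfig for the NoOpEngine that this commit introduces for closed-index shards. A hedged sketch of how a test might use them (assuming, per the commit description, a NoOpEngine constructor that takes an EngineConfig; the variable names are illustrative):

    // Sketch: open a no-op engine over an existing store, as a closed shard would.
    EngineConfig config = noOpConfig(INDEX_SETTINGS, store, translogPath, globalCheckpoint::get);
    try (Engine engine = new NoOpEngine(config)) {
        // no indexing or searching is possible here; the engine only exposes
        // existing metadata such as sequence-number stats, at low memory cost
    }
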
@@ -18,6 +18,9 @@ integTest.enabled = false
 compileJava.options.compilerArgs << "-Xlint:-try"
 compileTestJava.options.compilerArgs << "-Xlint:-try"

+// Integration Test classes that cannot run with the security manager
+String[] noSecurityManagerITClasses = [ "**/CloseFollowerIndexIT.class" ]
+
 // Instead we create a separate task to run the
 // tests based on ESIntegTestCase
 task internalClusterTest(type: RandomizedTestingTask,
@@ -25,11 +28,22 @@ task internalClusterTest(type: RandomizedTestingTask,
                          description: 'Java fantasy integration tests',
                          dependsOn: unitTest.dependsOn) {
     include '**/*IT.class'
+    exclude noSecurityManagerITClasses
     systemProperty 'es.set.netty.runtime.available.processors', 'false'
 }
 check.dependsOn internalClusterTest
 internalClusterTest.mustRunAfter test

+task internalClusterTestNoSecurityManager(type: RandomizedTestingTask,
+                                          group: JavaBasePlugin.VERIFICATION_GROUP,
+                                          description: 'Java fantasy integration tests with no security manager',
+                                          dependsOn: unitTest.dependsOn) {
+    include noSecurityManagerITClasses
+    systemProperty 'es.set.netty.runtime.available.processors', 'false'
+    systemProperty 'tests.security.manager', 'false'
+}
+internalClusterTest.dependsOn internalClusterTestNoSecurityManager
+
 // add all sub-projects of the qa sub-project
 gradle.projectsEvaluated {
     project.subprojects

@@ -15,6 +15,7 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
 import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
@@ -204,7 +205,7 @@ public class ShardFollowTasksExecutor extends PersistentTasksExecutor<ShardFollo
                                           Runnable handler,
                                           Consumer<Exception> onFailure) {
         CloseIndexRequest closeRequest = new CloseIndexRequest(followIndex);
-        CheckedConsumer<AcknowledgedResponse, Exception> onResponse = response -> {
+        CheckedConsumer<CloseIndexResponse, Exception> onResponse = response -> {
             updateSettingsAndOpenIndex(followIndex, updatedSettings, handler, onFailure);
         };
         followerClient.admin().indices().close(closeRequest, ActionListener.wrap(onResponse, onFailure));

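Since closing an index now goes through the richer Close Index API, callers that previously expected an AcknowledgedResponse type their listeners against CloseIndexResponse, as in the CCR executor above. A minimal sketch of the call shape (followerClient and onFailure stand in for the surrounding code's fields):

    // Sketch: close a follower index and react to the dedicated response type.
    CloseIndexRequest closeRequest = new CloseIndexRequest("follower-index");
    followerClient.admin().indices().close(closeRequest, ActionListener.wrap(
        response -> {
            // response is a CloseIndexResponse; proceed once the master has acked
        },
        onFailure));
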
@@ -11,17 +11,22 @@ import org.elasticsearch.action.search.SearchRequest;
+import org.elasticsearch.action.support.ActiveShardCount;
 import org.elasticsearch.action.support.master.AcknowledgedResponse;
 import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.block.ClusterBlock;
+import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.IndexSettings;
+import org.elasticsearch.index.engine.ReadOnlyEngine;
 import org.elasticsearch.xpack.CcrIntegTestCase;
 import org.elasticsearch.xpack.core.ccr.action.PutFollowAction;
+import org.junit.After;
+import org.junit.Before;

-import java.util.ArrayList;
-import java.util.List;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;

 import static java.util.Collections.singletonMap;
@@ -31,6 +36,36 @@ import static org.hamcrest.Matchers.is;

 public class CloseFollowerIndexIT extends CcrIntegTestCase {

+    private Thread.UncaughtExceptionHandler uncaughtExceptionHandler;
+
+    @Before
+    public void wrapUncaughtExceptionHandler() {
+        uncaughtExceptionHandler = Thread.getDefaultUncaughtExceptionHandler();
+        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+            Thread.setDefaultUncaughtExceptionHandler((t, e) -> {
+                if (t.getThreadGroup().getName().contains(getTestClass().getSimpleName())) {
+                    for (StackTraceElement element : e.getStackTrace()) {
+                        if (element.getClassName().equals(ReadOnlyEngine.class.getName())) {
+                            if (element.getMethodName().equals("assertMaxSeqNoEqualsToGlobalCheckpoint")) {
+                                return;
+                            }
+                        }
+                    }
+                }
+                uncaughtExceptionHandler.uncaughtException(t, e);
+            });
+            return null;
+        });
+    }
+
+    @After
+    public void restoreUncaughtExceptionHandler() {
+        AccessController.doPrivileged((PrivilegedAction<Void>) () -> {
+            Thread.setDefaultUncaughtExceptionHandler(uncaughtExceptionHandler);
+            return null;
+        });
+    }
+
     public void testCloseAndReopenFollowerIndex() throws Exception {
         final String leaderIndexSettings = getIndexSettings(1, 1, singletonMap(IndexSettings.INDEX_SOFT_DELETES_SETTING.getKey(), "true"));
         assertAcked(leaderClient().admin().indices().prepareCreate("index1").setSource(leaderIndexSettings, XContentType.JSON));
@@ -62,20 +97,29 @@ public class CloseFollowerIndexIT extends CcrIntegTestCase {
         }

         atLeastDocsIndexed(followerClient(), "index2", 32);
-        AcknowledgedResponse response = followerClient().admin().indices().close(new CloseIndexRequest("index2")).get();
+
+        CloseIndexRequest closeIndexRequest = new CloseIndexRequest("index2");
+        closeIndexRequest.waitForActiveShards(ActiveShardCount.NONE);
+        AcknowledgedResponse response = followerClient().admin().indices().close(closeIndexRequest).get();
         assertThat(response.isAcknowledged(), is(true));

         ClusterState clusterState = followerClient().admin().cluster().prepareState().get().getState();
-        List<ClusterBlock> blocks = new ArrayList<>(clusterState.getBlocks().indices().get("index2"));
-        assertThat(blocks.size(), equalTo(1));
-        assertThat(blocks.get(0).id(), equalTo(MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID));
+        assertThat(clusterState.metaData().index("index2").getState(), is(IndexMetaData.State.CLOSE));
+        assertThat(clusterState.getBlocks().hasIndexBlock("index2", MetaDataIndexStateService.INDEX_CLOSED_BLOCK), is(true));
+        assertThat(followerClient().admin().cluster().prepareHealth("index2").get().getStatus(), equalTo(ClusterHealthStatus.RED));

         isRunning.set(false);
         for (Thread thread : threads) {
             thread.join();
         }

         assertAcked(followerClient().admin().indices().open(new OpenIndexRequest("index2")).get());
+
+        clusterState = followerClient().admin().cluster().prepareState().get().getState();
+        assertThat(clusterState.metaData().index("index2").getState(), is(IndexMetaData.State.OPEN));
+        assertThat(clusterState.getBlocks().hasIndexBlockWithId("index2", MetaDataIndexStateService.INDEX_CLOSED_BLOCK_ID), is(false));
+        ensureFollowerGreen("index2");

         refresh(leaderClient(), "index1");
         SearchRequest leaderSearchRequest = new SearchRequest("index1");
         leaderSearchRequest.source().trackTotalHits(true);
@@ -86,6 +130,6 @@ public class CloseFollowerIndexIT extends CcrIntegTestCase {
             followerSearchRequest.source().trackTotalHits(true);
             long followerIndexDocs = followerClient().search(followerSearchRequest).actionGet().getHits().getTotalHits().value;
             assertThat(followerIndexDocs, equalTo(leaderIndexDocs));
-        });
+        }, 30L, TimeUnit.SECONDS);
     }
 }

@@ -942,7 +942,7 @@ public class IndexFollowingIT extends CcrIntegTestCase {
         }

         assertBusy(() -> {
-            assertThat(getFollowTaskSettingsVersion("follower"), equalTo(2L));
+            assertThat(getFollowTaskSettingsVersion("follower"), equalTo(4L));
             assertThat(getFollowTaskMappingVersion("follower"), equalTo(2L));

             GetSettingsRequest getSettingsRequest = new GetSettingsRequest();

@@ -12,6 +12,7 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.admin.indices.close.CloseIndexClusterStateUpdateRequest;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.action.admin.indices.close.TransportCloseIndexAction;
 import org.elasticsearch.action.admin.indices.open.OpenIndexClusterStateUpdateRequest;
 import org.elasticsearch.action.admin.indices.open.OpenIndexResponse;
@@ -126,9 +127,9 @@ public final class TransportFreezeIndexAction extends
             .masterNodeTimeout(request.masterNodeTimeout())
             .indices(concreteIndices);

-        indexStateService.closeIndices(closeRequest, new ActionListener<AcknowledgedResponse>() {
+        indexStateService.closeIndices(closeRequest, new ActionListener<CloseIndexResponse>() {
             @Override
-            public void onResponse(final AcknowledgedResponse response) {
+            public void onResponse(final CloseIndexResponse response) {
                 if (response.isAcknowledged()) {
                     toggleFrozenSettings(concreteIndices, request, listener);
                 } else {

@@ -349,7 +349,7 @@ public class FrozenIndexTests extends ESSingleNodeTestCase {
         assertAcked(xPackClient.freeze(new TransportFreezeIndexAction.FreezeRequest(index)));
         assertIndexFrozen(index);
         assertThat(client().admin().cluster().prepareState().get().getState().metaData().index(index).getSettingsVersion(),
-            equalTo(settingsVersion + 1));
+            greaterThan(settingsVersion));
     }

     public void testFreezeEmptyIndexWithTranslogOps() throws Exception {

@@ -8,7 +8,7 @@ package org.elasticsearch.xpack.core.indexlifecycle;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
-import org.elasticsearch.action.support.master.AcknowledgedResponse;
+import org.elasticsearch.action.admin.indices.close.CloseIndexResponse;
 import org.elasticsearch.client.AdminClient;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.client.IndicesAdminClient;
@@ -43,8 +43,8 @@ public class CloseFollowerIndexStepTests extends AbstractStepTestCase<CloseFollo
             CloseIndexRequest closeIndexRequest = (CloseIndexRequest) invocation.getArguments()[0];
             assertThat(closeIndexRequest.indices()[0], equalTo("follower-index"));
             @SuppressWarnings("unchecked")
-            ActionListener<AcknowledgedResponse> listener = (ActionListener<AcknowledgedResponse>) invocation.getArguments()[1];
-            listener.onResponse(new AcknowledgedResponse(true));
+            ActionListener<CloseIndexResponse> listener = (ActionListener<CloseIndexResponse>) invocation.getArguments()[1];
+            listener.onResponse(new CloseIndexResponse(true, true));
             return null;
         }).when(indicesClient).close(Mockito.any(), Mockito.any());