Improve the error message when attempting to snapshot a closed index
Currently the error message is the same when an index is closed as when it is missing shards. This commit generates a specific failure message when a user tries to create a snapshot of a closed index. Related to #10579
parent 717d9ec1d0
commit faf725b788
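For context, a minimal sketch of the behaviour this change produces, written in the style of the integration tests below (repository and index names here are hypothetical; the snapshot still fails, but the reason now names the closed index instead of only complaining about missing primary shards):

    // assumes a running test cluster with a registered snapshot repository "my-repo"
    client.admin().indices().prepareClose("my-closed-idx").get();

    CreateSnapshotResponse response = client.admin().cluster()
            .prepareCreateSnapshot("my-repo", "my-snap")
            .setIndices("my-closed-idx")
            .setWaitForCompletion(true)
            .get();

    // the snapshot is FAILED, and the reason now calls out the closed index explicitly
    assertThat(response.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
    assertThat(response.getSnapshotInfo().reason(), containsString("Indices are closed [my-closed-idx]"));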
SnapshotsService.java

@@ -37,6 +37,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
 import org.elasticsearch.cluster.routing.RoutingTable;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
@@ -131,7 +132,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService>
      * @throws SnapshotMissingException if snapshot is not found
      */
     public Snapshot snapshot(SnapshotId snapshotId) {
-        ImmutableList<SnapshotMetaData.Entry> entries = currentSnapshots(snapshotId.getRepository(), new String[] {snapshotId.getSnapshot()});
+        ImmutableList<SnapshotMetaData.Entry> entries = currentSnapshots(snapshotId.getRepository(), new String[]{snapshotId.getSnapshot()});
         if (!entries.isEmpty()) {
             return inProgressSnapshot(entries.iterator().next());
         }
@@ -323,11 +324,25 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService>
                         // Replace the snapshot that was just created
                         ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards = shards(currentState, entry.indices());
                         if (!partial) {
-                            Set<String> indicesWithMissingShards = indicesWithMissingShards(shards);
-                            if (indicesWithMissingShards != null) {
+                            Tuple<Set<String>, Set<String>> indicesWithMissingShards = indicesWithMissingShards(shards, currentState.metaData());
+                            Set<String> missing = indicesWithMissingShards.v1();
+                            Set<String> closed = indicesWithMissingShards.v2();
+                            if (missing.isEmpty() == false || closed.isEmpty() == false) {
+                                StringBuilder failureMessage = new StringBuilder();
                                 updatedSnapshot = new SnapshotMetaData.Entry(entry, State.FAILED, shards);
                                 entries.add(updatedSnapshot);
-                                failure = "Indices don't have primary shards +[" + indicesWithMissingShards + "]";
+                                if (missing.isEmpty() == false) {
+                                    failureMessage.append("Indices don't have primary shards ");
+                                    failureMessage.append(missing);
+                                }
+                                if (closed.isEmpty() == false) {
+                                    if (failureMessage.length() > 0) {
+                                        failureMessage.append("; ");
+                                    }
+                                    failureMessage.append("Indices are closed ");
+                                    failureMessage.append(closed);
+                                }
+                                failure = failureMessage.toString();
                                 continue;
                             }
                         }
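For illustration, when both sets are non-empty the two parts are joined with "; ", so a non-partial snapshot request covering one index without primary shards and one closed index fails with a reason along the lines of (index names hypothetical):

    Indices don't have primary shards [some-idx]; Indices are closed [closed-idx]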
@@ -894,22 +909,24 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService>
     }

     /**
-     * Returns list of indices with missing shards
+     * Returns list of indices with missing shards, and list of indices that are closed
      *
      * @param shards list of shard statuses
-     * @return list of failed indices
+     * @return list of failed and closed indices
      */
-    private Set<String> indicesWithMissingShards(ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards) {
-        Set<String> indices = null;
+    private Tuple<Set<String>, Set<String>> indicesWithMissingShards(ImmutableMap<ShardId, SnapshotMetaData.ShardSnapshotStatus> shards, MetaData metaData) {
+        Set<String> missing = newHashSet();
+        Set<String> closed = newHashSet();
         for (ImmutableMap.Entry<ShardId, SnapshotMetaData.ShardSnapshotStatus> entry : shards.entrySet()) {
             if (entry.getValue().state() == State.MISSING) {
-                if (indices == null) {
-                    indices = newHashSet();
+                if (metaData.hasIndex(entry.getKey().getIndex()) && metaData.index(entry.getKey().getIndex()).getState() == IndexMetaData.State.CLOSE) {
+                    closed.add(entry.getKey().getIndex());
+                } else {
+                    missing.add(entry.getKey().getIndex());
                 }
-                indices.add(entry.getKey().getIndex());
             }
         }
-        return indices;
+        return new Tuple<>(missing, closed);
     }

     /**
@@ -1238,6 +1255,11 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService>
             if (indexMetaData == null) {
                 // The index was deleted before we managed to start the snapshot - mark it as missing.
                 builder.put(new ShardId(index, 0), new SnapshotMetaData.ShardSnapshotStatus(null, State.MISSING, "missing index"));
+            } else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
+                for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
+                    ShardId shardId = new ShardId(index, i);
+                    builder.put(shardId, new SnapshotMetaData.ShardSnapshotStatus(null, State.MISSING, "index is closed"));
+                }
             } else {
                 IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index);
                 for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
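Since every shard of a closed index is recorded as MISSING with the reason "index is closed", the detail also surfaces in the per-shard failures of the snapshot response. A minimal sketch of inspecting them, mirroring the assertion added to SharedClusterSnapshotRestoreTests further down (response variable and logger as in the tests):

    // each shard of a closed index carries the new per-shard reason
    for (SnapshotShardFailure failure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
        logger.info("shard failure reason: [{}]", failure.reason()); // e.g. "index is closed"
    }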
DedicatedClusterSnapshotRestoreTests.java

@@ -412,12 +412,18 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
                 .put("number_of_replicas", 0)));
         ensureGreen("test-idx-all");

+        logger.info("--> create an index that will be closed");
+        assertAcked(prepareCreate("test-idx-closed", 1, settingsBuilder().put("number_of_shards", 4).put("number_of_replicas", 0)));
+        ensureGreen("test-idx-closed");
+
         logger.info("--> indexing some data into test-idx-all");
         for (int i = 0; i < 100; i++) {
             index("test-idx-all", "doc", Integer.toString(i), "foo", "bar" + i);
+            index("test-idx-closed", "doc", Integer.toString(i), "foo", "bar" + i);
         }
         refresh();
         assertThat(client().prepareCount("test-idx-all").get().getCount(), equalTo(100L));
+        assertAcked(client().admin().indices().prepareClose("test-idx-closed"));

         logger.info("--> create an index that will have no allocated shards");
         assertAcked(prepareCreate("test-idx-none", 1, settingsBuilder().put("number_of_shards", 6)
@@ -431,13 +437,19 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
         assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

         logger.info("--> start snapshot with default settings - should fail");
-        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).execute().actionGet();
-
+        CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
+                .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed")
+                .setWaitForCompletion(true).execute().actionGet();
         assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
         assertThat(createSnapshotResponse.getSnapshotInfo().reason(), containsString("Indices don't have primary shards"));
+        assertThat(createSnapshotResponse.getSnapshotInfo().reason(), containsString("; Indices are closed [test-idx-closed]"));
+
+
         if (randomBoolean()) {
             logger.info("checking snapshot completion using status");
-            client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(false).setPartial(true).execute().actionGet();
+            client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2")
+                    .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed")
+                    .setWaitForCompletion(false).setPartial(true).execute().actionGet();
             awaitBusy(new Predicate<Object>() {
                 @Override
                 public boolean apply(Object o) {
@@ -455,7 +467,7 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
                     assertThat(snapshotStatuses.size(), equalTo(1));
                     SnapshotStatus snapshotStatus = snapshotStatuses.get(0);
                     logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
-                    assertThat(snapshotStatus.getShardsStats().getTotalShards(), equalTo(18));
+                    assertThat(snapshotStatus.getShardsStats().getTotalShards(), equalTo(22));
                     assertThat(snapshotStatus.getShardsStats().getDoneShards(), lessThan(12));
                     assertThat(snapshotStatus.getShardsStats().getDoneShards(), greaterThan(6));

@@ -476,9 +488,11 @@ public class DedicatedClusterSnapshotRestoreTests extends AbstractSnapshotTests
             });
         } else {
             logger.info("checking snapshot completion using wait_for_completion flag");
-            createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2").setWaitForCompletion(true).setPartial(true).execute().actionGet();
+            createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-2")
+                    .setIndices("test-idx-all", "test-idx-none", "test-idx-some", "test-idx-closed")
+                    .setWaitForCompletion(true).setPartial(true).execute().actionGet();
             logger.info("State: [{}], Reason: [{}]", createSnapshotResponse.getSnapshotInfo().state(), createSnapshotResponse.getSnapshotInfo().reason());
-            assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(18));
+            assertThat(createSnapshotResponse.getSnapshotInfo().totalShards(), equalTo(22));
             assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), lessThan(12));
             assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(6));
             assertThat(client().admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap-2").execute().actionGet().getSnapshots().get(0).state(), equalTo(SnapshotState.PARTIAL));
SharedClusterSnapshotRestoreTests.java

@@ -946,6 +946,18 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {

         logger.info("--> deleting snapshot");
         client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
+
+        logger.info("--> snapshot with closed index");
+        createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx", "test-idx-closed").get();
+        assertThat(createSnapshotResponse.getSnapshotInfo().indices().size(), equalTo(2));
+        assertThat(createSnapshotResponse.getSnapshotInfo().state(), equalTo(SnapshotState.FAILED));
+        assertThat(createSnapshotResponse.getSnapshotInfo().reason(), containsString("Indices are closed [test-idx-closed]"));
+        for(SnapshotShardFailure failure : createSnapshotResponse.getSnapshotInfo().shardFailures()) {
+            assertThat(failure.reason(), containsString("index is closed"));
+        }
+
+        logger.info("--> deleting snapshot");
+        client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get();
     }

     @Test