Fix Inconsistent Shard Failure Count in Failed Snapshots (#51416) (#51426)

* Fix Inconsistent Shard Failure Count in Failed Snapshots

This fix was necessary to allow for the test enhancement below:
We were not adding shard failure entries to a failed snapshot for those
shards that were never attempted, because the snapshot failed during
the init stage and was not partial. This caused the never-attempted shards
to be counted towards the successful shard count, which is wrong and
broke the repository consistency tests.
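
For context, a minimal sketch (with hypothetical names, not the actual SnapshotsService code) of why the missing entries inflated the count: the successful-shard figure is derived as total shards minus the failures that were actually recorded, which is also what the new test assertion below checks, so a shard that was never attempted but never recorded as failed was silently counted as successful.

import java.util.List;

// Hypothetical illustration only; the real logic lives in SnapshotsService/SnapshotInfo.
final class ShardCountSketch {
    // Successful shards are derived from total shards minus *recorded* failures.
    static int successfulShards(int totalShards, List<String> recordedShardFailures) {
        return totalShards - recordedShardFailures.size();
    }
    // Before this fix: a shard that was never attempted (snapshot failed during the
    // init stage and was not partial) produced no failure entry and was therefore
    // counted as successful here. After the fix it is recorded as "skipped".
}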

Also, this change adjusts snapshot resiliency tests to run another snapshot
at the end of each test run to guarantee a correct `index.latest` blob exists
after each run.
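
As background (an assumption about the blob-store repository layout, not something this commit changes): `index.latest` is expected to hold the numeric generation N of the current `index-N` repository metadata blob, so running one more snapshot at the end of each test forces it to be rewritten and gives the consistency checks a known-good value to verify. A rough sketch of reading it, assuming the blob is a single big-endian long and using an illustrative path:

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;

// Sketch under the assumption that index.latest stores the current generation N
// (of index-N) as one long; the repository path is illustrative only.
final class IndexLatestSketch {
    static long readLatestGeneration(Path repoRoot) throws IOException {
        try (InputStream in = Files.newInputStream(repoRoot.resolve("index.latest"));
             DataInputStream data = new DataInputStream(in)) {
            return data.readLong(); // e.g. 3 means index-3 is the current metadata blob
        }
    }
}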

Closes #47550
Armin Braun 2020-01-24 18:20:47 +01:00 committed by GitHub
parent bf53ca3380
commit f0d8c785e3
2 changed files with 20 additions and 3 deletions


@@ -1090,8 +1090,13 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
             for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardStatus : entry.shards()) {
                 ShardId shardId = shardStatus.key;
                 ShardSnapshotStatus status = shardStatus.value;
-                if (status.state().failed()) {
+                final ShardState state = status.state();
+                if (state.failed()) {
                     shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, status.reason()));
+                } else if (state.completed() == false) {
+                    shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, "skipped"));
+                } else {
+                    assert state == ShardState.SUCCESS;
                 }
             }
             final ShardGenerations shardGenerations = buildGenerations(entry, metaData);
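
To make the three-way branch above easier to follow, here is an illustrative stand-in for the shard-state semantics it relies on; the enum below is a sketch, not the real SnapshotsInProgress.ShardState, but it captures the distinction the new code depends on: a state is failed, completed successfully, or not yet completed (never attempted), and the new else-if branch records that last case as a "skipped" failure instead of letting it fall through uncounted.

// Illustrative sketch only, not the actual Elasticsearch ShardState enum.
enum SketchShardState {
    INIT, WAITING, SUCCESS, FAILED, ABORTED, MISSING;

    boolean failed() {
        return this == FAILED || this == ABORTED || this == MISSING;
    }

    boolean completed() {
        return this == SUCCESS || failed();
    }
}

// With these semantics the branch above is exhaustive:
//   state.failed()              -> record the real failure reason
//   state.completed() == false  -> never attempted, record a "skipped" failure (the fix)
//   otherwise                   -> must be SUCCESS, which the assert documents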


@@ -216,6 +216,7 @@ import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.iterableWithSize;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.Mockito.mock;
@@ -249,8 +250,19 @@ public class SnapshotResiliencyTests extends ESTestCase {
         clearDisruptionsAndAwaitSync();
         final StepListener<CleanupRepositoryResponse> cleanupResponse = new StepListener<>();
-        client().admin().cluster().cleanupRepository(
-            new CleanupRepositoryRequest("repo"), cleanupResponse);
+        final StepListener<CreateSnapshotResponse> createSnapshotResponse = new StepListener<>();
+        // Create another snapshot and then clean up the repository to verify that the repository works correctly no matter the
+        // failures seen during the previous test.
+        client().admin().cluster().prepareCreateSnapshot("repo", "last-snapshot")
+            .setWaitForCompletion(true).execute(createSnapshotResponse);
+        continueOrDie(createSnapshotResponse, r -> {
+            final SnapshotInfo snapshotInfo = r.getSnapshotInfo();
+            // Snapshot can fail because some tests leave indices in a red state because data nodes were stopped
+            assertThat(snapshotInfo.state(), either(is(SnapshotState.SUCCESS)).or(is(SnapshotState.FAILED)));
+            assertThat(snapshotInfo.shardFailures(), iterableWithSize(snapshotInfo.failedShards()));
+            assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards() - snapshotInfo.failedShards()));
+            client().admin().cluster().cleanupRepository(new CleanupRepositoryRequest("repo"), cleanupResponse);
+        });
         final AtomicBoolean cleanedUp = new AtomicBoolean(false);
         continueOrDie(cleanupResponse, r -> cleanedUp.set(true));
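
For readers unfamiliar with the listener plumbing in this test, `continueOrDie` chains the next step onto a `StepListener` and turns any failure into a test error; the snippet below is a simplified, assumed shape of that pattern using only JDK types, not the helper's actual source.

import java.util.function.Consumer;

// Assumed, simplified shape of the continueOrDie pattern used above: run the next
// step when the previous one completes, and fail the (deterministic) test run
// immediately on any exception.
final class ContinueOrDieSketch {
    interface Step<T> {
        void whenComplete(Consumer<T> onResponse, Consumer<Exception> onFailure);
    }

    static <T> void continueOrDie(Step<T> step, Consumer<T> onResponse) {
        step.whenComplete(onResponse, e -> {
            throw new AssertionError(e);
        });
    }
}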