Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-17 02:14:54 +00:00)
Fix Inconsistent Shard Failure Count in Failed Snapshots

This fix was necessary to allow for the test enhancement below: we were not adding shard failure entries to a failed snapshot for those shard snapshot entries that were never attempted, because the snapshot failed during the init stage and was not partial. This caused the never-attempted shards to be counted towards the successful shard count, which seems wrong and broke repository consistency tests.

Also, this change adjusts the snapshot resiliency tests to run another snapshot at the end of each test run, to guarantee that a correct `index.latest` blob exists after each run.

Closes #47550
Parent: bf53ca3380
Commit: f0d8c785e3
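Before the diff, here is a minimal, self-contained sketch of the accounting rule the fix enforces, using hypothetical ShardState and ShardFailure stand-ins rather than the real snapshot classes: every shard that did not complete successfully, including shards that were never attempted, must contribute a failure entry, so that the successful shard count always equals the total shard count minus the number of shard failures.

// Hypothetical stand-ins for illustration only; not the real
// SnapshotsInProgress.ShardState or SnapshotShardFailure classes.
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

public class ShardFailureAccountingSketch {

    enum ShardState {
        SUCCESS(true, false),
        FAILED(true, true),
        INIT(false, false);   // never attempted: the snapshot aborted before this shard started

        private final boolean completed;
        private final boolean failed;

        ShardState(boolean completed, boolean failed) {
            this.completed = completed;
            this.failed = failed;
        }

        boolean completed() { return completed; }
        boolean failed() { return failed; }
    }

    record ShardFailure(String shardId, String reason) { }

    // Mirrors the accounting in the fix: failed shards report their real reason,
    // shards that never completed are recorded as "skipped", and anything else
    // must be a genuine success.
    static List<ShardFailure> buildShardFailures(Map<String, ShardState> shards) {
        List<ShardFailure> failures = new ArrayList<>();
        for (Map.Entry<String, ShardState> shard : shards.entrySet()) {
            ShardState state = shard.getValue();
            if (state.failed()) {
                failures.add(new ShardFailure(shard.getKey(), "shard failed"));
            } else if (state.completed() == false) {
                // Before the fix these shards produced no failure entry and were
                // implicitly counted as successful.
                failures.add(new ShardFailure(shard.getKey(), "skipped"));
            } else {
                assert state == ShardState.SUCCESS;
            }
        }
        return failures;
    }

    public static void main(String[] args) {
        Map<String, ShardState> shards = Map.of(
            "[test][0]", ShardState.SUCCESS,
            "[test][1]", ShardState.FAILED,
            "[test][2]", ShardState.INIT);   // never attempted
        List<ShardFailure> failures = buildShardFailures(shards);
        int total = shards.size();
        int successful = total - failures.size();
        // Prints: total=3 failed=2 successful=1
        System.out.println("total=" + total + " failed=" + failures.size() + " successful=" + successful);
    }
}

With the pre-fix behaviour, the INIT shard would produce no failure entry and the summary would claim two successful shards out of three, which is exactly the inconsistency the repository consistency checks tripped over.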
@@ -1090,8 +1090,13 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
             for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardStatus : entry.shards()) {
                 ShardId shardId = shardStatus.key;
                 ShardSnapshotStatus status = shardStatus.value;
-                if (status.state().failed()) {
+                final ShardState state = status.state();
+                if (state.failed()) {
                     shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, status.reason()));
+                } else if (state.completed() == false) {
+                    shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, "skipped"));
+                } else {
+                    assert state == ShardState.SUCCESS;
                 }
             }
             final ShardGenerations shardGenerations = buildGenerations(entry, metaData);
@@ -216,6 +216,7 @@ import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.hasSize;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.iterableWithSize;
 import static org.hamcrest.Matchers.lessThanOrEqualTo;
 import static org.mockito.Mockito.mock;
 
@@ -249,8 +250,19 @@ public class SnapshotResiliencyTests extends ESTestCase {
             clearDisruptionsAndAwaitSync();
 
             final StepListener<CleanupRepositoryResponse> cleanupResponse = new StepListener<>();
-            client().admin().cluster().cleanupRepository(
-                new CleanupRepositoryRequest("repo"), cleanupResponse);
+            final StepListener<CreateSnapshotResponse> createSnapshotResponse = new StepListener<>();
+            // Create another snapshot and then clean up the repository to verify that the repository works correctly no matter the
+            // failures seen during the previous test.
+            client().admin().cluster().prepareCreateSnapshot("repo", "last-snapshot")
+                .setWaitForCompletion(true).execute(createSnapshotResponse);
+            continueOrDie(createSnapshotResponse, r -> {
+                final SnapshotInfo snapshotInfo = r.getSnapshotInfo();
+                // Snapshot can fail because some tests leave indices in a red state because data nodes were stopped
+                assertThat(snapshotInfo.state(), either(is(SnapshotState.SUCCESS)).or(is(SnapshotState.FAILED)));
+                assertThat(snapshotInfo.shardFailures(), iterableWithSize(snapshotInfo.failedShards()));
+                assertThat(snapshotInfo.successfulShards(), is(snapshotInfo.totalShards() - snapshotInfo.failedShards()));
+                client().admin().cluster().cleanupRepository(new CleanupRepositoryRequest("repo"), cleanupResponse);
+            });
             final AtomicBoolean cleanedUp = new AtomicBoolean(false);
             continueOrDie(cleanupResponse, r -> cleanedUp.set(true));
 