Mirror of https://github.com/honeymoose/OpenSearch.git (synced 2025-02-19 19:35:02 +00:00)
TEST: Avoid triggering merges in FlushIT
In testSyncedFlushSkipOutOfSyncReplicas, we reindex the extra documents to all shards, including the out-of-sync replica. However, reindexing to that replica can trigger merges (due to the new deletes), which cause the synced flush to fail. This test started failing after LUCENE-8263, which merges segments with a large number of deletes more aggressively.
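For context on why the extra deletes matter: a synced flush only succeeds when a shard's Lucene commit stays stable while the sync id is written, so a background merge (for example, one reclaiming deleted documents) can make that shard fail the flush. The standalone Lucene sketch below only illustrates that mechanism and is not code from this commit; the class name and the explicit forceMergeDeletes() call are invented for the demo, whereas in the test the merge is kicked off by the merge policy itself (more eagerly since LUCENE-8263).

    import java.nio.file.Files;

    import org.apache.lucene.analysis.standard.StandardAnalyzer;
    import org.apache.lucene.document.Document;
    import org.apache.lucene.document.Field;
    import org.apache.lucene.document.StringField;
    import org.apache.lucene.index.DirectoryReader;
    import org.apache.lucene.index.IndexWriter;
    import org.apache.lucene.index.IndexWriterConfig;
    import org.apache.lucene.index.Term;
    import org.apache.lucene.store.Directory;
    import org.apache.lucene.store.FSDirectory;

    // Illustrative only: shows how re-adding documents records deletes that a merge
    // then reclaims, rewriting segments underneath the current commit.
    public class MergeOnDeleteDemo {
        public static void main(String[] args) throws Exception {
            try (Directory dir = FSDirectory.open(Files.createTempDirectory("merge-demo"));
                 IndexWriter writer = new IndexWriter(dir, new IndexWriterConfig(new StandardAnalyzer()))) {

                // Index a batch of documents and commit once.
                for (int i = 0; i < 100; i++) {
                    Document doc = new Document();
                    doc.add(new StringField("id", "doc_" + i, Field.Store.YES));
                    writer.addDocument(doc);
                }
                writer.commit();

                // Re-adding documents under the same id marks the old copies as deleted.
                for (int i = 0; i < 100; i++) {
                    Document doc = new Document();
                    doc.add(new StringField("id", "doc_" + i, Field.Store.YES));
                    writer.updateDocument(new Term("id", "doc_" + i), doc);
                }

                // Force a merge that drops the deleted docs (the demo's stand-in for the
                // merge policy doing this on its own once enough deletes accumulate).
                writer.forceMergeDeletes();
                writer.commit();

                try (DirectoryReader reader = DirectoryReader.open(dir)) {
                    System.out.println("live docs: " + reader.numDocs()
                            + ", segments: " + reader.leaves().size());
                }
            }
        }
    }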
parent eb73dde7c8
commit 0ed3458534
@@ -274,10 +274,13 @@ public class FlushIT extends ESIntegTestCase {
                "out of sync replica; num docs on replica [" + (numDocs + extraDocs) + "]; num docs on primary [" + numDocs + "]"));
        // Index extra documents to all shards - synced-flush should be ok.
        for (IndexShard indexShard : indexShards) {
            // Do not reindex documents to the out-of-sync replica to avoid triggering merges
            if (indexShard != outOfSyncReplica) {
                for (int i = 0; i < extraDocs; i++) {
                    indexDoc(IndexShardTestCase.getEngine(indexShard), "extra_" + i);
                }
            }
        }
        final ShardsSyncedFlushResult fullResult = SyncedFlushUtil.attemptSyncedFlush(logger, internalCluster(), shardId);
        assertThat(fullResult.totalShards(), equalTo(numberOfReplicas + 1));
        assertThat(fullResult.successfulShards(), equalTo(numberOfReplicas + 1));