SNAPSHOT+TESTS: Rem. Mock Atomic Writes Randomness (#37011)
* Randomly doing non-atomic writes causes rare 0-byte reads from `index-N` files in tests.
* Removing this randomness fixes these random failures and is valid because the randomness does not reproduce a real-world failure mode:
  * Cloud-based blob stores (S3, GCS, and Azure) do not have inconsistent partial reads of a blob: you either read a complete blob or nothing.
  * For file-system-based blob stores, the atomic move we use to write a file atomically (via `java.nio.file.StandardCopyOption#ATOMIC_MOVE`) would throw if the file system did not provide atomic moves; see the sketch below.
* Closes #37005
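The second bullet's point about `ATOMIC_MOVE` is the guarantee the change leans on. Below is a minimal sketch of the write-then-rename pattern using plain `java.nio.file`, with hypothetical names (`AtomicWriteSketch`, `writeAtomically`); it is not the `FsBlobContainer` implementation, only an illustration that the target file is either complete or absent, and that a file system without atomic moves fails with `AtomicMoveNotSupportedException` instead of silently degrading to a non-atomic write.

```java
import java.io.IOException;
import java.nio.file.AtomicMoveNotSupportedException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Simplified sketch of an atomic blob write: stage the full content in a temp file,
// then rename it into place in one atomic step, so readers never observe a partially
// written (or 0-byte) target file.
public class AtomicWriteSketch {

    static void writeAtomically(Path targetDir, String blobName, byte[] bytes) throws IOException {
        // write the complete content to a temp file in the same directory as the target
        Path tempBlob = Files.createTempFile(targetDir, "pending-", ".tmp");
        Files.write(tempBlob, bytes);
        try {
            // atomic rename: either the old state or the complete new blob is visible
            Files.move(tempBlob, targetDir.resolve(blobName), StandardCopyOption.ATOMIC_MOVE);
        } catch (AtomicMoveNotSupportedException e) {
            // a file system without atomic moves fails loudly instead of silently
            // falling back to a non-atomic write
            Files.deleteIfExists(tempBlob);
            throw e;
        }
    }
}
```

On most local file systems a rename within the same directory is atomic, which is why the temp file is created next to the target rather than in a separate temp location.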
parent edb48321ba
commit 82b1f10eb1
@@ -3179,7 +3179,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     *
     * See https://github.com/elastic/elasticsearch/issues/20876
     */
    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/37005")
    public void testSnapshotCanceledOnRemovedShard() throws Exception {
        final int numPrimaries = 1;
        final int numReplicas = 1;
@@ -3244,7 +3243,6 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
                .put("location", repoPath)
                .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f)
                // test that we can take a snapshot after a failed one, even if a partial index-N was written
                .put("allow_atomic_operations", false)
                .put("random", randomAlphaOfLength(10))));

        logger.info("--> indexing some data");
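For orientation, the settings in the hunk above are supplied when the test registers the `mock` repository type. Below is a minimal sketch of such a registration, assuming a test extending `AbstractSnapshotIntegTestCase`; the class name, test name, and repository name are placeholders, and the removed `allow_atomic_operations` setting is intentionally omitted.

```java
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;

import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;

// Placeholder test class; only meant to show where settings such as
// "random_control_io_exception_rate" end up when the "mock" repository type is registered.
public class MockRepositoryRegistrationSketch extends AbstractSnapshotIntegTestCase {

    public void testRegisterMockRepository() {
        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
            .setType("mock")                           // resolves to MockRepository in tests
            .setSettings(Settings.builder()
                .put("location", randomRepoPath())     // stands in for the repoPath variable in the diff
                .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f)
                .put("random", randomAlphaOfLength(10))));
    }
}
```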
@@ -110,8 +110,6 @@ public class MockRepository extends FsRepository {
    /** Allows blocking on writing the snapshot file at the end of snapshot creation to simulate a died master node */
    private volatile boolean blockAndFailOnWriteSnapFile;

    private volatile boolean allowAtomicOperations;

    private volatile boolean blocked = false;

    public MockRepository(RepositoryMetaData metadata, Environment environment,
@@ -127,7 +125,6 @@ public class MockRepository extends FsRepository {
        blockAndFailOnWriteSnapFile = metadata.settings().getAsBoolean("block_on_snap", false);
        randomPrefix = metadata.settings().get("random", "default");
        waitAfterUnblock = metadata.settings().getAsLong("wait_after_unblock", 0L);
        allowAtomicOperations = metadata.settings().getAsBoolean("allow_atomic_operations", true);
        logger.info("starting mock repository with random prefix {}", randomPrefix);
    }

@@ -361,7 +358,6 @@ public class MockRepository extends FsRepository {
        public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize,
                                    final boolean failIfAlreadyExists) throws IOException {
            final Random random = RandomizedContext.current().getRandom();
            if (allowAtomicOperations && random.nextBoolean()) {
                if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) {
                    // Simulate a failure between the write and move operation in FsBlobContainer
                    final String tempBlobName = FsBlobContainer.tempBlobName(blobName);
@@ -375,12 +371,6 @@ public class MockRepository extends FsRepository {
                    maybeIOExceptionOrBlock(blobName);
                    super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists);
                }
            } else {
                // Simulate a non-atomic write since many blob container
                // implementations does not support atomic write
                maybeIOExceptionOrBlock(blobName);
                super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
            }
        }
    }
}
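The `else` branch removed in the hunk above simulated a non-atomic write by calling plain `writeBlob` on the final blob name. Below is a minimal sketch, with hypothetical names and not taken from `MockRepository`, of why that path can surface as the rare 0-byte reads of `index-N` mentioned in the commit message.

```java
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hypothetical helper: a non-atomic blob write of the kind the removed branch simulated.
// Files.copy creates (or truncates) the target first and then streams the content in,
// so a concurrent reader that opens index-N inside that window sees an empty or partial
// file; the atomic write-then-rename sketch near the commit message closes that window.
final class NonAtomicWriteSketch {

    static void writeDirectly(Path targetBlob, InputStream data) throws IOException {
        // the target becomes visible (and readable as 0 bytes) before its content is complete
        Files.copy(data, targetBlob, StandardCopyOption.REPLACE_EXISTING);
    }
}
```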