[TEST] adds randomness between atomic and non-atomic move operations in MockRepository
parent 2d3a52c0f2
commit adb7aaded4
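The MockRepository change below branches on RandomizedContext.current().getRandom(), the per-test Random supplied by the randomized-testing framework that the diff also imports, so whether a given move is simulated atomically or as write-then-delete is reproducible from the test seed. A minimal sketch of that pattern in isolation (the class and method names here are invented for illustration and are not part of this commit):

import com.carrotsearch.randomizedtesting.RandomizedContext;

import java.util.Random;

// Hypothetical illustration only: shows how a randomized test can flip between
// two equivalent code paths while staying reproducible under the same seed.
final class RandomBranchSketch {

    static String chooseMoveStrategy() {
        // Must run on a thread managed by RandomizedRunner,
        // otherwise RandomizedContext.current() throws.
        Random random = RandomizedContext.current().getRandom();
        return random.nextBoolean()
            ? "non-atomic move (write target, then delete source)"
            : "atomic move (delegate to FsBlobContainer)";
    }
}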
@@ -2675,8 +2675,12 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     public void testSnapshotSucceedsAfterSnapshotFailure() throws Exception {
         logger.info("--> creating repository");
         final Path repoPath = randomRepoPath();
-        assertAcked(client().admin().cluster().preparePutRepository("test-repo").setType("mock").setVerify(false).setSettings(
-            Settings.builder().put("location", repoPath).put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f)));
+        final Client client = client();
+        assertAcked(client.admin().cluster().preparePutRepository("test-repo").setType("mock").setVerify(false).setSettings(
+            Settings.builder()
+                .put("location", repoPath)
+                .put("random_control_io_exception_rate", randomIntBetween(5, 20) / 100f)
+                .put("random", randomAsciiOfLength(10))));
 
         logger.info("--> indexing some data");
         createIndex("test-idx");
@@ -2686,12 +2690,12 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
             index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
         }
         refresh();
-        assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo((long) numDocs));
+        assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo((long) numDocs));
 
         logger.info("--> snapshot with potential I/O failures");
         try {
             CreateSnapshotResponse createSnapshotResponse =
-                client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
+                client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap")
                     .setWaitForCompletion(true)
                     .setIndices("test-idx")
                     .get();
@@ -2702,21 +2706,21 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
                     assertThat(shardFailure.reason(), containsString("Random IOException"));
                 }
             }
-        } catch (Exception ex) {
+        } catch (SnapshotCreationException | RepositoryException ex) {
             // sometimes, the snapshot will fail with a top level I/O exception
             assertThat(ExceptionsHelper.stackTrace(ex), containsString("Random IOException"));
         }
 
         logger.info("--> snapshot with no I/O failures");
-        assertAcked(client().admin().cluster().preparePutRepository("test-repo-2").setType("mock").setVerify(false).setSettings(
+        assertAcked(client.admin().cluster().preparePutRepository("test-repo-2").setType("mock").setSettings(
             Settings.builder().put("location", repoPath)));
         CreateSnapshotResponse createSnapshotResponse =
-            client().admin().cluster().prepareCreateSnapshot("test-repo-2", "test-snap-2")
+            client.admin().cluster().prepareCreateSnapshot("test-repo-2", "test-snap-2")
                 .setWaitForCompletion(true)
                 .setIndices("test-idx")
                 .get();
         assertEquals(0, createSnapshotResponse.getSnapshotInfo().failedShards());
-        GetSnapshotsResponse getSnapshotsResponse = client().admin().cluster().prepareGetSnapshots("test-repo-2")
+        GetSnapshotsResponse getSnapshotsResponse = client.admin().cluster().prepareGetSnapshots("test-repo-2")
             .addSnapshots("test-snap-2").get();
         assertEquals(SnapshotState.SUCCESS, getSnapshotsResponse.getSnapshots().get(0).state());
     }
@@ -33,6 +33,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicLong;
 
+import com.carrotsearch.randomizedtesting.RandomizedContext;
 import org.apache.lucene.index.CorruptIndexException;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.cluster.metadata.MetaData;
@@ -321,20 +322,28 @@ public class MockRepository extends FsRepository {
 
         @Override
         public void move(String sourceBlob, String targetBlob) throws IOException {
-            // simulate a non-atomic move, since many blob container implementations
-            // will not have an atomic move, and we should be able to handle that
-            maybeIOExceptionOrBlock(targetBlob);
-            super.writeBlob(targetBlob, super.readBlob(sourceBlob), 0L);
-            super.deleteBlob(sourceBlob);
+            if (RandomizedContext.current().getRandom().nextBoolean()) {
+                // simulate a non-atomic move, since many blob container implementations
+                // will not have an atomic move, and we should be able to handle that
+                maybeIOExceptionOrBlock(targetBlob);
+                super.writeBlob(targetBlob, super.readBlob(sourceBlob), 0L);
+                super.deleteBlob(sourceBlob);
+            } else {
+                // atomic move since this inherits from FsBlobContainer which provides atomic moves
+                maybeIOExceptionOrBlock(targetBlob);
+                super.move(sourceBlob, targetBlob);
+            }
         }
 
         @Override
         public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
             maybeIOExceptionOrBlock(blobName);
             super.writeBlob(blobName, inputStream, blobSize);
-            // for network based repositories, the blob may have been written but we may still
-            // get an error with the client connection, so an IOException here simulates this
-            maybeIOExceptionOrBlock(blobName);
+            if (RandomizedContext.current().getRandom().nextBoolean()) {
+                // for network based repositories, the blob may have been written but we may still
+                // get an error with the client connection, so an IOException here simulates this
+                maybeIOExceptionOrBlock(blobName);
+            }
         }
     }
 }
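For context on why the non-atomic branch matters: many blob stores can only emulate a rename as a copy followed by a delete, so a failure injected between the two steps can leave the blob visible under both names. A small standalone sketch using java.nio.file, not taken from the Elasticsearch code base, contrasting the two behaviors the mock now exercises at random:

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Hypothetical helper for illustration; MockRepository itself delegates the
// atomic case to FsBlobContainer rather than using these calls directly.
final class MoveSketch {

    static void atomicMove(Path source, Path target) throws IOException {
        // Succeeds or fails as a unit on file systems that support it.
        Files.move(source, target, StandardCopyOption.ATOMIC_MOVE);
    }

    static void nonAtomicMove(Path source, Path target) throws IOException {
        // A crash or injected IOException between these two steps leaves the
        // blob present under both names, which callers must be able to handle.
        Files.copy(source, target, StandardCopyOption.REPLACE_EXISTING);
        Files.delete(source);
    }
}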