Dry up Snapshot ITs further (#59035) (#59052)

Some more obvious cleaning up of the snapshot ITs.

Follow-up to #58818.
Armin Braun 2020-07-06 12:26:42 +02:00 committed by GitHub
parent f0dd9b4ace
commit 62eabdac6e
10 changed files with 225 additions and 620 deletions
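
The pattern applied throughout the diff, as a before/after sketch (both sides are taken from the hunks below; the new helper randomizes the repository location and compression itself, and only rarely sets a chunk size):

    // before: each test wired up repository settings inline
    createRepository("test-repo", "fs", Settings.builder()
        .put("location", randomRepoPath())
        .put("compress", randomBoolean())
        .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));

    // after: a shared overload in AbstractSnapshotIntegTestCase picks the settings
    createRepository("test-repo", "fs");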

View File: SnapshotDisruptionIT.java

@@ -165,10 +165,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
         createRandomIndex(idxName);
-        createRepository("test-repo", "fs", Settings.builder()
-            .put("location", randomRepoPath())
-            .put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "fs");
         final String masterNode1 = internalCluster().getMasterName();
@@ -248,7 +245,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
         index(idxName, "type", JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject());
         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");
         final String masterNode = internalCluster().getMasterName();
@@ -304,7 +301,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
         final String dataNode = internalCluster().startDataOnlyNode();
         ensureStableCluster(4);
         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");
         final String indexName = "index-one";
         createIndex(indexName);
@@ -363,8 +360,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
     }
     private void createRandomIndex(String idxName) throws InterruptedException {
-        assertAcked(prepareCreate(idxName, 0, Settings.builder().put("number_of_shards", between(1, 20))
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate(idxName, 0, indexSettingsNoReplicas(between(1, 5))));
         logger.info("--> indexing some data");
         final int numdocs = randomIntBetween(10, 100);
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];

View File: BlobStoreRepositoryCleanupIT.java

@@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.RepositoryCleanupInProgress;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;
@@ -81,9 +80,7 @@ public class BlobStoreRepositoryCleanupIT extends AbstractSnapshotIntegTestCase
         internalCluster().startMasterOnlyNodes(2);
         internalCluster().startDataOnlyNodes(1);
-        createRepository(repoName, "mock", Settings.builder()
-            .put("location", randomRepoPath()).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository(repoName, "mock");
         logger.info("--> snapshot");
         client().admin().cluster().prepareCreateSnapshot(repoName, "test-snap")
@@ -112,8 +109,7 @@ public class BlobStoreRepositoryCleanupIT extends AbstractSnapshotIntegTestCase
         internalCluster().startNodes(Settings.EMPTY);
         final String repoName = "test-repo";
-        createRepository(repoName, "fs", Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository(repoName, "fs");
         logger.info("--> create three snapshots");
         for (int i = 0; i < 3; ++i) {

View File: BlobStoreIncrementalityIT.java

@@ -28,11 +28,9 @@ import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESIntegTestCase;
 import java.io.IOException;
@@ -78,7 +76,7 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
         final String snapshot1 = "snap-1";
         final String repo = "test-repo";
-        createRepository(repo, "fs", randomRepoPath());
+        createRepository(repo, "fs");
         logger.info("--> creating snapshot 1");
         client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get();
@@ -152,7 +150,7 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
         final String snapshot1 = "snap-1";
         final String repo = "test-repo";
-        createRepository(repo, "fs", randomRepoPath());
+        createRepository(repo, "fs");
         logger.info("--> creating snapshot 1");
         client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get();
@@ -172,9 +170,9 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
         assertThat(secondSnapshotShardStatus.getIncrementalFileCount(), greaterThan(0));
     }
-    private void assertCountInIndexThenDelete(String index, long expectedCount) throws ExecutionException, InterruptedException {
+    private void assertCountInIndexThenDelete(String index, long expectedCount) {
         logger.info("--> asserting that index [{}] contains [{}] documents", index, expectedCount);
-        assertThat(getCountForIndex(index), is(expectedCount));
+        assertDocCount(index, expectedCount);
         logger.info("--> deleting index [{}]", index);
         assertThat(client().admin().indices().prepareDelete(index).get().isAcknowledged(), is(true));
     }
@@ -203,9 +201,4 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
         assertThat(restoreInfo.totalShards(), is(1));
         assertThat(restoreInfo.failedShards(), is(0));
     }
-    private long getCountForIndex(String indexName) throws ExecutionException, InterruptedException {
-        return client().search(new SearchRequest(new SearchRequest(indexName).source(
-            new SearchSourceBuilder().size(0).trackTotalHits(true)))).get().getHits().getTotalHits().value;
-    }
 }

View File: DedicatedClusterSnapshotRestoreIT.java

@@ -45,7 +45,6 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNode;
@@ -62,7 +61,6 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -117,7 +115,6 @@ import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL;
 import static org.elasticsearch.test.NodeRoles.nonMasterNode;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;
@@ -225,7 +222,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         setSettingValue.accept("new value");
         assertSettingValue.accept("new value");
-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");
         createFullSnapshot("test-repo", "test-snap");
         assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet()
             .getSnapshots().get(0).state(),
@@ -372,17 +369,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         logger.info("--> start 2 nodes");
         Client client = client();
-        assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(2)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx", 100);
         final Path repoPath = randomRepoPath();
         createRepository("test-repo", "mock",
             Settings.builder().put("location", repoPath).put("random", randomAlphaOfLength(10))
@@ -419,16 +408,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         nodes.add(internalCluster().startNode());
         Client client = client();
-        assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(2)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx", 100);
         Path repo = randomRepoPath();
         createRepository("test-repo", "mock",
@@ -490,16 +472,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         cluster().wipeIndices("_all");
         logger.info("--> create an index that will have some unallocated shards");
-        assertAcked(prepareCreate("test-idx-some", 2, Settings.builder().put("number_of_shards", 6)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx-some", 2, indexSettingsNoReplicas(6)));
         ensureGreen();
-        logger.info("--> indexing some data into test-idx-some");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx-some", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client().prepareSearch("test-idx-some").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx-some", 100);
         logger.info("--> shutdown one of the nodes");
         internalCluster().stopRandomDataNode();
@@ -508,8 +483,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             equalTo(false));
         logger.info("--> create an index that will have all allocated shards");
-        assertAcked(prepareCreate("test-idx-all", 1, Settings.builder().put("number_of_shards", 6)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx-all", 1, indexSettingsNoReplicas(6)));
         ensureGreen("test-idx-all");
         logger.info("--> create an index that will be closed");
@@ -531,7 +505,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             .put("number_of_replicas", 0)).setWaitForActiveShards(ActiveShardCount.NONE).get());
         assertTrue(client().admin().indices().prepareExists("test-idx-none").get().isExists());
-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");
         logger.info("--> start snapshot with default settings without a closed index - should fail");
         CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")
@@ -603,8 +577,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
-        assertThat(client().prepareSearch("test-idx-all").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        assertDocCount("test-idx-all", 100L);
         logger.info("--> restore snapshot for the partial index");
         cluster().wipeIndices("test-idx-some");
@@ -614,9 +587,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), lessThan(6)));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
-        assertThat(client().prepareSearch("test-idx-some").setSize(0).get().getHits().getTotalHits().value, allOf(greaterThan(0L),
-            lessThan(100L)));
+        assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L)));
         logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully");
         cluster().wipeIndices("test-idx-none");
@@ -626,9 +597,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(0));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(6));
-        assertThat(client().prepareSearch("test-idx-some").setSize(0).get().getHits().getTotalHits().value, allOf(greaterThan(0L),
-            lessThan(100L)));
+        assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L)));
         logger.info("--> restore snapshot for the closed index that was snapshotted completely");
         restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2")
@@ -637,8 +606,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(4));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(4));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
-        assertThat(client().prepareSearch("test-idx-closed").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        assertDocCount("test-idx-closed", 100L);
     }
     public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception {
@@ -650,20 +618,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startNodes(2, nodeSettings);
         cluster().wipeIndices("_all");
-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");
         int numberOfShards = 6;
         logger.info("--> create an index that will have some unallocated shards");
-        assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", numberOfShards)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(numberOfShards)));
         ensureGreen();
-        logger.info("--> indexing some data into test-idx");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx", 100);
         logger.info("--> start snapshot");
         assertThat(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx")
@@ -691,7 +652,6 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             equalTo(6));
         ensureGreen("test-idx");
-        assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
         IntSet reusedShards = new IntHashSet();
         List<RecoveryState> recoveryStates = client().admin().indices().prepareRecoveries("test-idx").get()
@@ -786,23 +746,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startDataOnlyNodes(2);
         final Path repoPath = randomRepoPath();
-        createRepository("test-repo", "fs", Settings.builder()
-            .put("location", repoPath).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "fs", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);
-        assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(between(1, 20))));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(10, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(10, 100));
         final int numberOfShards = getNumShards("test-idx").numPrimaries;
         logger.info("number of shards: {}", numberOfShards);
@@ -838,23 +788,12 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startDataOnlyNodes(2);
         final Path repoPath = randomRepoPath();
-        createRepository("test-repo", "mock", Settings.builder()
-            .put("location", repoPath).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);
-        assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(between(1, 20))));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(10, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(10, 100));
         final int numberOfShards = getNumShards("test-idx").numPrimaries;
         logger.info("number of shards: {}", numberOfShards);
@@ -897,28 +836,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startMasterOnlyNodes(2);
         internalCluster().startDataOnlyNodes(2);
         logger.info("--> creating repository");
         final Path repoPath = randomRepoPath();
-        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
-            .setType("mock").setSettings(Settings.builder()
-                .put("location", repoPath)
-                .put("compress", randomBoolean())
-                .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);
-        assertAcked(prepareCreate("test-idx", 0, Settings.builder()
-            .put("number_of_shards", 6).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(6)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(50, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(50, 100));
         logger.info("--> stopping random data node, which should cause shards to go missing");
         internalCluster().stopRandomDataNode();
@@ -973,23 +897,11 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         final String sourceIdx = "test-idx";
         final String shrunkIdx = "test-idx-shrunk";
         logger.info("--> creating repository");
-        assertAcked(client.admin().cluster().preparePutRepository(repo).setType("fs")
-            .setSettings(Settings.builder().put("location", randomRepoPath())
-                .put("compress", randomBoolean())));
+        createRepository(repo, "fs");
-        assertAcked(prepareCreate(sourceIdx, 0, Settings.builder()
-            .put("number_of_shards", between(2, 10)).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate(sourceIdx, 0, indexSettingsNoReplicas(between(2, 10))));
         ensureGreen();
-        logger.info("--> indexing some data");
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[randomIntBetween(10, 100)];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(sourceIdx, "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs(sourceIdx, randomIntBetween(10, 100));
         logger.info("--> shrink the index");
         assertAcked(client.admin().indices().prepareUpdateSettings(sourceIdx)
@@ -1139,27 +1051,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         logger.info("--> starting a master node and two data nodes");
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNodes(2);
         logger.info("--> creating repository");
         final Path repoPath = randomRepoPath();
-        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
-            .setType("mock").setSettings(Settings.builder()
-                .put("location", repoPath)
-                .put("compress", randomBoolean())
-                .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);
-        assertAcked(prepareCreate("test-idx", 0, Settings.builder()
-            .put("number_of_shards", 5).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(5)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(50, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(50, 100));
         final String dataNode = blockNodeWithIndex("test-repo", "test-idx");
         logger.info("--> snapshot");
         ServiceDisruptionScheme disruption = new BusyMasterServiceDisruption(random(), Priority.HIGH);
@@ -1192,23 +1090,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startMasterOnlyNode();
         final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
         final Path repoPath = randomRepoPath();
-        createRepository("test-repo", "mock", Settings.builder()
-            .put("location", repoPath).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);
-        assertAcked(prepareCreate("test-idx", 0, Settings.builder()
-            .put("number_of_shards", 2).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(2)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(50, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(50, 100));
         blockAllDataNodes("test-repo");
         logger.info("--> snapshot");
         client(internalCluster().getMasterName()).admin().cluster()
@@ -1238,16 +1126,11 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
     public void testRetentionLeasesClearedOnRestore() throws Exception {
         final String repoName = "test-repo-retention-leases";
-        createRepository(repoName, "fs", Settings.builder()
-            .put("location", randomRepoPath()).put("compress", randomBoolean()));
+        createRepository(repoName, "fs");
         final String indexName = "index-retention-leases";
         final int shardCount = randomIntBetween(1, 5);
-        assertAcked(client().admin().indices().prepareCreate(indexName)
-            .setSettings(Settings.builder()
-                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount)
-                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
-            .get());
+        assertAcked(client().admin().indices().prepareCreate(indexName).setSettings(indexSettingsNoReplicas(shardCount)));
         final ShardId shardId = new ShardId(resolveIndex(indexName), randomIntBetween(0, shardCount - 1));
         final int snapshotDocCount = iterations(10, 1000);
@@ -1257,7 +1140,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             indexRequestBuilders[i] = client().prepareIndex(indexName, "_doc").setSource("field", "value");
         }
         indexRandom(true, indexRequestBuilders);
-        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), snapshotDocCount);
+        assertDocCount(indexName, snapshotDocCount);
         final String leaseId = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
         logger.debug("--> adding retention lease with id {} to {}", leaseId, shardId);
@@ -1298,7 +1181,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreResponse.getRestoreInfo().failedShards(), equalTo(0));
         ensureGreen();
-        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), snapshotDocCount);
+        assertDocCount(indexName, snapshotDocCount);
         final RetentionLeases restoredRetentionLeases = Arrays.stream(client().admin().indices().prepareStats(indexName).get()
             .getShards()).filter(s -> s.getShardRouting().shardId().equals(shardId)).findFirst().get()
@@ -1316,7 +1199,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         final String otherDataNode = internalCluster().startDataOnlyNode();
         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");
         blockAllDataNodes(repoName);
         final String snapshotName = "test-snap";
         final ActionFuture<CreateSnapshotResponse> snapshotResponse =

View File: MetadataLoadingDuringSnapshotRestoreIT.java

@@ -71,7 +71,7 @@ public class MetadataLoadingDuringSnapshotRestoreIT extends AbstractSnapshotInte
             client().prepareIndex("others", "other").setSource("rank", 4),
             client().prepareIndex("others", "other").setSource("rank", 5));
-        createRepository("repository", CountingMockRepositoryPlugin.TYPE, randomRepoPath());
+        createRepository("repository", CountingMockRepositoryPlugin.TYPE);
         // Creating a snapshot does not load any metadata
         CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("repository", "snap")

View File: RepositoriesIT.java

@@ -68,7 +68,7 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {
         assertThat(repositoriesMetadata.repository("test-repo-1").type(), equalTo("fs"));
         logger.info("--> creating another repository");
-        createRepository("test-repo-2", "fs", randomRepoPath());
+        createRepository("test-repo-2", "fs");
         logger.info("--> check that both repositories are in cluster state");
         clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get();

View File: SnapshotShardsServiceIT.java

@@ -20,8 +20,6 @@
 package org.elasticsearch.snapshots;
 import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
 import org.elasticsearch.plugins.Plugin;
@@ -53,17 +51,12 @@ public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase {
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNode();
-        createRepository("test-repo", "mock", Settings.builder()
-            .put("location", randomRepoPath()).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "mock");
         final int shards = between(1, 10);
-        assertAcked(prepareCreate("test-index", 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-index", 0, indexSettingsNoReplicas(shards)));
         ensureGreen();
-        final int numDocs = scaledRandomIntBetween(50, 100);
-        for (int i = 0; i < numDocs; i++) {
-            index("test-index", "doc", Integer.toString(i));
-        }
+        indexRandomDocs("test-index", scaledRandomIntBetween(50, 100));
         logger.info("--> blocking repository");
         String blockedNode = blockNodeWithIndex("test-repo", "test-index");

View File: SnapshotStatusApisIT.java

@@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
 import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.SnapshotsInProgress;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.core.internal.io.IOUtils;
@@ -50,7 +49,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
     public void testStatusApiConsistency() {
         Client client = client();
-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");
         createIndex("test-idx-1", "test-idx-2", "test-idx-3");
         ensureGreen();
@@ -158,7 +157,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
     }
     public void testGetSnapshotsWithoutIndices() {
-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");
         logger.info("--> snapshot");
         final SnapshotInfo snapshotInfo =
@@ -199,7 +198,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
         index(indexTwo, "_doc", "some_doc_id", "foo", "bar");
         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");
         blockDataNode(repoName, dataNodeOne);
@@ -279,9 +278,6 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
     }
     private static Settings singleShardOneNode(String node) {
-        return Settings.builder()
-            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-            .put("index.routing.allocation.include._name", node).build();
+        return indexSettingsNoReplicas(1).put("index.routing.allocation.include._name", node).build();
     }
 }

View File: AbstractSnapshotIntegTestCase.java

@@ -21,12 +21,16 @@ package org.elasticsearch.snapshots;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.SnapshotsInProgress;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.DeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;
@@ -39,6 +43,7 @@ import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.RepositoryData;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -280,6 +285,19 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
         createRepository(repoName, type, Settings.builder().put("location", location));
     }
+    protected void createRepository(String repoName, String type) {
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean());
+        if (rarely()) {
+            settings = settings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
+        }
+        createRepository(repoName, type, settings);
+    }
+    protected static Settings.Builder indexSettingsNoReplicas(int shards) {
+        return Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0);
+    }
     /**
      * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format.
      */
@@ -329,4 +347,30 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
         assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
         return snapshotInfo;
     }
+    protected void createIndexWithRandomDocs(String indexName, int docCount) throws InterruptedException {
+        createIndex(indexName);
+        ensureGreen();
+        indexRandomDocs(indexName, docCount);
+    }
+    protected void indexRandomDocs(String index, int numdocs) throws InterruptedException {
+        logger.info("--> indexing [{}] documents into [{}]", numdocs, index);
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+        for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex(index, "_doc").setId(Integer.toString(i)).setSource("field1", "bar " + i);
+        }
+        indexRandom(true, builders);
+        flushAndRefresh(index);
+        assertDocCount(index, numdocs);
+    }
+    protected long getCountForIndex(String indexName) {
+        return client().search(new SearchRequest(new SearchRequest(indexName).source(
+            new SearchSourceBuilder().size(0).trackTotalHits(true)))).actionGet().getHits().getTotalHits().value;
+    }
+    protected void assertDocCount(String index, long count) {
+        assertEquals(getCountForIndex(index), count);
+    }
 }
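
Taken together, the new base-class helpers reduce a typical snapshot/restore test to a few lines. A minimal sketch under stated assumptions (the test class and names are hypothetical and imports are omitted; every helper call appears in the diff above):

    public class ExampleSnapshotIT extends AbstractSnapshotIntegTestCase {
        public void testSnapshotAndRestoreRoundTrip() throws Exception {
            createRepository("test-repo", "fs");                 // random location, compression, rarely a chunk_size
            assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(between(1, 5))));
            ensureGreen();
            indexRandomDocs("test-idx", 100);                    // indexes, flushes, and asserts the doc count
            createFullSnapshot("test-repo", "test-snap");
            cluster().wipeIndices("test-idx");
            client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap")
                .setWaitForCompletion(true).get();
            assertDocCount("test-idx", 100);
        }
    }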