Some more obvious cleanup of the snapshot ITs. Follow-up to #58818.

Parent: f0dd9b4ace
Commit: 62eabdac6e
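The recurring pattern in this cleanup, condensed here for orientation from the hunks that follow (the surrounding test method and imports are assumed, not shown): tests stop spelling out randomized repository settings and per-index shard/replica settings inline and instead call shared helpers on AbstractSnapshotIntegTestCase, which pick randomized defaults.

    // Before: each test built its own randomized repository and index settings.
    createRepository("test-repo", "fs", Settings.builder()
        .put("location", randomRepoPath())
        .put("compress", randomBoolean())
        .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
    assertAcked(prepareCreate("test-idx", 0, Settings.builder()
        .put("number_of_shards", between(1, 20)).put("number_of_replicas", 0)));

    // After: the base class supplies randomized repository settings and no-replica index settings.
    createRepository("test-repo", "fs");
    assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(between(1, 20))));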
@@ -165,10 +165,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
         createRandomIndex(idxName);

-        createRepository("test-repo", "fs", Settings.builder()
-            .put("location", randomRepoPath())
-            .put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "fs");

         final String masterNode1 = internalCluster().getMasterName();

@@ -248,7 +245,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
         index(idxName, "type", JsonXContent.contentBuilder().startObject().field("foo", "bar").endObject());

         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");

         final String masterNode = internalCluster().getMasterName();

@@ -304,7 +301,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
         final String dataNode = internalCluster().startDataOnlyNode();
         ensureStableCluster(4);
         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");

         final String indexName = "index-one";
         createIndex(indexName);

@@ -363,8 +360,7 @@ public class SnapshotDisruptionIT extends AbstractSnapshotIntegTestCase {
     }

     private void createRandomIndex(String idxName) throws InterruptedException {
-        assertAcked(prepareCreate(idxName, 0, Settings.builder().put("number_of_shards", between(1, 20))
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate(idxName, 0, indexSettingsNoReplicas(between(1, 5))));
         logger.info("--> indexing some data");
         final int numdocs = randomIntBetween(10, 100);
         IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
@@ -23,7 +23,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotRes
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.RepositoryCleanupInProgress;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.repositories.RepositoriesService;
 import org.elasticsearch.snapshots.AbstractSnapshotIntegTestCase;

@@ -81,9 +80,7 @@ public class BlobStoreRepositoryCleanupIT extends AbstractSnapshotIntegTestCase
         internalCluster().startMasterOnlyNodes(2);
         internalCluster().startDataOnlyNodes(1);

-        createRepository(repoName, "mock", Settings.builder()
-            .put("location", randomRepoPath()).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository(repoName, "mock");

         logger.info("--> snapshot");
         client().admin().cluster().prepareCreateSnapshot(repoName, "test-snap")

@@ -112,8 +109,7 @@ public class BlobStoreRepositoryCleanupIT extends AbstractSnapshotIntegTestCase
         internalCluster().startNodes(Settings.EMPTY);

         final String repoName = "test-repo";
-        createRepository(repoName, "fs", Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository(repoName, "fs");

         logger.info("--> create three snapshots");
         for (int i = 0; i < 3; ++i) {
@@ -28,11 +28,9 @@ import org.elasticsearch.action.bulk.BulkItemResponse;
 import org.elasticsearch.action.bulk.BulkRequest;
 import org.elasticsearch.action.bulk.BulkResponse;
 import org.elasticsearch.action.index.IndexRequest;
-import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.test.ESIntegTestCase;

 import java.io.IOException;

@@ -78,7 +76,7 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {

         final String snapshot1 = "snap-1";
         final String repo = "test-repo";
-        createRepository(repo, "fs", randomRepoPath());
+        createRepository(repo, "fs");

         logger.info("--> creating snapshot 1");
         client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get();

@@ -152,7 +150,7 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {

         final String snapshot1 = "snap-1";
         final String repo = "test-repo";
-        createRepository(repo, "fs", randomRepoPath());
+        createRepository(repo, "fs");

         logger.info("--> creating snapshot 1");
         client().admin().cluster().prepareCreateSnapshot(repo, snapshot1).setIndices(indexName).setWaitForCompletion(true).get();

@@ -172,9 +170,9 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
         assertThat(secondSnapshotShardStatus.getIncrementalFileCount(), greaterThan(0));
     }

-    private void assertCountInIndexThenDelete(String index, long expectedCount) throws ExecutionException, InterruptedException {
+    private void assertCountInIndexThenDelete(String index, long expectedCount) {
         logger.info("--> asserting that index [{}] contains [{}] documents", index, expectedCount);
-        assertThat(getCountForIndex(index), is(expectedCount));
+        assertDocCount(index, expectedCount);
         logger.info("--> deleting index [{}]", index);
         assertThat(client().admin().indices().prepareDelete(index).get().isAcknowledged(), is(true));
     }

@@ -203,9 +201,4 @@ public class BlobStoreIncrementalityIT extends AbstractSnapshotIntegTestCase {
         assertThat(restoreInfo.totalShards(), is(1));
         assertThat(restoreInfo.failedShards(), is(0));
     }
-
-    private long getCountForIndex(String indexName) throws ExecutionException, InterruptedException {
-        return client().search(new SearchRequest(new SearchRequest(indexName).source(
-            new SearchSourceBuilder().size(0).trackTotalHits(true)))).get().getHits().getTotalHits().value;
-    }
 }
@@ -45,7 +45,6 @@ import org.elasticsearch.cluster.ClusterStateUpdateTask;
 import org.elasticsearch.cluster.NamedDiff;
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.Metadata;
 import org.elasticsearch.cluster.node.DiscoveryNode;

@@ -62,7 +61,6 @@ import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.settings.SettingsFilter;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.set.Sets;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;

@@ -117,7 +115,6 @@ import static org.elasticsearch.index.seqno.RetentionLeaseActions.RETAIN_ALL;
 import static org.elasticsearch.test.NodeRoles.nonMasterNode;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFutureThrows;
-import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertRequestBuilderThrows;
 import static org.hamcrest.Matchers.allOf;
 import static org.hamcrest.Matchers.containsString;

@@ -225,7 +222,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         setSettingValue.accept("new value");
         assertSettingValue.accept("new value");

-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");
         createFullSnapshot("test-repo", "test-snap");
         assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").execute().actionGet()
                 .getSnapshots().get(0).state(),
@@ -372,17 +369,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         logger.info("--> start 2 nodes");
         Client client = client();

-        assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(2)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx", 100);

         final Path repoPath = randomRepoPath();
         createRepository("test-repo", "mock",
             Settings.builder().put("location", repoPath).put("random", randomAlphaOfLength(10))

@@ -419,16 +408,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         nodes.add(internalCluster().startNode());
         Client client = client();

-        assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", 2)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(2)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx", 100);

         Path repo = randomRepoPath();
         createRepository("test-repo", "mock",

@@ -490,16 +472,9 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         cluster().wipeIndices("_all");

         logger.info("--> create an index that will have some unallocated shards");
-        assertAcked(prepareCreate("test-idx-some", 2, Settings.builder().put("number_of_shards", 6)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx-some", 2, indexSettingsNoReplicas(6)));
         ensureGreen();
-        logger.info("--> indexing some data into test-idx-some");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx-some", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client().prepareSearch("test-idx-some").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx-some", 100);

         logger.info("--> shutdown one of the nodes");
         internalCluster().stopRandomDataNode();

@@ -508,8 +483,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             equalTo(false));

         logger.info("--> create an index that will have all allocated shards");
-        assertAcked(prepareCreate("test-idx-all", 1, Settings.builder().put("number_of_shards", 6)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx-all", 1, indexSettingsNoReplicas(6)));
         ensureGreen("test-idx-all");

         logger.info("--> create an index that will be closed");

@@ -531,7 +505,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             .put("number_of_replicas", 0)).setWaitForActiveShards(ActiveShardCount.NONE).get());
         assertTrue(client().admin().indices().prepareExists("test-idx-none").get().isExists());

-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");

         logger.info("--> start snapshot with default settings without a closed index - should fail");
         CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")

@@ -603,8 +577,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
-        assertThat(client().prepareSearch("test-idx-all").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        assertDocCount("test-idx-all", 100L);

         logger.info("--> restore snapshot for the partial index");
         cluster().wipeIndices("test-idx-some");

@@ -614,9 +587,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), allOf(greaterThan(0), lessThan(6)));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), greaterThan(0));
-        assertThat(client().prepareSearch("test-idx-some").setSize(0).get().getHits().getTotalHits().value, allOf(greaterThan(0L),
-            lessThan(100L)));
+        assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L)));

         logger.info("--> restore snapshot for the index that didn't have any shards snapshotted successfully");
         cluster().wipeIndices("test-idx-none");

@@ -626,9 +597,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(6));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(0));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(6));
-        assertThat(client().prepareSearch("test-idx-some").setSize(0).get().getHits().getTotalHits().value, allOf(greaterThan(0L),
-            lessThan(100L)));
+        assertThat(getCountForIndex("test-idx-some"), allOf(greaterThan(0L), lessThan(100L)));

         logger.info("--> restore snapshot for the closed index that was snapshotted completely");
         restoreSnapshotResponse = client().admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap-2")

@@ -637,8 +606,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), equalTo(4));
         assertThat(restoreSnapshotResponse.getRestoreInfo().successfulShards(), equalTo(4));
         assertThat(restoreSnapshotResponse.getRestoreInfo().failedShards(), equalTo(0));
-        assertThat(client().prepareSearch("test-idx-closed").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        assertDocCount("test-idx-closed", 100L);
     }

     public void testRestoreIndexWithShardsMissingInLocalGateway() throws Exception {
@@ -650,20 +618,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startNodes(2, nodeSettings);
         cluster().wipeIndices("_all");

-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");

         int numberOfShards = 6;
         logger.info("--> create an index that will have some unallocated shards");
-        assertAcked(prepareCreate("test-idx", 2, Settings.builder().put("number_of_shards", numberOfShards)
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 2, indexSettingsNoReplicas(numberOfShards)));
         ensureGreen();
-        logger.info("--> indexing some data into test-idx");
-        for (int i = 0; i < 100; i++) {
-            index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
-        }
-        refresh();
-        assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));
+        indexRandomDocs("test-idx", 100);

         logger.info("--> start snapshot");
         assertThat(client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setIndices("test-idx")

@@ -691,7 +652,6 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             equalTo(6));

         ensureGreen("test-idx");
-        assertThat(client().prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits().value, equalTo(100L));

         IntSet reusedShards = new IntHashSet();
         List<RecoveryState> recoveryStates = client().admin().indices().prepareRecoveries("test-idx").get()

@@ -786,23 +746,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startDataOnlyNodes(2);

         final Path repoPath = randomRepoPath();
-        createRepository("test-repo", "fs", Settings.builder()
-            .put("location", repoPath).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "fs", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);

-        assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(between(1, 20))));
         ensureGreen();

-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(10, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(10, 100));

         final int numberOfShards = getNumShards("test-idx").numPrimaries;
         logger.info("number of shards: {}", numberOfShards);

@@ -838,23 +788,12 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startDataOnlyNodes(2);

         final Path repoPath = randomRepoPath();
-        createRepository("test-repo", "mock", Settings.builder()
-            .put("location", repoPath).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);

-        assertAcked(prepareCreate("test-idx", 0, Settings.builder().put("number_of_shards", between(1, 20))
-            .put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(between(1, 20))));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(10, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1", Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(10, 100));

         final int numberOfShards = getNumShards("test-idx").numPrimaries;
         logger.info("number of shards: {}", numberOfShards);

@@ -897,28 +836,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startMasterOnlyNodes(2);
         internalCluster().startDataOnlyNodes(2);

-        logger.info("--> creating repository");
         final Path repoPath = randomRepoPath();
-        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
-            .setType("mock").setSettings(Settings.builder()
-                .put("location", repoPath)
-                .put("compress", randomBoolean())
-                .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);

-        assertAcked(prepareCreate("test-idx", 0, Settings.builder()
-            .put("number_of_shards", 6).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(6)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(50, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(50, 100));

         logger.info("--> stopping random data node, which should cause shards to go missing");
         internalCluster().stopRandomDataNode();

@@ -973,23 +897,11 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         final String sourceIdx = "test-idx";
         final String shrunkIdx = "test-idx-shrunk";

-        logger.info("--> creating repository");
-        assertAcked(client.admin().cluster().preparePutRepository(repo).setType("fs")
-            .setSettings(Settings.builder().put("location", randomRepoPath())
-                .put("compress", randomBoolean())));
+        createRepository(repo, "fs");

-        assertAcked(prepareCreate(sourceIdx, 0, Settings.builder()
-            .put("number_of_shards", between(2, 10)).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate(sourceIdx, 0, indexSettingsNoReplicas(between(2, 10))));
         ensureGreen();
-        logger.info("--> indexing some data");
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[randomIntBetween(10, 100)];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex(sourceIdx, "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs(sourceIdx, randomIntBetween(10, 100));

         logger.info("--> shrink the index");
         assertAcked(client.admin().indices().prepareUpdateSettings(sourceIdx)
@@ -1139,27 +1051,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         logger.info("--> starting a master node and two data nodes");
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNodes(2);
-        logger.info("--> creating repository");
         final Path repoPath = randomRepoPath();
-        assertAcked(client().admin().cluster().preparePutRepository("test-repo")
-            .setType("mock").setSettings(Settings.builder()
-                .put("location", repoPath)
-                .put("compress", randomBoolean())
-                .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);

-        assertAcked(prepareCreate("test-idx", 0, Settings.builder()
-            .put("number_of_shards", 5).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(5)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(50, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(50, 100));
         final String dataNode = blockNodeWithIndex("test-repo", "test-idx");
         logger.info("--> snapshot");
         ServiceDisruptionScheme disruption = new BusyMasterServiceDisruption(random(), Priority.HIGH);

@@ -1192,23 +1090,13 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         internalCluster().startMasterOnlyNode();
         final List<String> dataNodes = internalCluster().startDataOnlyNodes(2);
         final Path repoPath = randomRepoPath();
-        createRepository("test-repo", "mock", Settings.builder()
-            .put("location", repoPath).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "mock", repoPath);
         maybeInitWithOldSnapshotVersion("test-repo", repoPath);

-        assertAcked(prepareCreate("test-idx", 0, Settings.builder()
-            .put("number_of_shards", 2).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-idx", 0, indexSettingsNoReplicas(2)));
         ensureGreen();
-        logger.info("--> indexing some data");
-        final int numdocs = randomIntBetween(50, 100);
-        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
-        for (int i = 0; i < builders.length; i++) {
-            builders[i] = client().prepareIndex("test-idx", "type1",
-                Integer.toString(i)).setSource("field1", "bar " + i);
-        }
-        indexRandom(true, builders);
-        flushAndRefresh();
+        indexRandomDocs("test-idx", randomIntBetween(50, 100));
         blockAllDataNodes("test-repo");
         logger.info("--> snapshot");
         client(internalCluster().getMasterName()).admin().cluster()

@@ -1238,16 +1126,11 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest

     public void testRetentionLeasesClearedOnRestore() throws Exception {
         final String repoName = "test-repo-retention-leases";
-        createRepository(repoName, "fs", Settings.builder()
-            .put("location", randomRepoPath()).put("compress", randomBoolean()));
+        createRepository(repoName, "fs");

         final String indexName = "index-retention-leases";
         final int shardCount = randomIntBetween(1, 5);
-        assertAcked(client().admin().indices().prepareCreate(indexName)
-            .setSettings(Settings.builder()
-                .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shardCount)
-                .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0))
-            .get());
+        assertAcked(client().admin().indices().prepareCreate(indexName).setSettings(indexSettingsNoReplicas(shardCount)));
         final ShardId shardId = new ShardId(resolveIndex(indexName), randomIntBetween(0, shardCount - 1));

         final int snapshotDocCount = iterations(10, 1000);

@@ -1257,7 +1140,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
             indexRequestBuilders[i] = client().prepareIndex(indexName, "_doc").setSource("field", "value");
         }
         indexRandom(true, indexRequestBuilders);
-        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), snapshotDocCount);
+        assertDocCount(indexName, snapshotDocCount);

         final String leaseId = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
         logger.debug("--> adding retention lease with id {} to {}", leaseId, shardId);

@@ -1298,7 +1181,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         assertThat(restoreResponse.getRestoreInfo().failedShards(), equalTo(0));

         ensureGreen();
-        assertHitCount(client().prepareSearch(indexName).setSize(0).get(), snapshotDocCount);
+        assertDocCount(indexName, snapshotDocCount);

         final RetentionLeases restoredRetentionLeases = Arrays.stream(client().admin().indices().prepareStats(indexName).get()
             .getShards()).filter(s -> s.getShardRouting().shardId().equals(shardId)).findFirst().get()

@@ -1316,7 +1199,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
         final String otherDataNode = internalCluster().startDataOnlyNode();

         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");
         blockAllDataNodes(repoName);
         final String snapshotName = "test-snap";
         final ActionFuture<CreateSnapshotResponse> snapshotResponse =
@@ -71,7 +71,7 @@ public class MetadataLoadingDuringSnapshotRestoreIT extends AbstractSnapshotInte
             client().prepareIndex("others", "other").setSource("rank", 4),
             client().prepareIndex("others", "other").setSource("rank", 5));

-        createRepository("repository", CountingMockRepositoryPlugin.TYPE, randomRepoPath());
+        createRepository("repository", CountingMockRepositoryPlugin.TYPE);

         // Creating a snapshot does not load any metadata
         CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("repository", "snap")
@@ -68,7 +68,7 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {
         assertThat(repositoriesMetadata.repository("test-repo-1").type(), equalTo("fs"));

         logger.info("--> creating another repository");
-        createRepository("test-repo-2", "fs", randomRepoPath());
+        createRepository("test-repo-2", "fs");

         logger.info("--> check that both repositories are in cluster state");
         clusterStateResponse = client.admin().cluster().prepareState().clear().setMetadata(true).get();
File diff suppressed because it is too large.
@@ -20,8 +20,6 @@
 package org.elasticsearch.snapshots;

 import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsResponse;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
 import org.elasticsearch.plugins.Plugin;

@@ -53,17 +51,12 @@ public class SnapshotShardsServiceIT extends AbstractSnapshotIntegTestCase {
         internalCluster().startMasterOnlyNode();
         internalCluster().startDataOnlyNode();

-        createRepository("test-repo", "mock", Settings.builder()
-            .put("location", randomRepoPath()).put("compress", randomBoolean())
-            .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES));
+        createRepository("test-repo", "mock");

         final int shards = between(1, 10);
-        assertAcked(prepareCreate("test-index", 0, Settings.builder().put("number_of_shards", shards).put("number_of_replicas", 0)));
+        assertAcked(prepareCreate("test-index", 0, indexSettingsNoReplicas(shards)));
         ensureGreen();
-        final int numDocs = scaledRandomIntBetween(50, 100);
-        for (int i = 0; i < numDocs; i++) {
-            index("test-index", "doc", Integer.toString(i));
-        }
+        indexRandomDocs("test-index", scaledRandomIntBetween(50, 100));

         logger.info("--> blocking repository");
         String blockedNode = blockNodeWithIndex("test-repo", "test-index");
@@ -29,7 +29,6 @@ import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotStatus;
 import org.elasticsearch.action.admin.cluster.snapshots.status.SnapshotsStatusRequest;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.SnapshotsInProgress;
-import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.core.internal.io.IOUtils;

@@ -50,7 +49,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
     public void testStatusApiConsistency() {
         Client client = client();

-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");

         createIndex("test-idx-1", "test-idx-2", "test-idx-3");
         ensureGreen();

@@ -158,7 +157,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
     }

     public void testGetSnapshotsWithoutIndices() {
-        createRepository("test-repo", "fs", randomRepoPath());
+        createRepository("test-repo", "fs");

         logger.info("--> snapshot");
         final SnapshotInfo snapshotInfo =

@@ -199,7 +198,7 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
         index(indexTwo, "_doc", "some_doc_id", "foo", "bar");

         final String repoName = "test-repo";
-        createRepository(repoName, "mock", randomRepoPath());
+        createRepository(repoName, "mock");

         blockDataNode(repoName, dataNodeOne);

@@ -279,9 +278,6 @@ public class SnapshotStatusApisIT extends AbstractSnapshotIntegTestCase {
     }

     private static Settings singleShardOneNode(String node) {
-        return Settings.builder()
-            .put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, 1)
-            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0)
-            .put("index.routing.allocation.include._name", node).build();
+        return indexSettingsNoReplicas(1).put("index.routing.allocation.include._name", node).build();
     }
 }
@@ -21,12 +21,16 @@ package org.elasticsearch.snapshots;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
 import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
+import org.elasticsearch.action.index.IndexRequestBuilder;
+import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.PlainActionFuture;
 import org.elasticsearch.cluster.SnapshotsInProgress;
+import org.elasticsearch.cluster.metadata.IndexMetadata;
 import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.DeprecationHandler;
 import org.elasticsearch.common.xcontent.NamedXContentRegistry;

@@ -39,6 +43,7 @@ import org.elasticsearch.repositories.Repository;
 import org.elasticsearch.repositories.RepositoryData;
 import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
 import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
+import org.elasticsearch.search.builder.SearchSourceBuilder;
 import org.elasticsearch.snapshots.mockstore.MockRepository;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.threadpool.ThreadPool;

@@ -280,6 +285,19 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
         createRepository(repoName, type, Settings.builder().put("location", location));
     }

+    protected void createRepository(String repoName, String type) {
+        Settings.Builder settings = Settings.builder().put("location", randomRepoPath()).put("compress", randomBoolean());
+        if (rarely()) {
+            settings = settings.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES);
+        }
+        createRepository(repoName, type, settings);
+    }
+
+    protected static Settings.Builder indexSettingsNoReplicas(int shards) {
+        return Settings.builder().put(IndexMetadata.SETTING_NUMBER_OF_SHARDS, shards)
+            .put(IndexMetadata.SETTING_NUMBER_OF_REPLICAS, 0);
+    }
+
     /**
      * Randomly write an empty snapshot of an older version to an empty repository to simulate an older repository metadata format.
      */

@@ -329,4 +347,30 @@
         assertThat(snapshotInfo.state(), is(SnapshotState.SUCCESS));
         return snapshotInfo;
     }
+
+    protected void createIndexWithRandomDocs(String indexName, int docCount) throws InterruptedException {
+        createIndex(indexName);
+        ensureGreen();
+        indexRandomDocs(indexName, docCount);
+    }
+
+    protected void indexRandomDocs(String index, int numdocs) throws InterruptedException {
+        logger.info("--> indexing [{}] documents into [{}]", numdocs, index);
+        IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
+        for (int i = 0; i < builders.length; i++) {
+            builders[i] = client().prepareIndex(index, "_doc").setId(Integer.toString(i)).setSource("field1", "bar " + i);
+        }
+        indexRandom(true, builders);
+        flushAndRefresh(index);
+        assertDocCount(index, numdocs);
+    }
+
+    protected long getCountForIndex(String indexName) {
+        return client().search(new SearchRequest(new SearchRequest(indexName).source(
+            new SearchSourceBuilder().size(0).trackTotalHits(true)))).actionGet().getHits().getTotalHits().value;
+    }
+
+    protected void assertDocCount(String index, long count) {
+        assertEquals(getCountForIndex(index), count);
+    }
 }
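Taken together, the helpers added to AbstractSnapshotIntegTestCase above cover the repository setup, random indexing, and doc-count assertions that the individual tests previously duplicated. A minimal usage sketch, assuming a test class that extends AbstractSnapshotIntegTestCase (the class name, test name, and the exact combination shown here are illustrative, not part of this change):

    public class SnapshotHelpersUsageIT extends AbstractSnapshotIntegTestCase {   // hypothetical test class

        public void testSnapshotWithHelpers() throws Exception {
            createRepository("test-repo", "fs");              // random location/compress, occasionally a random chunk_size
            final int docCount = randomIntBetween(10, 100);
            createIndexWithRandomDocs("test-idx", docCount);   // createIndex + ensureGreen + indexRandomDocs (which also asserts the count)
            createFullSnapshot("test-repo", "test-snap");      // existing helper, as used in the tests above
            assertDocCount("test-idx", docCount);              // search-based count via getCountForIndex
        }
    }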