[Test] Clean up some repository-s3 tests (#31601)
This commit removes tests in the repository-s3 plugin that had not been executed for over two years yet were still being maintained. Most of the tests in AbstractAwsTestCase were obsolete or superseded by fixture-based integration tests.
parent ffc8b82ea3
commit 0ef22db844
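For reference, each of the fixture-based replacements added below follows the same negative-path pattern from the REST test framework: call a snapshot API against the suite's repository and assert the expected exception type via `catch`. A minimal sketch of the pattern (the repository named `repository` is assumed to be registered in the suite's `setup:` section):

---
"Get a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.get:
        repository: repository
        snapshot: missing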
@@ -238,3 +238,30 @@ teardown:
        repository: repository-file
        snapshot: snapshot-one

---
"Get a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.get:
        repository: repository-url
        snapshot: missing

---
"Delete a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.delete:
        repository: repository-url
        snapshot: missing

---
"Restore a non existing snapshot":

  - do:
      catch: /snapshot_restore_exception/
      snapshot.restore:
        repository: repository-url
        snapshot: missing
        wait_for_completion: true
@@ -1,6 +1,6 @@
# Integration tests for repository-azure
---
"Snapshot/Restore with repository-azure":
setup:

  # Register repository
  - do:
@@ -13,7 +13,8 @@
            client: "integration_test"
            base_path: ${base_path}

  - match: { acknowledged: true }

---
"Snapshot/Restore with repository-azure":

  # Get repository
  - do:
@@ -172,3 +173,66 @@
        repository: repository
        snapshot: snapshot-one
        master_timeout: 5m

---
"Register a repository with a non existing container":

  - do:
      catch: /repository_verification_exception/
      snapshot.create_repository:
        repository: repository
        body:
          type: azure
          settings:
            container: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
            client: integration_test

---
"Register a repository with a non existing client":

  - do:
      # TODO this should be a repository_exception
      catch: /settings_exception/
      snapshot.create_repository:
        repository: repository
        body:
          type: azure
          settings:
            bucket: repository
            client: unknown

---
"Get a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.get:
        repository: repository
        snapshot: missing

---
"Delete a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.delete:
        repository: repository
        snapshot: missing

---
"Restore a non existing snapshot":

  - do:
      catch: /snapshot_restore_exception/
      snapshot.restore:
        repository: repository
        snapshot: missing
        wait_for_completion: true

---
teardown:

  # Remove our repository
  - do:
      snapshot.delete_repository:
        repository: repository
@@ -1,6 +1,6 @@
# Integration tests for repository-gcs
---
"Snapshot/Restore with repository-gcs":
setup:

  # Register repository
  - do:
@@ -13,7 +13,8 @@
            client: "integration_test"
            base_path: ${base_path}

  - match: { acknowledged: true }

---
"Snapshot/Restore with repository-gcs":

  # Get repository
  - do:
@@ -171,6 +172,63 @@
        repository: repository
        snapshot: snapshot-one

---
"Register a repository with a non existing bucket":

  - do:
      catch: /repository_exception/
      snapshot.create_repository:
        repository: repository
        body:
          type: gcs
          settings:
            bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
            client: integration_test

---
"Register a repository with a non existing client":

  - do:
      catch: /repository_exception/
      snapshot.create_repository:
        repository: repository
        body:
          type: gcs
          settings:
            bucket: repository
            client: unknown

---
"Get a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.get:
        repository: repository
        snapshot: missing

---
"Delete a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.delete:
        repository: repository
        snapshot: missing

---
"Restore a non existing snapshot":

  - do:
      catch: /snapshot_restore_exception/
      snapshot.restore:
        repository: repository
        snapshot: missing
        wait_for_completion: true

---
teardown:

  # Remove our repository
  - do:
      snapshot.delete_repository:
@@ -1,6 +1,7 @@
# Integration tests for repository-s3

---
"Snapshot/Restore with repository-s3":
setup:

  # Register repository
  - do:
@@ -15,7 +16,8 @@
            canned_acl: private
            storage_class: standard

  - match: { acknowledged: true }

---
"Snapshot/Restore with repository-s3":

  # Get repository
  - do:
@@ -177,6 +179,63 @@
        repository: repository
        snapshot: snapshot-one

---
"Register a repository with a non existing bucket":

  - do:
      catch: /repository_exception/
      snapshot.create_repository:
        repository: repository
        body:
          type: s3
          settings:
            bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
            client: integration_test

---
"Register a repository with a non existing client":

  - do:
      catch: /repository_exception/
      snapshot.create_repository:
        repository: repository
        body:
          type: s3
          settings:
            bucket: repository
            client: unknown

---
"Get a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.get:
        repository: repository
        snapshot: missing

---
"Delete a non existing snapshot":

  - do:
      catch: /snapshot_missing_exception/
      snapshot.delete:
        repository: repository
        snapshot: missing

---
"Restore a non existing snapshot":

  - do:
      catch: /snapshot_restore_exception/
      snapshot.restore:
        repository: repository
        snapshot: missing
        wait_for_completion: true

---
teardown:

  # Remove our repository
  - do:
      snapshot.delete_repository:
@@ -1,74 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsException;
import org.elasticsearch.env.Environment;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESIntegTestCase.ThirdParty;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collection;

/**
 * Base class for AWS tests that require credentials.
 * <p>
 * You must specify {@code -Dtests.thirdparty=true -Dtests.config=/path/to/config}
 * in order to run these tests.
 */
@ThirdParty
public abstract class AbstractAwsTestCase extends ESIntegTestCase {

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        Settings.Builder settings = Settings.builder()
            .put(super.nodeSettings(nodeOrdinal))
            .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
            .put("cloud.aws.test.random", randomInt())
            .put("cloud.aws.test.write_failures", 0.1)
            .put("cloud.aws.test.read_failures", 0.1);

        // if explicit, just load it and don't load from env
        try {
            if (Strings.hasText(System.getProperty("tests.config"))) {
                try {
                    settings.loadFromPath(PathUtils.get(System.getProperty("tests.config")));
                } catch (IOException e) {
                    throw new IllegalArgumentException("could not load aws tests config", e);
                }
            } else {
                throw new IllegalStateException("to run integration tests, you need to set -Dtests.thirdparty=true and -Dtests.config=/path/to/elasticsearch.yml");
            }
        } catch (SettingsException exception) {
            throw new IllegalStateException("your test configuration file is incorrect: " + System.getProperty("tests.config"), exception);
        }
        return settings.build();
    }

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Arrays.asList(TestAwsS3Service.TestPlugin.class);
    }
}
@@ -1,409 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import com.amazonaws.services.s3.model.DeleteObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.repositories.RepositoryVerificationException;
import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotState;
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.junit.After;
import org.junit.Before;

import java.util.ArrayList;
import java.util.List;

import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.greaterThan;
import static org.hamcrest.Matchers.notNullValue;

@ClusterScope(scope = Scope.SUITE, numDataNodes = 2, numClientNodes = 0, transportClientRatio = 0.0)
public abstract class AbstractS3SnapshotRestoreTest extends AbstractAwsTestCase {

    private String basePath;

    @Before
    public final void wipeBefore() {
        wipeRepositories();
        basePath = "repo-" + randomInt();
        cleanRepositoryFiles(basePath);
    }

    @After
    public final void wipeAfter() {
        wipeRepositories();
        cleanRepositoryFiles(basePath);
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
    public void testEncryption() {
        Client client = client();
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);

        Settings repositorySettings = Settings.builder()
            .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            .put(S3Repository.CHUNK_SIZE_SETTING.getKey(), randomIntBetween(1000, 10000))
            .put(S3Repository.SERVER_SIDE_ENCRYPTION_SETTING.getKey(), true)
            .build();

        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(repositorySettings).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        createIndex("test-idx-1", "test-idx-2", "test-idx-3");
        ensureGreen();

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
            index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
            index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
        }
        refresh();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
        assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
        assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

        logger.info("--> snapshot");
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-3").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

        assertThat(client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

        Settings settings = internalCluster().getInstance(Settings.class);
        Settings bucket = settings.getByPrefix("repositories.s3.");
        try (AmazonS3Reference s3Client = internalCluster().getInstance(S3Service.class).client("default")) {
            String bucketName = bucket.get("bucket");
            logger.info("--> verify encryption for bucket [{}], prefix [{}]", bucketName, basePath);
            List<S3ObjectSummary> summaries = s3Client.client().listObjects(bucketName, basePath).getObjectSummaries();
            for (S3ObjectSummary summary : summaries) {
                assertThat(s3Client.client().getObjectMetadata(bucketName, summary.getKey()).getSSEAlgorithm(), equalTo("AES256"));
            }
        }

        logger.info("--> delete some data");
        for (int i = 0; i < 50; i++) {
            client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
        }
        for (int i = 50; i < 100; i++) {
            client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
        }
        for (int i = 0; i < 100; i += 2) {
            client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
        }
        refresh();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
        assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(50L));
        assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L));

        logger.info("--> close indices");
        client.admin().indices().prepareClose("test-idx-1", "test-idx-2").get();

        logger.info("--> restore all indices from the snapshot");
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

        ensureGreen();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
        assertThat(client.prepareSearch("test-idx-2").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
        assertThat(client.prepareSearch("test-idx-3").setSize(0).get().getHits().getTotalHits(), equalTo(50L));

        // Test restore after index deletion
        logger.info("--> delete indices");
        cluster().wipeIndices("test-idx-1", "test-idx-2");
        logger.info("--> restore one index after deletion");
        restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx-*", "-test-idx-2").execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));
        ensureGreen();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
        ClusterState clusterState = client.admin().cluster().prepareState().get().getState();
        assertThat(clusterState.getMetaData().hasIndex("test-idx-1"), equalTo(true));
        assertThat(clusterState.getMetaData().hasIndex("test-idx-2"), equalTo(false));
    }

    /**
     * This test verifies that the test configuration is set up in a manner that
     * does not make the test {@link #testRepositoryWithCustomCredentials()} pointless.
     */
    public void testRepositoryWithCustomCredentialsIsNotAccessibleByDefaultCredentials() {
        Client client = client();
        Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket.");
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
        try {
            client.admin().cluster().preparePutRepository("test-repo")
                .setType("s3").setSettings(Settings.builder()
                    .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                    .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                ).get();
            fail("repository verification should have raised an exception!");
        } catch (RepositoryVerificationException e) {
            // expected
        }
    }

    public void testRepositoryWithBasePath() {
        Client client = client();
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        assertRepositoryIsOperational(client, "test-repo");
    }

    public void testRepositoryWithCustomCredentials() {
        Client client = client();
        Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.private-bucket.");
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        assertRepositoryIsOperational(client, "test-repo");
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
    public void testRepositoryWithCustomEndpointProtocol() {
        Client client = client();
        Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.external-bucket.");
        logger.info("--> creating s3 repository with endpoint [{}], bucket[{}] and path [{}]", bucketSettings.get("endpoint"), bucketSettings.get("bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
        assertRepositoryIsOperational(client, "test-repo");
    }

    /**
     * This test verifies that the test configuration is set up in a manner that
     * does not make the test {@link #testRepositoryInRemoteRegion()} pointless.
     */
    public void testRepositoryInRemoteRegionIsRemote() {
        Client client = client();
        Settings bucketSettings = internalCluster().getInstance(Settings.class).getByPrefix("repositories.s3.remote-bucket.");
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
        try {
            client.admin().cluster().preparePutRepository("test-repo")
                .setType("s3").setSettings(Settings.builder()
                    .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                    .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
                    // Below setting intentionally omitted to assert bucket is not available in default region.
                    // .put("region", privateBucketSettings.get("region"))
                ).get();
            fail("repository verification should have raised an exception!");
        } catch (RepositoryVerificationException e) {
            // expected
        }
    }

    @AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch-cloud-aws/issues/211")
    public void testRepositoryInRemoteRegion() {
        Client client = client();
        Settings settings = internalCluster().getInstance(Settings.class);
        Settings bucketSettings = settings.getByPrefix("repositories.s3.remote-bucket.");
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", bucketSettings.get("bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
                .put(S3Repository.BUCKET_SETTING.getKey(), bucketSettings.get("bucket"))
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        assertRepositoryIsOperational(client, "test-repo");
    }

    /**
     * Test case for issue #86: https://github.com/elastic/elasticsearch-cloud-aws/issues/86
     */
    public void testNonExistingRepo86() {
        Client client = client();
        logger.info("--> creating s3 repository with bucket[{}] and path [{}]", internalCluster().getInstance(Settings.class).get("repositories.s3.bucket"), basePath);
        PutRepositoryResponse putRepositoryResponse = client.admin().cluster().preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        logger.info("--> restore non existing snapshot");
        try {
            client.admin().cluster().prepareRestoreSnapshot("test-repo", "no-existing-snapshot").setWaitForCompletion(true).execute().actionGet();
            fail("Shouldn't be here");
        } catch (SnapshotMissingException ex) {
            // Expected
        }
    }

    /**
     * For issue #86: https://github.com/elastic/elasticsearch-cloud-aws/issues/86
     */
    public void testGetDeleteNonExistingSnapshot86() {
        ClusterAdminClient client = client().admin().cluster();
        logger.info("--> creating s3 repository without any path");
        PutRepositoryResponse putRepositoryResponse = client.preparePutRepository("test-repo")
            .setType("s3").setSettings(Settings.builder()
                .put(S3Repository.BASE_PATH_SETTING.getKey(), basePath)
            ).get();
        assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));

        try {
            client.prepareGetSnapshots("test-repo").addSnapshots("no-existing-snapshot").get();
            fail("Shouldn't be here");
        } catch (SnapshotMissingException ex) {
            // Expected
        }

        try {
            client.prepareDeleteSnapshot("test-repo", "no-existing-snapshot").get();
            fail("Shouldn't be here");
        } catch (SnapshotMissingException ex) {
            // Expected
        }
    }

    private void assertRepositoryIsOperational(Client client, String repository) {
        createIndex("test-idx-1");
        ensureGreen();

        logger.info("--> indexing some data");
        for (int i = 0; i < 100; i++) {
            index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
        }
        refresh();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

        logger.info("--> snapshot");
        CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repository, "test-snap").setWaitForCompletion(true).setIndices("test-idx-*").get();
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
        assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));

        assertThat(client.admin().cluster().prepareGetSnapshots(repository).setSnapshots("test-snap").get().getSnapshots().get(0).state(), equalTo(SnapshotState.SUCCESS));

        logger.info("--> delete some data");
        for (int i = 0; i < 50; i++) {
            client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
        }
        refresh();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(50L));

        logger.info("--> close indices");
        client.admin().indices().prepareClose("test-idx-1").get();

        logger.info("--> restore all indices from the snapshot");
        RestoreSnapshotResponse restoreSnapshotResponse = client.admin().cluster().prepareRestoreSnapshot(repository, "test-snap").setWaitForCompletion(true).execute().actionGet();
        assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

        ensureGreen();
        assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));
    }

    /**
     * Deletes repositories, supports wildcard notation.
     */
    public static void wipeRepositories(String... repositories) {
        // if nothing is provided, delete all
        if (repositories.length == 0) {
            repositories = new String[]{"*"};
        }
        for (String repository : repositories) {
            try {
                client().admin().cluster().prepareDeleteRepository(repository).execute().actionGet();
            } catch (RepositoryMissingException ex) {
                // ignore
            }
        }
    }

    /**
     * Deletes content of the repository files in the bucket
     */
    public void cleanRepositoryFiles(String basePath) {
        Settings settings = internalCluster().getInstance(Settings.class);
        Settings[] buckets = {
            settings.getByPrefix("repositories.s3."),
            settings.getByPrefix("repositories.s3.private-bucket."),
            settings.getByPrefix("repositories.s3.remote-bucket."),
            settings.getByPrefix("repositories.s3.external-bucket.")
        };
        for (Settings bucket : buckets) {
            String bucketName = bucket.get("bucket");

            // We check that settings has been set in elasticsearch.yml integration test file
            // as described in README
            assertThat("Your settings in elasticsearch.yml are incorrect. Check README file.", bucketName, notNullValue());
            try (AmazonS3Reference s3Client = internalCluster().getInstance(S3Service.class).client("default")) {
                ObjectListing prevListing = null;
                // From http://docs.amazonwebservices.com/AmazonS3/latest/dev/DeletingMultipleObjectsUsingJava.html
                // we can do at most 1K objects per delete
                // We don't know the bucket name until first object listing
                DeleteObjectsRequest multiObjectDeleteRequest = null;
                ArrayList<DeleteObjectsRequest.KeyVersion> keys = new ArrayList<>();
                while (true) {
                    ObjectListing list;
                    if (prevListing != null) {
                        list = s3Client.client().listNextBatchOfObjects(prevListing);
                    } else {
                        list = s3Client.client().listObjects(bucketName, basePath);
                        multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
                    }
                    for (S3ObjectSummary summary : list.getObjectSummaries()) {
                        keys.add(new DeleteObjectsRequest.KeyVersion(summary.getKey()));
                        // Every 500 objects batch the delete request
                        if (keys.size() > 500) {
                            multiObjectDeleteRequest.setKeys(keys);
                            s3Client.client().deleteObjects(multiObjectDeleteRequest);
                            multiObjectDeleteRequest = new DeleteObjectsRequest(list.getBucketName());
                            keys.clear();
                        }
                    }
                    if (list.isTruncated()) {
                        prevListing = list;
                    } else {
                        break;
                    }
                }
                if (!keys.isEmpty()) {
                    multiObjectDeleteRequest.setKeys(keys);
                    s3Client.client().deleteObjects(multiObjectDeleteRequest);
                }
            } catch (Exception ex) {
                logger.warn((Supplier<?>) () -> new ParameterizedMessage("Failed to delete S3 repository [{}]", bucketName), ex);
            }
        }
    }
}
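The try/fail/catch assertions in the removed suite above are the pattern that the shared ESBlobStoreRepositoryIntegTestCase now covers with expectThrows (see the final hunks below). A minimal sketch of the mapping, assuming the expectThrows helper these test classes inherit from the test framework:

    // Before: negative-path check in the removed suite, using fail() inside try/catch
    try {
        client().admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("no-existing-snapshot").get();
        fail("Shouldn't be here");
    } catch (SnapshotMissingException ex) {
        // expected
    }

    // After: the same assertion as a one-liner, as in the hunks below
    expectThrows(SnapshotMissingException.class, () ->
        client().admin().cluster().prepareGetSnapshots("test-repo").addSnapshots("no-existing-snapshot").get());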
@@ -1,47 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import org.elasticsearch.common.settings.Settings;
import org.junit.Before;

/**
 * This will only run if you define an s3-specific proxy in your `elasticsearch.yml` file:
 * cloud.aws.s3.proxy_host: mys3proxy.company.com
 * cloud.aws.s3.proxy_port: 8080
 */
public class S3ProxiedSnapshotRestoreOverHttpsTests extends AbstractS3SnapshotRestoreTest {

    private boolean proxySet = false;

    @Override
    public Settings nodeSettings(int nodeOrdinal) {
        Settings settings = super.nodeSettings(nodeOrdinal);
        String proxyHost = settings.get("cloud.aws.s3.proxy_host");
        proxySet = proxyHost != null;
        return settings;
    }

    @Before
    public void checkProxySettings() {
        assumeTrue("we are expecting proxy settings in elasticsearch.yml file", proxySet);
    }

}
@@ -1,32 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import org.elasticsearch.common.settings.Settings;

public class S3SnapshotRestoreOverHttpTests extends AbstractS3SnapshotRestoreTest {
    @Override
    public Settings nodeSettings(int nodeOrdinal) {
        Settings.Builder settings = Settings.builder()
            .put(super.nodeSettings(nodeOrdinal))
            .put("cloud.aws.s3.protocol", "http");
        return settings.build();
    }
}
@@ -1,32 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import org.elasticsearch.common.settings.Settings;

public class S3SnapshotRestoreOverHttpsTests extends AbstractS3SnapshotRestoreTest {
    @Override
    public Settings nodeSettings(int nodeOrdinal) {
        Settings.Builder settings = Settings.builder()
            .put(super.nodeSettings(nodeOrdinal))
            .put("cloud.aws.s3.protocol", "https");
        return settings.build();
    }
}
@@ -1,157 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;

import java.io.IOException;
import java.io.InputStream;
import java.io.UnsupportedEncodingException;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.atomic.AtomicLong;

import static com.carrotsearch.randomizedtesting.RandomizedTest.randomDouble;

public class TestAmazonS3 extends AmazonS3Wrapper {

    protected final Logger logger = Loggers.getLogger(getClass());

    private double writeFailureRate = 0.0;
    private double readFailureRate = 0.0;

    private final String randomPrefix;

    ConcurrentMap<String, AtomicLong> accessCounts = new ConcurrentHashMap<>();

    private long incrementAndGet(String path) {
        AtomicLong value = accessCounts.get(path);
        if (value == null) {
            value = accessCounts.putIfAbsent(path, new AtomicLong(1));
        }
        if (value != null) {
            return value.incrementAndGet();
        }
        return 1;
    }

    public TestAmazonS3(AmazonS3 delegate, Settings settings) {
        super(delegate);
        randomPrefix = settings.get("cloud.aws.test.random");
        writeFailureRate = settings.getAsDouble("cloud.aws.test.write_failures", 0.0);
        readFailureRate = settings.getAsDouble("cloud.aws.test.read_failures", 0.0);
    }

    @Override
    public PutObjectResult putObject(String bucketName, String key, InputStream input, ObjectMetadata metadata) throws AmazonClientException, AmazonServiceException {
        if (shouldFail(bucketName, key, writeFailureRate)) {
            final long length = metadata.getContentLength();
            final long partToRead = (long) (length * randomDouble());
            final byte[] buffer = new byte[1024];
            for (long cur = 0; cur < partToRead; cur += buffer.length) {
                try {
                    input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? buffer.length : partToRead - cur));
                } catch (final IOException ex) {
                    throw new ElasticsearchException("cannot read input stream", ex);
                }
            }
            logger.info("--> random write failure on putObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
            final AmazonS3Exception ex = new AmazonS3Exception("Random S3 exception");
            ex.setStatusCode(400);
            ex.setErrorCode("RequestTimeout");
            throw ex;
        } else {
            return super.putObject(bucketName, key, input, metadata);
        }
    }

    @Override
    public UploadPartResult uploadPart(UploadPartRequest request) throws AmazonClientException, AmazonServiceException {
        if (shouldFail(request.getBucketName(), request.getKey(), writeFailureRate)) {
            final long length = request.getPartSize();
            final long partToRead = (long) (length * randomDouble());
            final byte[] buffer = new byte[1024];
            for (long cur = 0; cur < partToRead; cur += buffer.length) {
                try (InputStream input = request.getInputStream()) {
                    input.read(buffer, 0, (int) ((partToRead - cur) > buffer.length ? buffer.length : partToRead - cur));
                } catch (final IOException ex) {
                    throw new ElasticsearchException("cannot read input stream", ex);
                }
            }
            logger.info("--> random write failure on uploadPart method: throwing an exception for [bucket={}, key={}]", request.getBucketName(), request.getKey());
            final AmazonS3Exception ex = new AmazonS3Exception("Random S3 write exception");
            ex.setStatusCode(400);
            ex.setErrorCode("RequestTimeout");
            throw ex;
        } else {
            return super.uploadPart(request);
        }
    }

    @Override
    public S3Object getObject(String bucketName, String key) throws AmazonClientException, AmazonServiceException {
        if (shouldFail(bucketName, key, readFailureRate)) {
            logger.info("--> random read failure on getObject method: throwing an exception for [bucket={}, key={}]", bucketName, key);
            final AmazonS3Exception ex = new AmazonS3Exception("Random S3 read exception");
            ex.setStatusCode(404);
            throw ex;
        } else {
            return super.getObject(bucketName, key);
        }
    }

    private boolean shouldFail(String bucketName, String key, double probability) {
        if (probability > 0.0) {
            String path = randomPrefix + "-" + bucketName + "+" + key;
            path += "/" + incrementAndGet(path);
            return Math.abs(hashCode(path)) < (Integer.MAX_VALUE * probability);
        } else {
            return false;
        }
    }

    private int hashCode(String path) {
        try {
            final MessageDigest digest = MessageDigest.getInstance("MD5");
            final byte[] bytes = digest.digest(path.getBytes("UTF-8"));
            int i = 0;
            return ((bytes[i++] & 0xFF) << 24) | ((bytes[i++] & 0xFF) << 16)
                | ((bytes[i++] & 0xFF) << 8) | (bytes[i++] & 0xFF);
        } catch (final UnsupportedEncodingException ex) {
            throw new ElasticsearchException("cannot calculate hashcode", ex);
        } catch (final NoSuchAlgorithmException ex) {
            throw new ElasticsearchException("cannot calculate hashcode", ex);
        }
    }
}
@@ -1,60 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.repositories.s3;

import java.util.IdentityHashMap;

import com.amazonaws.services.s3.AmazonS3;
import org.elasticsearch.common.settings.Settings;

public class TestAwsS3Service extends S3Service {
    public static class TestPlugin extends S3RepositoryPlugin {
        public TestPlugin(Settings settings) {
            super(settings, new TestAwsS3Service(settings));
        }
    }

    IdentityHashMap<AmazonS3Reference, TestAmazonS3> clients = new IdentityHashMap<>();

    public TestAwsS3Service(Settings settings) {
        super(settings);
    }

    @Override
    public synchronized AmazonS3Reference client(String clientName) {
        return new AmazonS3Reference(cachedWrapper(super.client(clientName)));
    }

    private AmazonS3 cachedWrapper(AmazonS3Reference clientReference) {
        TestAmazonS3 wrapper = clients.get(clientReference);
        if (wrapper == null) {
            wrapper = new TestAmazonS3(clientReference.client(), settings);
            clients.put(clientReference, wrapper);
        }
        return wrapper;
    }

    @Override
    protected synchronized void releaseCachedClients() {
        super.releaseCachedClients();
        clients.clear();
    }

}
@@ -28,6 +28,8 @@ import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotRestoreException;
import org.elasticsearch.test.ESIntegTestCase;

import java.util.Arrays;
@@ -50,7 +52,7 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase
    protected abstract void createTestRepository(String name);

    public void testSnapshotAndRestore() throws Exception {
        String repoName = randomAsciiName();
        final String repoName = randomAsciiName();
        logger.info("--> creating repository {}", repoName);
        createTestRepository(repoName);
        int indexCount = randomIntBetween(1, 5);
@@ -63,7 +65,7 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase
            assertHitCount(client().prepareSearch(indexNames[i]).setSize(0).get(), docCounts[i]);
        }

        String snapshotName = randomAsciiName();
        final String snapshotName = randomAsciiName();
        logger.info("--> create snapshot {}:{}", repoName, snapshotName);
        assertSuccessfulSnapshot(client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
            .setWaitForCompletion(true).setIndices(indexNames));
@@ -109,6 +111,15 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase

        logger.info("--> delete snapshot {}:{}", repoName, snapshotName);
        assertAcked(client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get());

        expectThrows(SnapshotMissingException.class, () ->
            client().admin().cluster().prepareGetSnapshots(repoName).setSnapshots(snapshotName).get());

        expectThrows(SnapshotMissingException.class, () ->
            client().admin().cluster().prepareDeleteSnapshot(repoName, snapshotName).get());

        expectThrows(SnapshotRestoreException.class, () ->
            client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName).setWaitForCompletion(randomBoolean()).get());
    }

    public void testMultipleSnapshotAndRollback() throws Exception {
@@ -166,7 +177,7 @@ public abstract class ESBlobStoreRepositoryIntegTestCase extends ESIntegTestCase
        }
    }

    public void testIndicesDeletedFromRepository() throws Exception {
    public void testIndicesDeletedFromRepository() {
        Client client = client();

        logger.info("--> creating repository");