Add read-only repository verification (#35731)
Adds a verification mode for read-only repositories. It also makes the extra bucket check on repository creation obsolete, which fixes #35703.
This commit is contained in:
parent
88d862e69f
commit
2970abfce9
|
@ -3,6 +3,7 @@
|
|||
- do:
|
||||
snapshot.create_repository:
|
||||
repository: test_repo1
|
||||
verify: false
|
||||
body:
|
||||
type: url
|
||||
settings:
|
||||
|
|
|
@ -19,15 +19,12 @@
|
|||
|
||||
package org.elasticsearch.repositories.s3;
|
||||
|
||||
import com.amazonaws.AmazonServiceException;
|
||||
import com.amazonaws.services.s3.model.CannedAccessControlList;
|
||||
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
|
||||
import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
|
||||
import com.amazonaws.services.s3.model.HeadBucketRequest;
|
||||
import com.amazonaws.services.s3.model.ObjectListing;
|
||||
import com.amazonaws.services.s3.model.S3ObjectSummary;
|
||||
import com.amazonaws.services.s3.model.StorageClass;
|
||||
|
||||
import org.elasticsearch.common.blobstore.BlobContainer;
|
||||
import org.elasticsearch.common.blobstore.BlobPath;
|
||||
import org.elasticsearch.common.blobstore.BlobStore;
|
||||
|
@ -63,29 +60,6 @@ class S3BlobStore implements BlobStore {
|
|||
this.bufferSize = bufferSize;
|
||||
this.cannedACL = initCannedACL(cannedACL);
|
||||
this.storageClass = initStorageClass(storageClass);
|
||||
|
||||
// Note: the method client.doesBucketExist() may return 'true' if the bucket exists
|
||||
// but we don't have access to it (ie, 403 Forbidden response code)
|
||||
try (AmazonS3Reference clientReference = clientReference()) {
|
||||
SocketAccess.doPrivilegedVoid(() -> {
|
||||
try {
|
||||
clientReference.client().headBucket(new HeadBucketRequest(bucket));
|
||||
} catch (final AmazonServiceException e) {
|
||||
if (e.getStatusCode() == 301) {
|
||||
throw new IllegalArgumentException("the bucket [" + bucket + "] is in a different region than you configured", e);
|
||||
} else if (e.getStatusCode() == 403) {
|
||||
throw new IllegalArgumentException("you do not have permissions to access the bucket [" + bucket + "]", e);
|
||||
} else if (e.getStatusCode() == 404) {
|
||||
throw new IllegalArgumentException(
|
||||
"the bucket [" + bucket + "] does not exist;"
|
||||
+ " please create it before creating an S3 snapshot repository backed by it",
|
||||
e);
|
||||
} else {
|
||||
throw new IllegalArgumentException("error checking the existence of bucket [" + bucket + "]", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
package org.elasticsearch.repositories.s3;
|
||||
|
||||
import com.amazonaws.AmazonClientException;
|
||||
import com.amazonaws.AmazonServiceException;
|
||||
import com.amazonaws.SdkClientException;
|
||||
import com.amazonaws.services.s3.AbstractAmazonS3;
|
||||
import com.amazonaws.services.s3.model.AmazonS3Exception;
|
||||
|
@ -28,8 +27,6 @@ import com.amazonaws.services.s3.model.DeleteObjectRequest;
|
|||
import com.amazonaws.services.s3.model.DeleteObjectsRequest;
|
||||
import com.amazonaws.services.s3.model.DeleteObjectsResult;
|
||||
import com.amazonaws.services.s3.model.GetObjectRequest;
|
||||
import com.amazonaws.services.s3.model.HeadBucketRequest;
|
||||
import com.amazonaws.services.s3.model.HeadBucketResult;
|
||||
import com.amazonaws.services.s3.model.ListObjectsRequest;
|
||||
import com.amazonaws.services.s3.model.ObjectListing;
|
||||
import com.amazonaws.services.s3.model.ObjectMetadata;
|
||||
|
@ -75,18 +72,6 @@ class MockAmazonS3 extends AbstractAmazonS3 {
|
|||
this.storageClass = storageClass;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HeadBucketResult headBucket(final HeadBucketRequest headBucketRequest) throws SdkClientException, AmazonServiceException {
|
||||
if (this.bucket.equalsIgnoreCase(headBucketRequest.getBucketName())) {
|
||||
return new HeadBucketResult();
|
||||
} else {
|
||||
final AmazonServiceException e =
|
||||
new AmazonServiceException("bucket [" + headBucketRequest.getBucketName() + "] does not exist");
|
||||
e.setStatusCode(404);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean doesObjectExist(final String bucketName, final String objectName) throws SdkClientException {
|
||||
assertThat(bucketName, equalTo(bucket));
|
||||
|
|
|
@ -19,14 +19,9 @@
|
|||
|
||||
package org.elasticsearch.repositories.s3;
|
||||
|
||||
import com.amazonaws.AmazonClientException;
|
||||
import com.amazonaws.AmazonServiceException;
|
||||
import com.amazonaws.auth.AWSCredentials;
|
||||
import com.amazonaws.auth.AWSCredentialsProvider;
|
||||
import com.amazonaws.services.s3.AmazonS3;
|
||||
import com.amazonaws.services.s3.model.HeadBucketRequest;
|
||||
import com.amazonaws.services.s3.model.HeadBucketResult;
|
||||
|
||||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
|
@ -63,11 +58,6 @@ public class RepositoryCredentialsTests extends ESTestCase {
|
|||
this.credentials = credentials;
|
||||
}
|
||||
|
||||
@Override
|
||||
public HeadBucketResult headBucket(HeadBucketRequest headBucketRequest) throws AmazonClientException, AmazonServiceException {
|
||||
return new HeadBucketResult();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
static final class ProxyS3Service extends S3Service {
|
||||
|
|
|
@ -19,11 +19,7 @@
|
|||
|
||||
package org.elasticsearch.repositories.s3;
|
||||
|
||||
import com.amazonaws.AmazonServiceException;
|
||||
import com.amazonaws.SdkClientException;
|
||||
import com.amazonaws.services.s3.AbstractAmazonS3;
|
||||
import com.amazonaws.services.s3.model.HeadBucketRequest;
|
||||
import com.amazonaws.services.s3.model.HeadBucketResult;
|
||||
import org.elasticsearch.cluster.metadata.RepositoryMetaData;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
|
@ -45,11 +41,6 @@ public class S3RepositoryTests extends ESTestCase {
|
|||
|
||||
private static class DummyS3Client extends AbstractAmazonS3 {
|
||||
|
||||
@Override
|
||||
public HeadBucketResult headBucket(final HeadBucketRequest request) throws SdkClientException, AmazonServiceException {
|
||||
return new HeadBucketResult();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void shutdown() {
|
||||
// TODO check is closed
|
||||
|
|
|
@ -184,7 +184,7 @@ setup:
|
|||
"Register a repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_permanent
|
||||
body:
|
||||
|
@ -197,7 +197,7 @@ setup:
|
|||
"Register a repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_permanent
|
||||
body:
|
||||
|
@ -206,6 +206,34 @@ setup:
|
|||
bucket: repository_permanent
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_permanent
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
|
||||
client: integration_test_permanent
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_permanent
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: repository_permanent
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Get a non existing snapshot":
|
||||
|
||||
|
|
|
@ -184,7 +184,7 @@ setup:
|
|||
"Register a repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_temporary
|
||||
body:
|
||||
|
@ -197,7 +197,7 @@ setup:
|
|||
"Register a repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_temporary
|
||||
body:
|
||||
|
@ -206,6 +206,34 @@ setup:
|
|||
bucket: repository_temporary
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_temporary
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
|
||||
client: integration_test_temporary
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_temporary
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: repository_temporary
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Get a non existing snapshot":
|
||||
|
||||
|
|
|
@ -184,7 +184,7 @@ setup:
|
|||
"Register a repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ec2
|
||||
body:
|
||||
|
@ -197,7 +197,7 @@ setup:
|
|||
"Register a repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ec2
|
||||
body:
|
||||
|
@ -206,6 +206,34 @@ setup:
|
|||
bucket: repository_ec2
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ec2
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
|
||||
client: integration_test_temporary
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ec2
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: repository_ec2
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Get a non existing snapshot":
|
||||
|
||||
|
|
|
@ -184,20 +184,20 @@ setup:
|
|||
"Register a repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ecs
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
|
||||
client: integration_test_temporary
|
||||
client: integration_test_ecs
|
||||
|
||||
---
|
||||
"Register a repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /repository_exception/
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ecs
|
||||
body:
|
||||
|
@ -206,6 +206,34 @@ setup:
|
|||
bucket: repository_ecs
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing bucket":
|
||||
|
||||
- do:
|
||||
catch: /repository_verification_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ecs
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: zHHkfSqlbnBsbpSgvCYtxrEfFLqghXtyPvvvKPNBnRCicNHQLE
|
||||
client: integration_test_ecs
|
||||
|
||||
---
|
||||
"Register a read-only repository with a non existing client":
|
||||
|
||||
- do:
|
||||
catch: /illegal_argument_exception/
|
||||
snapshot.create_repository:
|
||||
repository: repository_ecs
|
||||
body:
|
||||
type: s3
|
||||
settings:
|
||||
readonly: true
|
||||
bucket: repository_ecs
|
||||
client: unknown
|
||||
|
||||
---
|
||||
"Get a non existing snapshot":
|
||||
|
||||
|
|
|
@ -15,6 +15,7 @@ setup:
|
|||
body:
|
||||
type: fs
|
||||
settings:
|
||||
readonly: true
|
||||
location: "test_repo_get_1_loc"
|
||||
|
||||
---
|
||||
|
@ -51,6 +52,12 @@ setup:
|
|||
|
||||
---
|
||||
"Verify created repository":
|
||||
- do:
|
||||
snapshot.verify_repository:
|
||||
repository: test_repo_get_1
|
||||
|
||||
- is_true: nodes
|
||||
|
||||
- do:
|
||||
snapshot.verify_repository:
|
||||
repository: test_repo_get_2
|
||||
|
|
|
@ -214,13 +214,14 @@ public class RepositoriesService implements ClusterStateApplier {
|
|||
|
||||
public void verifyRepository(final String repositoryName, final ActionListener<VerifyResponse> listener) {
|
||||
final Repository repository = repository(repositoryName);
|
||||
final boolean readOnly = repository.isReadOnly();
|
||||
try {
|
||||
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
|
||||
try {
|
||||
final String verificationToken = repository.startVerification();
|
||||
if (verificationToken != null) {
|
||||
try {
|
||||
verifyAction.verify(repositoryName, verificationToken, new ActionListener<VerifyResponse>() {
|
||||
verifyAction.verify(repositoryName, readOnly, verificationToken, new ActionListener<VerifyResponse>() {
|
||||
@Override
|
||||
public void onResponse(VerifyResponse verifyResponse) {
|
||||
threadPool.executor(ThreadPool.Names.SNAPSHOT).execute(() -> {
|
||||
|
|
|
@ -24,6 +24,7 @@ import com.carrotsearch.hppc.cursors.ObjectCursor;
|
|||
import org.apache.logging.log4j.LogManager;
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.apache.logging.log4j.message.ParameterizedMessage;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
|
@ -66,7 +67,7 @@ public class VerifyNodeRepositoryAction {
|
|||
transportService.registerRequestHandler(ACTION_NAME, VerifyNodeRepositoryRequest::new, ThreadPool.Names.SNAPSHOT, new VerifyNodeRepositoryRequestHandler());
|
||||
}
|
||||
|
||||
public void verify(String repository, String verificationToken, final ActionListener<VerifyResponse> listener) {
|
||||
public void verify(String repository, boolean readOnly, String verificationToken, final ActionListener<VerifyResponse> listener) {
|
||||
final DiscoveryNodes discoNodes = clusterService.state().nodes();
|
||||
final DiscoveryNode localNode = discoNodes.getLocalNode();
|
||||
|
||||
|
@ -74,6 +75,9 @@ public class VerifyNodeRepositoryAction {
|
|||
final List<DiscoveryNode> nodes = new ArrayList<>();
|
||||
for (ObjectCursor<DiscoveryNode> cursor : masterAndDataNodes) {
|
||||
DiscoveryNode node = cursor.value;
|
||||
if (readOnly && node.getVersion().before(Version.V_7_0_0)) {
|
||||
continue;
|
||||
}
|
||||
nodes.add(node);
|
||||
}
|
||||
final CopyOnWriteArrayList<VerificationFailure> errors = new CopyOnWriteArrayList<>();
|
||||
|
|
|
@ -628,12 +628,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
|||
public String startVerification() {
|
||||
try {
|
||||
if (isReadOnly()) {
|
||||
// TODO: add repository verification for read-only repositories
|
||||
|
||||
// It's readonly - so there is not much we can do here to verify it apart from trying to create blobStore()
|
||||
// and check that it is accessible on the master
|
||||
blobStore();
|
||||
return null;
|
||||
// It's readonly - so there is not much we can do here to verify it apart from reading the blob store metadata
|
||||
latestIndexBlobId();
|
||||
return "read-only";
|
||||
} else {
|
||||
String seed = UUIDs.randomBase64UUID();
|
||||
byte[] testBytes = Strings.toUTF8Bytes(seed);
|
||||
|
@ -652,13 +649,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
|||
|
||||
@Override
|
||||
public void endVerification(String seed) {
|
||||
if (isReadOnly()) {
|
||||
throw new UnsupportedOperationException("shouldn't be called");
|
||||
}
|
||||
try {
|
||||
blobStore().delete(basePath().add(testBlobPrefix(seed)));
|
||||
} catch (IOException exp) {
|
||||
throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp);
|
||||
if (isReadOnly() == false) {
|
||||
try {
|
||||
blobStore().delete(basePath().add(testBlobPrefix(seed)));
|
||||
} catch (IOException exp) {
|
||||
throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -888,20 +884,29 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
|
|||
@Override
|
||||
public void verify(String seed, DiscoveryNode localNode) {
|
||||
assertSnapshotOrGenericThread();
|
||||
BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
|
||||
if (testBlobContainer.blobExists("master.dat")) {
|
||||
try {
|
||||
BytesArray bytes = new BytesArray(seed);
|
||||
try (InputStream stream = bytes.streamInput()) {
|
||||
testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length(), true);
|
||||
}
|
||||
} catch (IOException exp) {
|
||||
throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]", exp);
|
||||
if (isReadOnly()) {
|
||||
try {
|
||||
latestIndexBlobId();
|
||||
} catch (IOException e) {
|
||||
throw new RepositoryVerificationException(metadata.name(), "path " + basePath() +
|
||||
" is not accessible on node " + localNode, e);
|
||||
}
|
||||
} else {
|
||||
throw new RepositoryVerificationException(metadata.name(), "a file written by master to the store [" + blobStore() + "] cannot be accessed on the node [" + localNode + "]. "
|
||||
+ "This might indicate that the store [" + blobStore() + "] is not shared between this node and the master node or "
|
||||
+ "that permissions on the store don't allow reading files written by the master node");
|
||||
BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
|
||||
if (testBlobContainer.blobExists("master.dat")) {
|
||||
try {
|
||||
BytesArray bytes = new BytesArray(seed);
|
||||
try (InputStream stream = bytes.streamInput()) {
|
||||
testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length(), true);
|
||||
}
|
||||
} catch (IOException exp) {
|
||||
throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]", exp);
|
||||
}
|
||||
} else {
|
||||
throw new RepositoryVerificationException(metadata.name(), "a file written by master to the store [" + blobStore() + "] cannot be accessed on the node [" + localNode + "]. "
|
||||
+ "This might indicate that the store [" + blobStore() + "] is not shared between this node and the master node or "
|
||||
+ "that permissions on the store don't allow reading files written by the master node");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -179,11 +179,18 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {
|
|||
Settings settings = Settings.builder()
|
||||
.put("location", randomRepoPath())
|
||||
.put("random_control_io_exception_rate", 1.0).build();
|
||||
Settings readonlySettings = Settings.builder().put(settings)
|
||||
.put("readonly", true).build();
|
||||
logger.info("--> creating repository that cannot write any files - should fail");
|
||||
assertThrows(client.admin().cluster().preparePutRepository("test-repo-1")
|
||||
.setType("mock").setSettings(settings),
|
||||
RepositoryVerificationException.class);
|
||||
|
||||
logger.info("--> creating read-only repository that cannot read any files - should fail");
|
||||
assertThrows(client.admin().cluster().preparePutRepository("test-repo-2")
|
||||
.setType("mock").setSettings(readonlySettings),
|
||||
RepositoryVerificationException.class);
|
||||
|
||||
logger.info("--> creating repository that cannot write any files, but suppress verification - should be acked");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo-1")
|
||||
.setType("mock").setSettings(settings).setVerify(false));
|
||||
|
@ -191,6 +198,13 @@ public class RepositoriesIT extends AbstractSnapshotIntegTestCase {
|
|||
logger.info("--> verifying repository");
|
||||
assertThrows(client.admin().cluster().prepareVerifyRepository("test-repo-1"), RepositoryVerificationException.class);
|
||||
|
||||
logger.info("--> creating read-only repository that cannot read any files, but suppress verification - should be acked");
|
||||
assertAcked(client.admin().cluster().preparePutRepository("test-repo-2")
|
||||
.setType("mock").setSettings(readonlySettings).setVerify(false));
|
||||
|
||||
logger.info("--> verifying repository");
|
||||
assertThrows(client.admin().cluster().prepareVerifyRepository("test-repo-2"), RepositoryVerificationException.class);
|
||||
|
||||
Path location = randomRepoPath();
|
||||
|
||||
logger.info("--> creating repository");
|
||||
|
|
Loading…
Reference in New Issue