All implementations of `ESBlobStoreTestCase` use exactly the same bootstrap code as their corresponding `ESBlobStoreContainerTestCase` implementations, so the tests might as well live under `ESBlobStoreContainerTestCase`, removing a lot of duplicated code. Moving them also closes a coverage gap: there was no HDFS implementation of `ESBlobStoreTestCase`, but there is an HDFS implementation of the container test case, so HDFS now runs the moved tests as well.
This commit is contained in:
parent: 777431265b
commit: 3862400270
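After this change a repository plugin implements a single test class: it extends `ESBlobStoreContainerTestCase`, provides `newBlobStore()`, and inherits both the container tests and the store-level tests that previously required a separate `ESBlobStoreTestCase` subclass. A minimal sketch of the resulting pattern, using a hypothetical `FooBlobStore` that is not part of this commit:

package org.elasticsearch.repositories.foo;

import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;

public class FooBlobStoreContainerTests extends ESBlobStoreContainerTestCase {

    // The single hook a plugin provides; every inherited test, including the
    // moved testContainerCreationAndDeletion, runs against the store it returns.
    @Override
    protected BlobStore newBlobStore() {
        return new FooBlobStore(Settings.EMPTY); // hypothetical store under test
    }
}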
@@ -72,7 +72,7 @@ import java.util.regex.Pattern;
 import java.util.stream.Collectors;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
+import static org.elasticsearch.repositories.ESBlobStoreContainerTestCase.randomBytes;
 import static org.elasticsearch.repositories.azure.AzureRepository.Repository.CONTAINER_SETTING;
 import static org.elasticsearch.repositories.azure.AzureStorageSettings.ACCOUNT_SETTING;
 import static org.elasticsearch.repositories.azure.AzureStorageSettings.ENDPOINT_SUFFIX_SETTING;
@@ -1,52 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.repositories.azure;
-
-import org.elasticsearch.cluster.metadata.RepositoryMetaData;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.repositories.ESBlobStoreTestCase;
-import org.elasticsearch.threadpool.TestThreadPool;
-import org.elasticsearch.threadpool.ThreadPool;
-
-import java.util.concurrent.TimeUnit;
-
-public class AzureBlobStoreTests extends ESBlobStoreTestCase {
-
-    private ThreadPool threadPool;
-
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        threadPool = new TestThreadPool("AzureBlobStoreTests", AzureRepositoryPlugin.executorBuilder());
-    }
-
-    @Override
-    public void tearDown() throws Exception {
-        super.tearDown();
-        ThreadPool.terminate(threadPool, 10L, TimeUnit.SECONDS);
-    }
-
-    @Override
-    protected BlobStore newBlobStore() {
-        RepositoryMetaData repositoryMetaData = new RepositoryMetaData("azure", "ittest", Settings.EMPTY);
-        AzureStorageServiceMock client = new AzureStorageServiceMock();
-        return new AzureBlobStore(repositoryMetaData, client, threadPool);
-    }
-}
@@ -74,7 +74,7 @@ import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeLimit;
 import static fixture.gcs.GoogleCloudStorageHttpHandler.getContentRangeStart;
 import static fixture.gcs.GoogleCloudStorageHttpHandler.parseMultipartRequestBody;
 import static java.nio.charset.StandardCharsets.UTF_8;
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
+import static org.elasticsearch.repositories.ESBlobStoreContainerTestCase.randomBytes;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.CREDENTIALS_FILE_SETTING;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.ENDPOINT_SETTING;
 import static org.elasticsearch.repositories.gcs.GoogleCloudStorageClientSettings.READ_TIMEOUT_SETTING;
@@ -41,7 +41,6 @@ import java.util.List;
 import java.util.Locale;
 import java.util.concurrent.ConcurrentHashMap;
 
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.eq;
@@ -1,46 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.repositories.gcs;
-
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.repositories.ESBlobStoreTestCase;
-
-import java.util.Locale;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
-
-public class GoogleCloudStorageBlobStoreTests extends ESBlobStoreTestCase {
-
-    @Override
-    protected BlobStore newBlobStore() {
-        final String bucketName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
-        final String clientName = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
-        final GoogleCloudStorageService storageService = mock(GoogleCloudStorageService.class);
-        try {
-            when(storageService.client(any(String.class))).thenReturn(new MockStorage(bucketName, new ConcurrentHashMap<>(), random()));
-        } catch (final Exception e) {
-            throw new RuntimeException(e);
-        }
-        return new GoogleCloudStorageBlobStore(bucketName, clientName, storageService);
-    }
-}
@@ -45,10 +45,6 @@ import java.security.PrivilegedActionException;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collections;
 
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.readBlobFully;
-
-
 @ThreadLeakFilters(filters = {HdfsClientThreadLeakFilter.class})
 public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
 
@@ -34,10 +34,14 @@ import com.amazonaws.services.s3.model.PutObjectResult;
 import com.amazonaws.services.s3.model.StorageClass;
 import com.amazonaws.services.s3.model.UploadPartRequest;
 import com.amazonaws.services.s3.model.UploadPartResult;
+import org.elasticsearch.cluster.metadata.RepositoryMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.blobstore.BlobStoreException;
 import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
 import org.mockito.ArgumentCaptor;
 
@@ -46,10 +50,12 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Locale;
+import java.util.concurrent.ConcurrentHashMap;
 import java.util.stream.Collectors;
 import java.util.stream.IntStream;
 
-import static org.elasticsearch.repositories.s3.S3BlobStoreTests.randomMockS3BlobStore;
+import static org.hamcrest.Matchers.equalTo;
 import static org.hamcrest.Matchers.instanceOf;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.doNothing;
@@ -397,10 +403,99 @@ public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         assertNumberOfMultiparts(factor + 1, remaining, (size * factor) + remaining, size);
     }
 
+    public void testInitCannedACL() {
+        String[] aclList = new String[]{
+            "private", "public-read", "public-read-write", "authenticated-read",
+            "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control"};
+
+        //empty acl
+        assertThat(S3BlobStore.initCannedACL(null), equalTo(CannedAccessControlList.Private));
+        assertThat(S3BlobStore.initCannedACL(""), equalTo(CannedAccessControlList.Private));
+
+        // it should init cannedACL correctly
+        for (String aclString : aclList) {
+            CannedAccessControlList acl = S3BlobStore.initCannedACL(aclString);
+            assertThat(acl.toString(), equalTo(aclString));
+        }
+
+        // it should accept all aws cannedACLs
+        for (CannedAccessControlList awsList : CannedAccessControlList.values()) {
+            CannedAccessControlList acl = S3BlobStore.initCannedACL(awsList.toString());
+            assertThat(acl, equalTo(awsList));
+        }
+    }
+
+    public void testInvalidCannedACL() {
+        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initCannedACL("test_invalid"));
+        assertThat(ex.getMessage(), equalTo("cannedACL is not valid: [test_invalid]"));
+    }
+
+    public void testInitStorageClass() {
+        // it should default to `standard`
+        assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
+        assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard));
+
+        // it should accept [standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering]
+        assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard));
+        assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess));
+        assertThat(S3BlobStore.initStorageClass("onezone_ia"), equalTo(StorageClass.OneZoneInfrequentAccess));
+        assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy));
+        assertThat(S3BlobStore.initStorageClass("intelligent_tiering"), equalTo(StorageClass.IntelligentTiering));
+    }
+
+    public void testCaseInsensitiveStorageClass() {
+        assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard));
+        assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess));
+        assertThat(S3BlobStore.initStorageClass("oNeZoNe_iA"), equalTo(StorageClass.OneZoneInfrequentAccess));
+        assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy));
+        assertThat(S3BlobStore.initStorageClass("intelLigeNt_tieriNG"), equalTo(StorageClass.IntelligentTiering));
+    }
+
+    public void testInvalidStorageClass() {
+        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("whatever"));
+        assertThat(ex.getMessage(), equalTo("`whatever` is not a valid S3 Storage Class."));
+    }
+
+    public void testRejectGlacierStorageClass() {
+        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("glacier"));
+        assertThat(ex.getMessage(), equalTo("Glacier storage class is not supported"));
+    }
+
     private static void assertNumberOfMultiparts(final int expectedParts, final long expectedRemaining, long totalSize, long partSize) {
         final Tuple<Long, Long> result = S3BlobContainer.numberOfMultiparts(totalSize, partSize);
 
         assertEquals("Expected number of parts [" + expectedParts + "] but got [" + result.v1() + "]", expectedParts, (long) result.v1());
         assertEquals("Expected remaining [" + expectedRemaining + "] but got [" + result.v2() + "]", expectedRemaining, (long) result.v2());
     }
+
+    /**
+     * Creates a new {@link S3BlobStore} with random settings.
+     * <p>
+     * The blobstore uses a {@link MockAmazonS3} client.
+     */
+    public static S3BlobStore randomMockS3BlobStore() {
+        String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
+        ByteSizeValue bufferSize = new ByteSizeValue(randomIntBetween(5, 100), ByteSizeUnit.MB);
+        boolean serverSideEncryption = randomBoolean();
+
+        String cannedACL = null;
+        if (randomBoolean()) {
+            cannedACL = randomFrom(CannedAccessControlList.values()).toString();
+        }
+
+        String storageClass = null;
+        if (randomBoolean()) {
+            storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
+        }
+
+        final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
+        final S3Service service = new S3Service() {
+            @Override
+            public synchronized AmazonS3Reference client(RepositoryMetaData repositoryMetaData) {
+                return new AmazonS3Reference(client);
+            }
+        };
+        return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass,
+            new RepositoryMetaData(bucket, "s3", Settings.EMPTY));
+    }
 }
@@ -1,133 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.repositories.s3;
-
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.CannedAccessControlList;
-import com.amazonaws.services.s3.model.StorageClass;
-import org.elasticsearch.cluster.metadata.RepositoryMetaData;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.blobstore.BlobStoreException;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.repositories.ESBlobStoreTestCase;
-
-import java.util.Locale;
-import java.util.concurrent.ConcurrentHashMap;
-
-import static org.hamcrest.Matchers.equalTo;
-
-public class S3BlobStoreTests extends ESBlobStoreTestCase {
-
-    @Override
-    protected BlobStore newBlobStore() {
-        return randomMockS3BlobStore();
-    }
-
-    public void testInitCannedACL() {
-        String[] aclList = new String[]{
-            "private", "public-read", "public-read-write", "authenticated-read",
-            "log-delivery-write", "bucket-owner-read", "bucket-owner-full-control"};
-
-        //empty acl
-        assertThat(S3BlobStore.initCannedACL(null), equalTo(CannedAccessControlList.Private));
-        assertThat(S3BlobStore.initCannedACL(""), equalTo(CannedAccessControlList.Private));
-
-        // it should init cannedACL correctly
-        for (String aclString : aclList) {
-            CannedAccessControlList acl = S3BlobStore.initCannedACL(aclString);
-            assertThat(acl.toString(), equalTo(aclString));
-        }
-
-        // it should accept all aws cannedACLs
-        for (CannedAccessControlList awsList : CannedAccessControlList.values()) {
-            CannedAccessControlList acl = S3BlobStore.initCannedACL(awsList.toString());
-            assertThat(acl, equalTo(awsList));
-        }
-    }
-
-    public void testInvalidCannedACL() {
-        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initCannedACL("test_invalid"));
-        assertThat(ex.getMessage(), equalTo("cannedACL is not valid: [test_invalid]"));
-    }
-
-    public void testInitStorageClass() {
-        // it should default to `standard`
-        assertThat(S3BlobStore.initStorageClass(null), equalTo(StorageClass.Standard));
-        assertThat(S3BlobStore.initStorageClass(""), equalTo(StorageClass.Standard));
-
-        // it should accept [standard, standard_ia, onezone_ia, reduced_redundancy, intelligent_tiering]
-        assertThat(S3BlobStore.initStorageClass("standard"), equalTo(StorageClass.Standard));
-        assertThat(S3BlobStore.initStorageClass("standard_ia"), equalTo(StorageClass.StandardInfrequentAccess));
-        assertThat(S3BlobStore.initStorageClass("onezone_ia"), equalTo(StorageClass.OneZoneInfrequentAccess));
-        assertThat(S3BlobStore.initStorageClass("reduced_redundancy"), equalTo(StorageClass.ReducedRedundancy));
-        assertThat(S3BlobStore.initStorageClass("intelligent_tiering"), equalTo(StorageClass.IntelligentTiering));
-    }
-
-    public void testCaseInsensitiveStorageClass() {
-        assertThat(S3BlobStore.initStorageClass("sTandaRd"), equalTo(StorageClass.Standard));
-        assertThat(S3BlobStore.initStorageClass("sTandaRd_Ia"), equalTo(StorageClass.StandardInfrequentAccess));
-        assertThat(S3BlobStore.initStorageClass("oNeZoNe_iA"), equalTo(StorageClass.OneZoneInfrequentAccess));
-        assertThat(S3BlobStore.initStorageClass("reduCED_redundancy"), equalTo(StorageClass.ReducedRedundancy));
-        assertThat(S3BlobStore.initStorageClass("intelLigeNt_tieriNG"), equalTo(StorageClass.IntelligentTiering));
-    }
-
-    public void testInvalidStorageClass() {
-        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("whatever"));
-        assertThat(ex.getMessage(), equalTo("`whatever` is not a valid S3 Storage Class."));
-    }
-
-    public void testRejectGlacierStorageClass() {
-        BlobStoreException ex = expectThrows(BlobStoreException.class, () -> S3BlobStore.initStorageClass("glacier"));
-        assertThat(ex.getMessage(), equalTo("Glacier storage class is not supported"));
-    }
-
-    /**
-     * Creates a new {@link S3BlobStore} with random settings.
-     * <p>
-     * The blobstore uses a {@link MockAmazonS3} client.
-     */
-    public static S3BlobStore randomMockS3BlobStore() {
-        String bucket = randomAlphaOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
-        ByteSizeValue bufferSize = new ByteSizeValue(randomIntBetween(5, 100), ByteSizeUnit.MB);
-        boolean serverSideEncryption = randomBoolean();
-
-        String cannedACL = null;
-        if (randomBoolean()) {
-            cannedACL = randomFrom(CannedAccessControlList.values()).toString();
-        }
-
-        String storageClass = null;
-        if (randomBoolean()) {
-            storageClass = randomValueOtherThan(StorageClass.Glacier, () -> randomFrom(StorageClass.values())).toString();
-        }
-
-        final AmazonS3 client = new MockAmazonS3(new ConcurrentHashMap<>(), bucket, serverSideEncryption, cannedACL, storageClass);
-        final S3Service service = new S3Service() {
-            @Override
-            public synchronized AmazonS3Reference client(RepositoryMetaData repositoryMetaData) {
-                return new AmazonS3Reference(client);
-            }
-        };
-        return new S3BlobStore(service, bucket, serverSideEncryption, bufferSize, cannedACL, storageClass,
-            new RepositoryMetaData(bucket, "s3", Settings.EMPTY));
-    }
-}
@@ -19,13 +19,19 @@
 package org.elasticsearch.common.blobstore.fs;
 
 import org.apache.lucene.util.LuceneTestCase;
+import org.elasticsearch.common.blobstore.BlobContainer;
+import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
+import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
+import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
 
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
 
 @LuceneTestCase.SuppressFileSystems("ExtrasFS")
 public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
@@ -39,4 +45,37 @@ public class FsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         }
         return new FsBlobStore(settings, createTempDir(), false);
     }
+
+    public void testReadOnly() throws Exception {
+        Path tempDir = createTempDir();
+        Path path = tempDir.resolve("bar");
+
+        try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, true)) {
+            assertFalse(Files.exists(path));
+            BlobPath blobPath = BlobPath.cleanPath().add("foo");
+            store.blobContainer(blobPath);
+            Path storePath = store.path();
+            for (String d : blobPath) {
+                storePath = storePath.resolve(d);
+            }
+            assertFalse(Files.exists(storePath));
+        }
+
+        try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, false)) {
+            assertTrue(Files.exists(path));
+            BlobPath blobPath = BlobPath.cleanPath().add("foo");
+            BlobContainer container = store.blobContainer(blobPath);
+            Path storePath = store.path();
+            for (String d : blobPath) {
+                storePath = storePath.resolve(d);
+            }
+            assertTrue(Files.exists(storePath));
+            assertTrue(Files.isDirectory(storePath));
+
+            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+            writeBlob(container, "test", new BytesArray(data));
+            assertArrayEquals(readBlobFully(container, "test", data.length), data);
+            assertTrue(BlobStoreTestUtil.blobExists(container, "test"));
+        }
+    }
 }
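The resolution loop in testReadOnly works because `BlobPath` is iterable over its path components in order; in isolation the idiom is just the following sketch, with `basePath` standing in for any base directory:

import java.nio.file.Path;

import org.elasticsearch.common.blobstore.BlobPath;

class BlobPathResolution {
    // Resolve each BlobPath component against the base directory, the same way
    // testReadOnly computes the on-disk path before asserting on Files.exists(...).
    static Path resolve(Path basePath, BlobPath blobPath) {
        Path resolved = basePath;
        for (String component : blobPath) { // iterates "foo", then "bar", ...
            resolved = resolved.resolve(component);
        }
        return resolved;
    }
}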
@@ -1,81 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.common.blobstore.fs;
-
-import org.apache.lucene.util.LuceneTestCase;
-import org.elasticsearch.common.blobstore.BlobContainer;
-import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.repositories.ESBlobStoreTestCase;
-import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
-
-import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
-
-@LuceneTestCase.SuppressFileSystems("ExtrasFS")
-public class FsBlobStoreTests extends ESBlobStoreTestCase {
-
-    protected BlobStore newBlobStore() throws IOException {
-        final Settings settings;
-        if (randomBoolean()) {
-            settings = Settings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
-        } else {
-            settings = Settings.EMPTY;
-        }
-        return new FsBlobStore(settings, createTempDir(), false);
-    }
-
-    public void testReadOnly() throws Exception {
-        Path tempDir = createTempDir();
-        Path path = tempDir.resolve("bar");
-
-        try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, true)) {
-            assertFalse(Files.exists(path));
-            BlobPath blobPath = BlobPath.cleanPath().add("foo");
-            store.blobContainer(blobPath);
-            Path storePath = store.path();
-            for (String d : blobPath) {
-                storePath = storePath.resolve(d);
-            }
-            assertFalse(Files.exists(storePath));
-        }
-
-        try (FsBlobStore store = new FsBlobStore(Settings.EMPTY, path, false)) {
-            assertTrue(Files.exists(path));
-            BlobPath blobPath = BlobPath.cleanPath().add("foo");
-            BlobContainer container = store.blobContainer(blobPath);
-            Path storePath = store.path();
-            for (String d : blobPath) {
-                storePath = storePath.resolve(d);
-            }
-            assertTrue(Files.exists(storePath));
-            assertTrue(Files.isDirectory(storePath));
-
-            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
-            writeBlob(container, "test", new BytesArray(data));
-            assertArrayEquals(readBlobFully(container, "test", data.length), data);
-            assertTrue(BlobStoreTestUtil.blobExists(container, "test"));
-        }
-    }
-}
@@ -25,6 +25,7 @@ import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
 import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
@@ -36,8 +37,6 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.writeRandomBlob;
 import static org.hamcrest.CoreMatchers.equalTo;
 import static org.hamcrest.CoreMatchers.notNullValue;
 
@@ -191,5 +190,51 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
         }
     }
 
+    public void testContainerCreationAndDeletion() throws IOException {
+        try(BlobStore store = newBlobStore()) {
+            final BlobContainer containerFoo = store.blobContainer(new BlobPath().add("foo"));
+            final BlobContainer containerBar = store.blobContainer(new BlobPath().add("bar"));
+            byte[] data1 = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+            byte[] data2 = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+            writeBlob(containerFoo, "test", new BytesArray(data1));
+            writeBlob(containerBar, "test", new BytesArray(data2));
+
+            assertArrayEquals(readBlobFully(containerFoo, "test", data1.length), data1);
+            assertArrayEquals(readBlobFully(containerBar, "test", data2.length), data2);
+
+            assertTrue(BlobStoreTestUtil.blobExists(containerFoo, "test"));
+            assertTrue(BlobStoreTestUtil.blobExists(containerBar, "test"));
+        }
+    }
+
+    public static byte[] writeRandomBlob(BlobContainer container, String name, int length) throws IOException {
+        byte[] data = randomBytes(length);
+        writeBlob(container, name, new BytesArray(data));
+        return data;
+    }
+
+    public static byte[] readBlobFully(BlobContainer container, String name, int length) throws IOException {
+        byte[] data = new byte[length];
+        try (InputStream inputStream = container.readBlob(name)) {
+            assertThat(inputStream.read(data), equalTo(length));
+            assertThat(inputStream.read(), equalTo(-1));
+        }
+        return data;
+    }
+
+    public static byte[] randomBytes(int length) {
+        byte[] data = new byte[length];
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) randomInt();
+        }
+        return data;
+    }
+
+    protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
+        try (InputStream stream = bytesArray.streamInput()) {
+            container.writeBlob(blobName, stream, bytesArray.length(), true);
+        }
+    }
+
 protected abstract BlobStore newBlobStore() throws IOException;
 }
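Since the moved helpers are public and static, any subclass can compose them into a write/read round trip; a sketch of that usage, with the container and blob names chosen arbitrarily:

public void testRoundTrip() throws IOException {
    try (BlobStore store = newBlobStore()) {
        BlobContainer container = store.blobContainer(new BlobPath().add("round_trip"));
        // writeRandomBlob stores `length` random bytes under the name and returns them;
        // readBlobFully asserts the stream yields exactly that many bytes back.
        byte[] written = writeRandomBlob(container, "blob", randomIntBetween(10, 1024));
        byte[] read = readBlobFully(container, "blob", written.length);
        assertArrayEquals(written, read);
    }
}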
@@ -1,86 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.elasticsearch.repositories;
-
-import org.elasticsearch.common.blobstore.BlobContainer;
-import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.bytes.BytesArray;
-import org.elasticsearch.repositories.blobstore.BlobStoreTestUtil;
-import org.elasticsearch.test.ESTestCase;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-import static org.hamcrest.CoreMatchers.equalTo;
-
-/**
- * Generic test case for blob store implementation.
- * These tests check basic blob store functionality.
- */
-public abstract class ESBlobStoreTestCase extends ESTestCase {
-
-    public void testContainerCreationAndDeletion() throws IOException {
-        try(BlobStore store = newBlobStore()) {
-            final BlobContainer containerFoo = store.blobContainer(new BlobPath().add("foo"));
-            final BlobContainer containerBar = store.blobContainer(new BlobPath().add("bar"));
-            byte[] data1 = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
-            byte[] data2 = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
-            writeBlob(containerFoo, "test", new BytesArray(data1));
-            writeBlob(containerBar, "test", new BytesArray(data2));
-
-            assertArrayEquals(readBlobFully(containerFoo, "test", data1.length), data1);
-            assertArrayEquals(readBlobFully(containerBar, "test", data2.length), data2);
-
-            assertTrue(BlobStoreTestUtil.blobExists(containerFoo, "test"));
-            assertTrue(BlobStoreTestUtil.blobExists(containerBar, "test"));
-        }
-    }
-
-    public static byte[] writeRandomBlob(BlobContainer container, String name, int length) throws IOException {
-        byte[] data = randomBytes(length);
-        writeBlob(container, name, new BytesArray(data));
-        return data;
-    }
-
-    public static byte[] readBlobFully(BlobContainer container, String name, int length) throws IOException {
-        byte[] data = new byte[length];
-        try (InputStream inputStream = container.readBlob(name)) {
-            assertThat(inputStream.read(data), equalTo(length));
-            assertThat(inputStream.read(), equalTo(-1));
-        }
-        return data;
-    }
-
-    public static byte[] randomBytes(int length) {
-        byte[] data = new byte[length];
-        for (int i = 0; i < data.length; i++) {
-            data[i] = (byte) randomInt();
-        }
-        return data;
-    }
-
-    protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
-        try (InputStream stream = bytesArray.streamInput()) {
-            container.writeBlob(blobName, stream, bytesArray.length(), true);
-        }
-    }
-
-    protected abstract BlobStore newBlobStore() throws IOException;
-}
@@ -76,7 +76,7 @@ import java.util.HashMap;
 import java.util.Map;
 
 import static java.time.Clock.systemUTC;
-import static org.elasticsearch.repositories.ESBlobStoreTestCase.randomBytes;
+import static org.elasticsearch.repositories.ESBlobStoreContainerTestCase.randomBytes;
 import static org.elasticsearch.test.ClusterServiceUtils.setState;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;