Revert "Raised IOException on deleteBlob (#18815)"

This reverts commit d24cc65cad as it seems to be causing test failures.
javanna authored 2016-07-01 11:00:32 +02:00, committed by Luca Cavanna
parent 9bfc23e958
commit 598c36128e
10 changed files with 12 additions and 432 deletions
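
For orientation, a minimal sketch of the contract this revert restores, assumed from the hunks below rather than taken from the commit itself (BlobStore, BlobContainer, BlobPath and BytesArray are the Elasticsearch types that appear later in this diff; "store" stands for any BlobStore obtained elsewhere):

    // Hypothetical usage: after the revert, deleting a missing blob is a silent no-op again.
    BlobContainer container = store.blobContainer(new BlobPath());
    container.writeBlob("foobar", new BytesArray(new byte[]{1, 2, 3}));
    container.deleteBlob("foobar"); // removes the blob
    container.deleteBlob("foobar"); // blob already gone: no IOException expected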


@@ -85,7 +85,7 @@ public class FsBlobContainer extends AbstractBlobContainer {
@Override
public void deleteBlob(String blobName) throws IOException {
Path blobPath = path.resolve(blobName);
-Files.delete(blobPath);
+Files.deleteIfExists(blobPath);
}
@Override
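
The FsBlobContainer hunk above is the heart of the revert: java.nio.file.Files.delete throws NoSuchFileException (an IOException) when the path is missing, whereas Files.deleteIfExists just returns false. A small self-contained sketch of the difference, with a throwaway temp directory chosen purely for illustration:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Path;

    class DeleteSemantics {
        public static void main(String[] args) throws IOException {
            Path missing = Files.createTempDirectory("blobs").resolve("does-not-exist");
            System.out.println(Files.deleteIfExists(missing)); // prints false, no exception
            try {
                Files.delete(missing); // what the reverted change did for blobs
            } catch (NoSuchFileException e) {
                System.out.println("Files.delete raised: " + e); // surfaces as an IOException
            }
        }
    }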


@@ -70,11 +70,6 @@ public class AzureBlobContainer extends AbstractBlobContainer {
@Override
public InputStream readBlob(String blobName) throws IOException {
logger.trace("readBlob({})", blobName);
-if (!blobExists(blobName)) {
-throw new IOException("Blob [" + blobName + "] does not exist");
-}
try {
return blobStore.getInputStream(blobStore.container(), buildKey(blobName));
} catch (StorageException e) {
@@ -121,11 +116,6 @@ public class AzureBlobContainer extends AbstractBlobContainer {
@Override
public void deleteBlob(String blobName) throws IOException {
logger.trace("deleteBlob({})", blobName);
-if (!blobExists(blobName)) {
-throw new IOException("Blob [" + blobName + "] does not exist");
-}
try {
blobStore.deleteBlob(blobStore.container(), buildKey(blobName));
} catch (URISyntaxException | StorageException e) {
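
With the blobExists pre-checks gone from readBlob and deleteBlob, the Azure container no longer reports a missing blob with an explicit IOException. A hedged caller-side sketch of how the removed guard could be reinstated where needed (container and blobName as in the sketch near the top of this diff):

    // Hypothetical caller-side guard mirroring the check removed above.
    if (!container.blobExists(blobName)) {
        throw new IOException("Blob [" + blobName + "] does not exist");
    }
    container.deleteBlob(blobName);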


@@ -95,13 +95,13 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent<AzureSto
MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
for (String blobName : blobs.keySet()) {
final String checkBlob;
-if (keyPath != null && !keyPath.isEmpty()) {
+if (keyPath != null) {
// strip off key path from the beginning of the blob name
checkBlob = blobName.replace(keyPath, "");
} else {
checkBlob = blobName;
}
-if (prefix == null || startsWithIgnoreCase(checkBlob, prefix)) {
+if (startsWithIgnoreCase(checkBlob, prefix)) {
blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, blobs.get(blobName).size()));
}
}


@@ -1,47 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.azure;
import com.microsoft.azure.storage.StorageException;
import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore;
import org.elasticsearch.cloud.azure.storage.AzureStorageServiceMock;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
import org.elasticsearch.repositories.RepositoryName;
import org.elasticsearch.repositories.RepositorySettings;
import java.io.IOException;
import java.net.URISyntaxException;
public class AzureBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
@Override
protected BlobStore newBlobStore() throws IOException {
try {
RepositoryName repositoryName = new RepositoryName("azure", "ittest");
RepositorySettings repositorySettings = new RepositorySettings(
Settings.EMPTY, Settings.EMPTY);
AzureStorageServiceMock client = new AzureStorageServiceMock(Settings.EMPTY);
return new AzureBlobStore(repositoryName, Settings.EMPTY, repositorySettings, client);
} catch (URISyntaxException | StorageException e) {
throw new IOException(e);
}
}
}


@@ -68,16 +68,16 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
@Override
public void deleteBlob(String blobName) throws IOException {
-if (!blobExists(blobName)) {
-throw new IOException("Blob [" + blobName + "] does not exist");
+try {
+store.execute(new Operation<Boolean>() {
+@Override
+public Boolean run(FileContext fileContext) throws IOException {
+return fileContext.delete(new Path(path, blobName), true);
+}
+});
+} catch (FileNotFoundException ok) {
+// behaves like Files.deleteIfExists
+}
-store.execute(new Operation<Boolean>() {
-@Override
-public Boolean run(FileContext fileContext) throws IOException {
-return fileContext.delete(new Path(path, blobName), true);
-}
-});
}
@Override
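
The restored HdfsBlobContainer gets its delete-if-exists behaviour by swallowing the FileNotFoundException that Hadoop's FileContext.delete throws for a missing path. A standalone sketch of the same pattern as a helper (class and method names assumed, not part of the commit):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    final class HdfsDeleteIfExists {
        // Behaves like java.nio.file.Files.deleteIfExists, but over a Hadoop FileContext.
        static boolean deleteIfExists(FileContext fc, Path path) throws IOException {
            try {
                return fc.delete(path, true); // recursive delete, as in the blob container
            } catch (FileNotFoundException ok) {
                return false; // a missing path is not an error
            }
        }
    }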


@@ -1,104 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.repositories.hdfs;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
import javax.security.auth.Subject;
import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.AccessController;
import java.security.Principal;
import java.security.PrivilegedAction;
import java.util.Collections;
public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
@Override
protected BlobStore newBlobStore() throws IOException {
return AccessController.doPrivileged(
new PrivilegedAction<HdfsBlobStore>() {
@Override
public HdfsBlobStore run() {
try {
FileContext fileContext = createContext(new URI("hdfs:///"));
return new HdfsBlobStore(fileContext, "temp", 1024);
} catch (IOException | URISyntaxException e) {
throw new RuntimeException(e);
}
}
});
}
public FileContext createContext(URI uri) {
// mirrors HdfsRepository.java behaviour
Configuration cfg = new Configuration(true);
cfg.setClassLoader(HdfsRepository.class.getClassLoader());
cfg.reloadConfiguration();
Constructor<?> ctor;
Subject subject;
try {
Class<?> clazz = Class.forName("org.apache.hadoop.security.User");
ctor = clazz.getConstructor(String.class);
ctor.setAccessible(true);
} catch (ClassNotFoundException | NoSuchMethodException e) {
throw new RuntimeException(e);
}
try {
Principal principal = (Principal) ctor.newInstance(System.getProperty("user.name"));
subject = new Subject(false, Collections.singleton(principal),
Collections.emptySet(), Collections.emptySet());
} catch (InstantiationException | IllegalAccessException | InvocationTargetException e) {
throw new RuntimeException(e);
}
// disable file system cache
cfg.setBoolean("fs.hdfs.impl.disable.cache", true);
// set file system to TestingFs to avoid a bunch of security
// checks, similar to what is done in HdfsTests.java
cfg.set(String.format("fs.AbstractFileSystem.%s.impl", uri.getScheme()),
TestingFs.class.getName());
// create the FileContext with our user
return Subject.doAs(subject, new PrivilegedAction<FileContext>() {
@Override
public FileContext run() {
try {
TestingFs fs = (TestingFs) AbstractFileSystem.get(uri, cfg);
return FileContext.getFileContext(fs, cfg);
} catch (UnsupportedFileSystemException e) {
throw new RuntimeException(e);
}
}
});
}
}


@@ -118,10 +118,6 @@ public class S3BlobContainer extends AbstractBlobContainer {
@Override
public void deleteBlob(String blobName) throws IOException {
-if (!blobExists(blobName)) {
-throw new IOException("Blob [" + blobName + "] does not exist");
-}
try {
blobStore.client().deleteObject(blobStore.bucket(), buildKey(blobName));
} catch (AmazonClientException e) {


@@ -1,200 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cloud.aws.blobstore;
import com.amazonaws.AmazonClientException;
import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AbstractAmazonS3;
import com.amazonaws.services.s3.model.AmazonS3Exception;
import com.amazonaws.services.s3.model.CopyObjectRequest;
import com.amazonaws.services.s3.model.CopyObjectResult;
import com.amazonaws.services.s3.model.DeleteObjectRequest;
import com.amazonaws.services.s3.model.GetObjectMetadataRequest;
import com.amazonaws.services.s3.model.GetObjectRequest;
import com.amazonaws.services.s3.model.ListObjectsRequest;
import com.amazonaws.services.s3.model.ObjectListing;
import com.amazonaws.services.s3.model.ObjectMetadata;
import com.amazonaws.services.s3.model.PutObjectRequest;
import com.amazonaws.services.s3.model.PutObjectResult;
import com.amazonaws.services.s3.model.S3Object;
import com.amazonaws.services.s3.model.S3ObjectInputStream;
import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.amazonaws.util.Base64;
import java.io.IOException;
import java.io.InputStream;
import java.security.DigestInputStream;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
class MockAmazonS3 extends AbstractAmazonS3 {
private Map<String, InputStream> blobs = new ConcurrentHashMap<>();
// in ESBlobStoreContainerTestCase.java, the maximum
// length of the input data is 100 bytes
private byte[] byteCounter = new byte[100];
@Override
public boolean doesBucketExist(String bucket) {
return true;
}
@Override
public ObjectMetadata getObjectMetadata(
GetObjectMetadataRequest getObjectMetadataRequest)
throws AmazonClientException, AmazonServiceException {
String blobName = getObjectMetadataRequest.getKey();
if (!blobs.containsKey(blobName)) {
throw new AmazonS3Exception("[" + blobName + "] does not exist.");
}
return new ObjectMetadata(); // nothing is done with it
}
@Override
public PutObjectResult putObject(PutObjectRequest putObjectRequest)
throws AmazonClientException, AmazonServiceException {
String blobName = putObjectRequest.getKey();
DigestInputStream stream = (DigestInputStream) putObjectRequest.getInputStream();
if (blobs.containsKey(blobName)) {
throw new AmazonS3Exception("[" + blobName + "] already exists.");
}
blobs.put(blobName, stream);
// input and output md5 hashes need to match to avoid an exception
String md5 = Base64.encodeAsString(stream.getMessageDigest().digest());
PutObjectResult result = new PutObjectResult();
result.setContentMd5(md5);
return result;
}
@Override
public S3Object getObject(GetObjectRequest getObjectRequest)
throws AmazonClientException, AmazonServiceException {
// in ESBlobStoreContainerTestCase.java, the prefix is empty,
// so the key and blobName are equivalent to each other
String blobName = getObjectRequest.getKey();
if (!blobs.containsKey(blobName)) {
throw new AmazonS3Exception("[" + blobName + "] does not exist.");
}
// the HTTP request attribute is irrelevant for reading
S3ObjectInputStream stream = new S3ObjectInputStream(
blobs.get(blobName), null, false);
S3Object s3Object = new S3Object();
s3Object.setObjectContent(stream);
return s3Object;
}
@Override
public ObjectListing listObjects(ListObjectsRequest listObjectsRequest)
throws AmazonClientException, AmazonServiceException {
MockObjectListing list = new MockObjectListing();
list.setTruncated(false);
String blobName;
String prefix = listObjectsRequest.getPrefix();
ArrayList<S3ObjectSummary> mockObjectSummaries = new ArrayList<>();
for (Map.Entry<String, InputStream> blob : blobs.entrySet()) {
blobName = blob.getKey();
S3ObjectSummary objectSummary = new S3ObjectSummary();
if (prefix.isEmpty() || blobName.startsWith(prefix)) {
objectSummary.setKey(blobName);
try {
objectSummary.setSize(getSize(blob.getValue()));
} catch (IOException e) {
throw new AmazonS3Exception("Object listing " +
"failed for blob [" + blob.getKey() + "]");
}
mockObjectSummaries.add(objectSummary);
}
}
list.setObjectSummaries(mockObjectSummaries);
return list;
}
@Override
public CopyObjectResult copyObject(CopyObjectRequest copyObjectRequest)
throws AmazonClientException, AmazonServiceException {
String sourceBlobName = copyObjectRequest.getSourceKey();
String targetBlobName = copyObjectRequest.getDestinationKey();
if (!blobs.containsKey(sourceBlobName)) {
throw new AmazonS3Exception("Source blob [" +
sourceBlobName + "] does not exist.");
}
if (blobs.containsKey(targetBlobName)) {
throw new AmazonS3Exception("Target blob [" +
targetBlobName + "] already exists.");
}
blobs.put(targetBlobName, blobs.get(sourceBlobName));
return new CopyObjectResult(); // nothing is done with it
}
@Override
public void deleteObject(DeleteObjectRequest deleteObjectRequest)
throws AmazonClientException, AmazonServiceException {
String blobName = deleteObjectRequest.getKey();
if (!blobs.containsKey(blobName)) {
throw new AmazonS3Exception("[" + blobName + "] does not exist.");
}
blobs.remove(blobName);
}
private int getSize(InputStream stream) throws IOException {
int size = stream.read(byteCounter);
stream.reset(); // in case we ever need the size again
return size;
}
private class MockObjectListing extends ObjectListing {
// the objectSummaries attribute in ObjectListing.java
// is read-only, but we need to be able to write to it,
// so we create a mock of it to work around this
private List<S3ObjectSummary> mockObjectSummaries;
@Override
public List<S3ObjectSummary> getObjectSummaries() {
return mockObjectSummaries;
}
private void setObjectSummaries(List<S3ObjectSummary> objectSummaries) {
mockObjectSummaries = objectSummaries;
}
}
}


@@ -1,39 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cloud.aws.blobstore;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.repositories.ESBlobStoreContainerTestCase;
import java.io.IOException;
import java.util.Locale;
public class S3BlobStoreContainerTests extends ESBlobStoreContainerTestCase {
protected BlobStore newBlobStore() throws IOException {
MockAmazonS3 client = new MockAmazonS3();
String bucket = randomAsciiOfLength(randomIntBetween(1, 10)).toLowerCase(Locale.ROOT);
return new S3BlobStore(Settings.EMPTY, client, bucket, null, false,
new ByteSizeValue(10, ByteSizeUnit.MB), 5, "public-read-write", "standard");
}
}


@@ -112,22 +112,6 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
}
}
-public void testDeleteBlob() throws IOException {
-try (final BlobStore store = newBlobStore()) {
-final String blobName = "foobar";
-final BlobContainer container = store.blobContainer(new BlobPath());
-expectThrows(IOException.class, () -> container.deleteBlob(blobName));
-byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
-final BytesArray bytesArray = new BytesArray(data);
-container.writeBlob(blobName, bytesArray);
-container.deleteBlob(blobName); // should not raise
-// blob deleted, so should raise again
-expectThrows(IOException.class, () -> container.deleteBlob(blobName));
-}
-}
@AwaitsFix(bugUrl = "https://github.com/elastic/elasticsearch/issues/15579")
public void testOverwriteFails() throws IOException {
try (final BlobStore store = newBlobStore()) {