Add write*Blob option to replace existing blob (#31729)

Adds a new parameter to the BlobContainer#write*Blob methods that specifies whether an existing blob
should be overwritten. For some metadata files in the repository we actually want to replace the
current file. This is currently implemented through an explicit blob delete followed by a fresh write.
With a cloud provider (S3, GCS, Azure), that results in 2 API requests instead of just 1.
This change therefore allows us to achieve the same functionality with fewer API requests.
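
As a sketch of the intended usage (the helper class below is hypothetical; only BlobContainer#writeBlob,
#writeBlobAtomic and #deleteBlobIgnoringIfNotExists are part of the actual API), replacing a blob used to
take two calls and can now be done with one:

    import java.io.IOException;
    import java.io.InputStream;

    import org.elasticsearch.common.blobstore.BlobContainer;
    import org.elasticsearch.common.bytes.BytesReference;

    // Hypothetical illustration of the new failIfAlreadyExists parameter.
    class BlobOverwriteExample {

        // Old pattern: delete the existing blob, then write a blob that must not exist yet
        // (two API requests against S3/GCS/Azure).
        static void replaceWithDeleteThenWrite(BlobContainer container, String name, BytesReference bytes) throws IOException {
            container.deleteBlobIgnoringIfNotExists(name);
            try (InputStream stream = bytes.streamInput()) {
                container.writeBlob(name, stream, bytes.length(), true);
            }
        }

        // New pattern: a single write that is allowed to replace an existing blob
        // (failIfAlreadyExists = false), i.e. one API request.
        static void replaceInOneCall(BlobContainer container, String name, BytesReference bytes) throws IOException {
            try (InputStream stream = bytes.streamInput()) {
                container.writeBlob(name, stream, bytes.length(), false);
            }
        }
    }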
Yannick Welsch authored on 2018-07-03 09:13:50 +02:00, committed by GitHub
parent 631a53a0e1
commit 2bb4f38371
21 changed files with 131 additions and 88 deletions

URLBlobContainer.java

@@ -108,7 +108,7 @@ public class URLBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         throw new UnsupportedOperationException("URL repository doesn't support this operation");
     }

AzureStorageFixture.java

@@ -122,15 +122,20 @@ public class AzureStorageFixture extends AbstractHttpFixture {
         handlers.insert(path, (request) -> {
             final String destContainerName = request.getParam("container");
             final String destBlobName = objectName(request.getParameters());
+            final String ifNoneMatch = request.getHeader("If-None-Match");
             final Container destContainer = containers.get(destContainerName);
             if (destContainer == null) {
                 return newContainerNotFoundError(request.getId());
             }
-            byte[] existingBytes = destContainer.objects.putIfAbsent(destBlobName, request.getBody());
-            if (existingBytes != null) {
-                return newBlobAlreadyExistsError(request.getId());
+            if ("*".equals(ifNoneMatch)) {
+                byte[] existingBytes = destContainer.objects.putIfAbsent(destBlobName, request.getBody());
+                if (existingBytes != null) {
+                    return newBlobAlreadyExistsError(request.getId());
+                }
+            } else {
+                destContainer.objects.put(destBlobName, request.getBody());
             }
             return new Response(RestStatus.CREATED.getStatus(), TEXT_PLAIN_CONTENT_TYPE, EMPTY_BYTE);
         })

AzureBlobContainer.java

@@ -86,11 +86,11 @@ public class AzureBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         logger.trace("writeBlob({}, stream, {})", buildKey(blobName), blobSize);
         try {
-            blobStore.writeBlob(buildKey(blobName), inputStream, blobSize);
+            blobStore.writeBlob(buildKey(blobName), inputStream, blobSize, failIfAlreadyExists);
         } catch (URISyntaxException|StorageException e) {
             throw new IOException("Can not write blob " + blobName, e);
         }

AzureBlobStore.java

@@ -117,8 +117,8 @@ public class AzureBlobStore extends AbstractComponent implements BlobStore {
         return service.listBlobsByPrefix(clientName, container, keyPath, prefix);
     }
 
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws URISyntaxException, StorageException,
-        FileAlreadyExistsException {
-        service.writeBlob(this.clientName, container, blobName, inputStream, blobSize);
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
+        throws URISyntaxException, StorageException, FileAlreadyExistsException {
+        service.writeBlob(this.clientName, container, blobName, inputStream, blobSize, failIfAlreadyExists);
     }
 }

AzureStorageService.java

@@ -236,17 +236,20 @@ public class AzureStorageService extends AbstractComponent {
         return blobsBuilder.immutableMap();
     }
 
-    public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
+    public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize,
+                          boolean failIfAlreadyExists)
         throws URISyntaxException, StorageException, FileAlreadyExistsException {
         logger.trace(() -> new ParameterizedMessage("writeBlob({}, stream, {})", blobName, blobSize));
         final Tuple<CloudBlobClient, Supplier<OperationContext>> client = client(account);
         final CloudBlobContainer blobContainer = client.v1().getContainerReference(container);
         final CloudBlockBlob blob = blobContainer.getBlockBlobReference(blobName);
         try {
+            final AccessCondition accessCondition =
+                failIfAlreadyExists ? AccessCondition.generateIfNotExistsCondition() : AccessCondition.generateEmptyCondition();
             SocketAccess.doPrivilegedVoidException(() ->
-                blob.upload(inputStream, blobSize, AccessCondition.generateIfNotExistsCondition(), null, client.v2().get()));
+                blob.upload(inputStream, blobSize, accessCondition, null, client.v2().get()));
         } catch (final StorageException se) {
-            if (se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT &&
+            if (failIfAlreadyExists && se.getHttpStatusCode() == HttpURLConnection.HTTP_CONFLICT &&
                 StorageErrorCodeStrings.BLOB_ALREADY_EXISTS.equals(se.getErrorCode())) {
                 throw new FileAlreadyExistsException(blobName, null, se.getMessage());
             }

AzureStorageServiceMock.java

@@ -108,9 +108,10 @@ public class AzureStorageServiceMock extends AzureStorageService {
     }
 
     @Override
-    public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize)
+    public void writeBlob(String account, String container, String blobName, InputStream inputStream, long blobSize,
+                          boolean failIfAlreadyExists)
         throws URISyntaxException, StorageException, FileAlreadyExistsException {
-        if (blobs.containsKey(blobName)) {
+        if (failIfAlreadyExists && blobs.containsKey(blobName)) {
             throw new FileAlreadyExistsException(blobName);
         }
         try (ByteArrayOutputStream outputStream = new ByteArrayOutputStream()) {

GoogleCloudStorageFixture.java

@@ -158,10 +158,6 @@ public class GoogleCloudStorageFixture extends AbstractHttpFixture {
         // https://cloud.google.com/storage/docs/json_api/v1/objects/insert
         handlers.insert("POST /upload/storage/v1/b/{bucket}/o", (request) -> {
             final String ifGenerationMatch = request.getParam("ifGenerationMatch");
-            if ("0".equals(ifGenerationMatch) == false) {
-                return newError(RestStatus.PRECONDITION_FAILED, "object already exist");
-            }
             final String uploadType = request.getParam("uploadType");
             if ("resumable".equals(uploadType)) {
                 final String objectName = request.getParam("name");
@@ -172,12 +168,19 @@ public class GoogleCloudStorageFixture extends AbstractHttpFixture {
                 if (bucket == null) {
                     return newError(RestStatus.NOT_FOUND, "bucket not found");
                 }
-                if (bucket.objects.putIfAbsent(objectName, EMPTY_BYTE) == null) {
-                    final String location = /*endpoint +*/ "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id="
-                        + objectName;
-                    return newResponse(RestStatus.CREATED, singletonMap("Location", location), jsonBuilder());
+                if ("0".equals(ifGenerationMatch)) {
+                    if (bucket.objects.putIfAbsent(objectName, EMPTY_BYTE) == null) {
+                        final String location = /*endpoint +*/ "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id="
+                            + objectName;
+                        return newResponse(RestStatus.CREATED, singletonMap("Location", location), jsonBuilder());
+                    } else {
+                        return newError(RestStatus.PRECONDITION_FAILED, "object already exist");
+                    }
                 } else {
-                    return newError(RestStatus.CONFLICT, "object already exist");
+                    bucket.objects.put(objectName, EMPTY_BYTE);
+                    final String location = /*endpoint +*/ "/upload/storage/v1/b/" + bucket.name + "/o?uploadType=resumable&upload_id="
+                        + objectName;
+                    return newResponse(RestStatus.CREATED, singletonMap("Location", location), jsonBuilder());
                 }
             } else if ("multipart".equals(uploadType)) {
                 /*

GoogleCloudStorageBlobContainer.java

@@ -64,8 +64,8 @@ class GoogleCloudStorageBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
-        blobStore.writeBlob(buildKey(blobName), inputStream, blobSize);
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
+        blobStore.writeBlob(buildKey(blobName), inputStream, blobSize, failIfAlreadyExists);
     }
 
     @Override

GoogleCloudStorageBlobStore.java

@@ -193,16 +193,16 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
     /**
      * Writes a blob in the specific bucket
-     *
      * @param inputStream content of the blob to be written
      * @param blobSize    expected size of the blob to be written
+     * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists
      */
-    void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+    void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         final BlobInfo blobInfo = BlobInfo.newBuilder(bucketName, blobName).build();
         if (blobSize > LARGE_BLOB_THRESHOLD_BYTE_SIZE) {
-            writeBlobResumable(blobInfo, inputStream);
+            writeBlobResumable(blobInfo, inputStream, failIfAlreadyExists);
         } else {
-            writeBlobMultipart(blobInfo, inputStream, blobSize);
+            writeBlobMultipart(blobInfo, inputStream, blobSize, failIfAlreadyExists);
         }
     }
@@ -210,14 +210,17 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
      * Uploads a blob using the "resumable upload" method (multiple requests, which
      * can be independently retried in case of failure, see
      * https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload
-     *
      * @param blobInfo the info for the blob to be uploaded
      * @param inputStream the stream containing the blob data
+     * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists
      */
-    private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream) throws IOException {
+    private void writeBlobResumable(BlobInfo blobInfo, InputStream inputStream, boolean failIfAlreadyExists) throws IOException {
         try {
+            final Storage.BlobWriteOption[] writeOptions = failIfAlreadyExists ?
+                new Storage.BlobWriteOption[] { Storage.BlobWriteOption.doesNotExist() } :
+                new Storage.BlobWriteOption[0];
             final WriteChannel writeChannel = SocketAccess
-                .doPrivilegedIOException(() -> client().writer(blobInfo, Storage.BlobWriteOption.doesNotExist()));
+                .doPrivilegedIOException(() -> client().writer(blobInfo, writeOptions));
             Streams.copy(inputStream, Channels.newOutputStream(new WritableByteChannel() {
                 @Override
                 public boolean isOpen() {
@@ -236,7 +239,7 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
                 }
             }));
         } catch (final StorageException se) {
-            if (se.getCode() == HTTP_PRECON_FAILED) {
+            if (failIfAlreadyExists && se.getCode() == HTTP_PRECON_FAILED) {
                 throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage());
             }
             throw se;
@@ -248,20 +251,24 @@ class GoogleCloudStorageBlobStore extends AbstractComponent implements BlobStore
      * 'multipart/related' request containing both data and metadata. The request is
      * gziped), see:
      * https://cloud.google.com/storage/docs/json_api/v1/how-tos/multipart-upload
-     *
     * @param blobInfo the info for the blob to be uploaded
      * @param inputStream the stream containing the blob data
      * @param blobSize the size
+     * @param failIfAlreadyExists whether to throw a FileAlreadyExistsException if the given blob already exists
      */
-    private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize) throws IOException {
+    private void writeBlobMultipart(BlobInfo blobInfo, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
+        throws IOException {
         assert blobSize <= LARGE_BLOB_THRESHOLD_BYTE_SIZE : "large blob uploads should use the resumable upload method";
         final ByteArrayOutputStream baos = new ByteArrayOutputStream(Math.toIntExact(blobSize));
         Streams.copy(inputStream, baos);
         try {
+            final Storage.BlobTargetOption[] targetOptions = failIfAlreadyExists ?
+                new Storage.BlobTargetOption[] { Storage.BlobTargetOption.doesNotExist() } :
+                new Storage.BlobTargetOption[0];
             SocketAccess.doPrivilegedVoidIOException(
-                () -> client().create(blobInfo, baos.toByteArray(), Storage.BlobTargetOption.doesNotExist()));
+                () -> client().create(blobInfo, baos.toByteArray(), targetOptions));
         } catch (final StorageException se) {
-            if (se.getCode() == HTTP_PRECON_FAILED) {
+            if (failIfAlreadyExists && se.getCode() == HTTP_PRECON_FAILED) {
                 throw new FileAlreadyExistsException(blobInfo.getBlobId().getName(), null, se.getMessage());
             }
             throw se;

HdfsBlobContainer.java

@@ -91,11 +91,12 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         store.execute((Operation<Void>) fileContext -> {
             Path blob = new Path(path, blobName);
             // we pass CREATE, which means it fails if a blob already exists.
-            EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK);
+            EnumSet<CreateFlag> flags = failIfAlreadyExists ? EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK) :
+                EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK);
             CreateOpts[] opts = {CreateOpts.bufferSize(bufferSize)};
             try (FSDataOutputStream stream = fileContext.create(blob, flags, opts)) {
                 int bytesRead;

HdfsBlobStoreContainerTests.java

@@ -135,7 +135,7 @@ public class HdfsBlobStoreContainerTests extends ESBlobStoreContainerTestCase {
         assertTrue(util.exists(hdfsPath));
 
         byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
-        writeBlob(container, "foo", new BytesArray(data));
+        writeBlob(container, "foo", new BytesArray(data), randomBoolean());
         assertArrayEquals(readBlobFully(container, "foo", data.length), data);
         assertTrue(container.blobExists("foo"));
     }

S3BlobContainer.java

@@ -90,8 +90,11 @@ class S3BlobContainer extends AbstractBlobContainer {
         }
     }
 
+    /**
+     * This implementation ignores the failIfAlreadyExists flag as the S3 API has no way to enforce this due to its weak consistency model.
+     */
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
         SocketAccess.doPrivilegedIOException(() -> {
             if (blobSize <= blobStore.bufferSizeInBytes()) {
                 executeSingleUpload(blobStore, buildKey(blobName), inputStream, blobSize);

BlobContainer.java

@@ -69,16 +69,18 @@ public interface BlobContainer {
      * @param   blobSize
      *          The size of the blob to be written, in bytes. It is implementation dependent whether
      *          this value is used in writing the blob to the repository.
-     * @throws  FileAlreadyExistsException if a blob by the same name already exists
+     * @param   failIfAlreadyExists
+     *          whether to throw a FileAlreadyExistsException if the given blob already exists
+     * @throws  FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists
      * @throws  IOException if the input stream could not be read, or the target blob could not be written to.
      */
-    void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException;
+    void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException;
 
     /**
      * Reads blob content from the input stream and writes it to the container in a new blob with the given name,
      * using an atomic write operation if the implementation supports it. When the BlobContainer implementation
      * does not provide a specific implementation of writeBlobAtomic(String, InputStream, long), then
-     * the {@link #writeBlob(String, InputStream, long)} method is used.
+     * the {@link #writeBlob(String, InputStream, long, boolean)} method is used.
      *
      * This method assumes the container does not already contain a blob of the same blobName. If a blob by the
      * same name already exists, the operation will fail and an {@link IOException} will be thrown.
@@ -90,11 +92,14 @@ public interface BlobContainer {
      * @param   blobSize
      *          The size of the blob to be written, in bytes. It is implementation dependent whether
      *          this value is used in writing the blob to the repository.
-     * @throws  FileAlreadyExistsException if a blob by the same name already exists
+     * @param   failIfAlreadyExists
+     *          whether to throw a FileAlreadyExistsException if the given blob already exists
+     * @throws  FileAlreadyExistsException if failIfAlreadyExists is true and a blob by the same name already exists
      * @throws  IOException if the input stream could not be read, or the target blob could not be written to.
      */
-    default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
-        writeBlob(blobName, inputStream, blobSize);
+    default void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists)
+        throws IOException {
+        writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
     }
 
     /**
/** /**

FsBlobContainer.java

@@ -124,7 +124,10 @@ public class FsBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
+        if (failIfAlreadyExists == false) {
+            deleteBlobIgnoringIfNotExists(blobName);
+        }
         final Path file = path.resolve(blobName);
         try (OutputStream outputStream = Files.newOutputStream(file, StandardOpenOption.CREATE_NEW)) {
             Streams.copy(inputStream, outputStream);
@@ -134,7 +137,8 @@ public class FsBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
+    public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize, boolean failIfAlreadyExists)
+        throws IOException {
         final String tempBlob = tempBlobName(blobName);
         final Path tempBlobPath = path.resolve(tempBlob);
         try {
@@ -142,7 +146,7 @@ public class FsBlobContainer extends AbstractBlobContainer {
                 Streams.copy(inputStream, outputStream);
             }
             IOUtils.fsync(tempBlobPath, false);
-            moveBlobAtomic(tempBlob, blobName);
+            moveBlobAtomic(tempBlob, blobName, failIfAlreadyExists);
         } catch (IOException ex) {
             try {
                 deleteBlobIgnoringIfNotExists(tempBlob);
@@ -155,13 +159,18 @@ public class FsBlobContainer extends AbstractBlobContainer {
         }
     }
 
-    public void moveBlobAtomic(final String sourceBlobName, final String targetBlobName) throws IOException {
+    public void moveBlobAtomic(final String sourceBlobName, final String targetBlobName, final boolean failIfAlreadyExists)
+        throws IOException {
         final Path sourceBlobPath = path.resolve(sourceBlobName);
         final Path targetBlobPath = path.resolve(targetBlobName);
         // If the target file exists then Files.move() behaviour is implementation specific
         // the existing file might be replaced or this method fails by throwing an IOException.
         if (Files.exists(targetBlobPath)) {
-            throw new FileAlreadyExistsException("blob [" + targetBlobPath + "] already exists, cannot overwrite");
+            if (failIfAlreadyExists) {
+                throw new FileAlreadyExistsException("blob [" + targetBlobPath + "] already exists, cannot overwrite");
+            } else {
+                deleteBlobIgnoringIfNotExists(targetBlobName);
+            }
         }
         Files.move(sourceBlobPath, targetBlobPath, StandardCopyOption.ATOMIC_MOVE);
     }

BlobStoreRepository.java

@@ -556,7 +556,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             String blobName = "master.dat";
             BytesArray bytes = new BytesArray(testBytes);
             try (InputStream stream = bytes.streamInput()) {
-                testContainer.writeBlobAtomic(blobName, stream, bytes.length());
+                testContainer.writeBlobAtomic(blobName, stream, bytes.length(), true);
             }
             return seed;
         }
@@ -664,7 +664,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         // write the index file
         final String indexBlob = INDEX_FILE_PREFIX + Long.toString(newGen);
         logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob);
-        writeAtomic(indexBlob, snapshotsBytes);
+        writeAtomic(indexBlob, snapshotsBytes, true);
         // delete the N-2 index file if it exists, keep the previous one around as a backup
         if (isReadOnly() == false && newGen - 2 >= 0) {
             final String oldSnapshotIndexFile = INDEX_FILE_PREFIX + Long.toString(newGen - 2);
@@ -677,9 +677,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             bStream.writeLong(newGen);
             genBytes = bStream.bytes();
         }
-        snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(INDEX_LATEST_BLOB);
         logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen);
-        writeAtomic(INDEX_LATEST_BLOB, genBytes);
+        writeAtomic(INDEX_LATEST_BLOB, genBytes, false);
     }
 
     /**
@@ -698,9 +697,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             }
             bytes = bStream.bytes();
         }
-        snapshotsBlobContainer.deleteBlobIgnoringIfNotExists(INCOMPATIBLE_SNAPSHOTS_BLOB);
         // write the incompatible snapshots blob
-        writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes);
+        writeAtomic(INCOMPATIBLE_SNAPSHOTS_BLOB, bytes, false);
     }
 
     /**
@@ -766,9 +764,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         return latest;
     }
 
-    private void writeAtomic(final String blobName, final BytesReference bytesRef) throws IOException {
+    private void writeAtomic(final String blobName, final BytesReference bytesRef, boolean failIfAlreadyExists) throws IOException {
         try (InputStream stream = bytesRef.streamInput()) {
-            snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length());
+            snapshotsBlobContainer.writeBlobAtomic(blobName, stream, bytesRef.length(), failIfAlreadyExists);
         }
     }
@@ -813,7 +811,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
             try {
                 BytesArray bytes = new BytesArray(seed);
                 try (InputStream stream = bytes.streamInput()) {
-                    testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length());
+                    testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length(), true);
                 }
             } catch (IOException exp) {
                 throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]", exp);
@@ -1252,7 +1250,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                         snapshotRateLimitingTimeInNanos::inc);
                 }
                 inputStream = new AbortableInputStream(inputStream, fileInfo.physicalName());
-                blobContainer.writeBlob(fileInfo.partName(i), inputStream, partBytes);
+                blobContainer.writeBlob(fileInfo.partName(i), inputStream, partBytes, true);
             }
             Store.verify(indexInput);
             snapshotStatus.addProcessedFile(fileInfo.length());

ChecksumBlobStoreFormat.java

@@ -132,7 +132,7 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
         final String blobName = blobName(name);
         writeTo(obj, blobName, bytesArray -> {
             try (InputStream stream = bytesArray.streamInput()) {
-                blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length());
+                blobContainer.writeBlobAtomic(blobName, stream, bytesArray.length(), true);
             }
         });
     }
@@ -150,7 +150,7 @@ public class ChecksumBlobStoreFormat<T extends ToXContent> extends BlobStoreForm
         final String blobName = blobName(name);
         writeTo(obj, blobName, bytesArray -> {
             try (InputStream stream = bytesArray.streamInput()) {
-                blobContainer.writeBlob(blobName, stream, bytesArray.length());
+                blobContainer.writeBlob(blobName, stream, bytesArray.length(), true);
             }
         });
     }

BlobStoreFormatIT.java

@@ -224,7 +224,8 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
         IOException writeBlobException = expectThrows(IOException.class, () -> {
             BlobContainer wrapper = new BlobContainerWrapper(blobContainer) {
                 @Override
-                public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize) throws IOException {
+                public void writeBlobAtomic(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists)
+                    throws IOException {
                     throw new IOException("Exception thrown in writeBlobAtomic() for " + blobName);
                 }
             };
@@ -251,10 +252,9 @@ public class BlobStoreFormatIT extends AbstractSnapshotIntegTestCase {
             int location = randomIntBetween(0, buffer.length - 1);
             buffer[location] = (byte) (buffer[location] ^ 42);
         } while (originalChecksum == checksum(buffer));
-        blobContainer.deleteBlob(blobName); // delete original before writing new blob
         BytesArray bytesArray = new BytesArray(buffer);
         try (StreamInput stream = bytesArray.streamInput()) {
-            blobContainer.writeBlob(blobName, stream, bytesArray.length());
+            blobContainer.writeBlob(blobName, stream, bytesArray.length(), false);
         }
     }

BlobContainerWrapper.java

@@ -49,13 +49,14 @@ public class BlobContainerWrapper implements BlobContainer {
     }
 
     @Override
-    public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
-        delegate.writeBlob(blobName, inputStream, blobSize);
+    public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
+        delegate.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
     }
 
     @Override
-    public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
-        delegate.writeBlobAtomic(blobName, inputStream, blobSize);
+    public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize,
+                                boolean failIfAlreadyExists) throws IOException {
+        delegate.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists);
     }
 
     @Override

MockRepository.java

@@ -346,9 +346,9 @@ public class MockRepository extends FsRepository {
            }
 
            @Override
-           public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
+           public void writeBlob(String blobName, InputStream inputStream, long blobSize, boolean failIfAlreadyExists) throws IOException {
                maybeIOExceptionOrBlock(blobName);
-               super.writeBlob(blobName, inputStream, blobSize);
+               super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
                if (RandomizedContext.current().getRandom().nextBoolean()) {
                    // for network based repositories, the blob may have been written but we may still
                    // get an error with the client connection, so an IOException here simulates this
@@ -357,27 +357,28 @@ public class MockRepository extends FsRepository {
            }
 
            @Override
-           public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize) throws IOException {
+           public void writeBlobAtomic(final String blobName, final InputStream inputStream, final long blobSize,
+                                       final boolean failIfAlreadyExists) throws IOException {
                final Random random = RandomizedContext.current().getRandom();
                if (allowAtomicOperations && random.nextBoolean()) {
                    if ((delegate() instanceof FsBlobContainer) && (random.nextBoolean())) {
                        // Simulate a failure between the write and move operation in FsBlobContainer
                        final String tempBlobName = FsBlobContainer.tempBlobName(blobName);
-                       super.writeBlob(tempBlobName, inputStream, blobSize);
+                       super.writeBlob(tempBlobName, inputStream, blobSize, failIfAlreadyExists);
                        maybeIOExceptionOrBlock(blobName);
                        final FsBlobContainer fsBlobContainer = (FsBlobContainer) delegate();
-                       fsBlobContainer.moveBlobAtomic(tempBlobName, blobName);
+                       fsBlobContainer.moveBlobAtomic(tempBlobName, blobName, failIfAlreadyExists);
                    } else {
                        // Atomic write since it is potentially supported
                        // by the delegating blob container
                        maybeIOExceptionOrBlock(blobName);
-                       super.writeBlobAtomic(blobName, inputStream, blobSize);
+                       super.writeBlobAtomic(blobName, inputStream, blobSize, failIfAlreadyExists);
                    }
                } else {
                    // Simulate a non-atomic write since many blob container
                    // implementations does not support atomic write
                    maybeIOExceptionOrBlock(blobName);
-                   super.writeBlob(blobName, inputStream, blobSize);
+                   super.writeBlob(blobName, inputStream, blobSize, failIfAlreadyExists);
                }
            }
        }

ESBlobStoreContainerTestCase.java

@@ -61,7 +61,12 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
        try(BlobStore store = newBlobStore()) {
            final BlobContainer container = store.blobContainer(new BlobPath());
            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
-           writeBlob(container, "foobar", new BytesArray(data));
+           writeBlob(container, "foobar", new BytesArray(data), randomBoolean());
+           if (randomBoolean()) {
+               // override file, to check if we get latest contents
+               data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
+               writeBlob(container, "foobar", new BytesArray(data), false);
+           }
            try (InputStream stream = container.readBlob("foobar")) {
                BytesRefBuilder target = new BytesRefBuilder();
                while (target.length() < data.length) {
@@ -123,7 +128,7 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
            final BytesArray bytesArray = new BytesArray(data);
-           writeBlob(container, blobName, bytesArray);
+           writeBlob(container, blobName, bytesArray, randomBoolean());
            container.deleteBlob(blobName); // should not raise
 
            // blob deleted, so should raise again
@@ -149,20 +154,21 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
            final BlobContainer container = store.blobContainer(new BlobPath());
            byte[] data = randomBytes(randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16)));
            final BytesArray bytesArray = new BytesArray(data);
-           writeBlob(container, blobName, bytesArray);
+           writeBlob(container, blobName, bytesArray, true);
 
            // should not be able to overwrite existing blob
-           expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray));
+           expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray, true));
            container.deleteBlob(blobName);
-           writeBlob(container, blobName, bytesArray); // after deleting the previous blob, we should be able to write to it again
+           writeBlob(container, blobName, bytesArray, true); // after deleting the previous blob, we should be able to write to it again
        }
    }
 
-   protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray) throws IOException {
+   protected void writeBlob(final BlobContainer container, final String blobName, final BytesArray bytesArray,
+                            boolean failIfAlreadyExists) throws IOException {
        try (InputStream stream = bytesArray.streamInput()) {
            if (randomBoolean()) {
-               container.writeBlob(blobName, stream, bytesArray.length());
+               container.writeBlob(blobName, stream, bytesArray.length(), failIfAlreadyExists);
            } else {
-               container.writeBlobAtomic(blobName, stream, bytesArray.length());
+               container.writeBlobAtomic(blobName, stream, bytesArray.length(), failIfAlreadyExists);
            }
        }
    }

ESBlobStoreTestCase.java

@@ -80,7 +80,7 @@ public abstract class ESBlobStoreTestCase extends ESTestCase {
    protected static void writeBlob(BlobContainer container, String blobName, BytesArray bytesArray) throws IOException {
        try (InputStream stream = bytesArray.streamInput()) {
-           container.writeBlob(blobName, stream, bytesArray.length());
+           container.writeBlob(blobName, stream, bytesArray.length(), true);
        }
    }