Use simpler write-once semantics for HDFS repository (#30439)

There's no need for an extra `blobExists()` call when writing a blob to the HDFS repository. The `writeBlob` implementation for the HDFS repository already uses the `CreateFlag.CREATE` option on the file creation, which makes the create call itself fail if the blob already exists. This saves one network roundtrip per blob write. (A short sketch of this behavior follows the commit metadata below.)
Yannick Welsch 2018-05-11 09:50:37 +02:00 committed by GitHub
parent 2594c1fb38
commit fc870fdb4c
2 changed files with 4 additions and 6 deletions
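
To illustrate the semantics the commit relies on, here is a minimal, self-contained sketch against Hadoop's `FileContext` API (the path and data are made up for illustration). With `CreateFlag.CREATE` and no `OVERWRITE`, the create call itself throws `org.apache.hadoop.fs.FileAlreadyExistsException` when the target exists, so a separate existence check buys nothing:

```java
import java.util.EnumSet;

import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class CreateFlagDemo {
    public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext();
        Path blob = new Path("/tmp/demo-blob"); // hypothetical path
        EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE); // fail if the file already exists

        // First create succeeds and writes the blob.
        try (FSDataOutputStream out = fc.create(blob, flags)) {
            out.write(new byte[] { 1, 2, 3 });
        }

        // Second create fails up front: no blobExists() round-trip needed.
        try (FSDataOutputStream out = fc.create(blob, flags)) {
            out.write(new byte[] { 4, 5, 6 });
        } catch (org.apache.hadoop.fs.FileAlreadyExistsException e) {
            System.out.println("write-once enforced: " + e.getMessage());
        }
    }
}
```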

HdfsBlobContainer.java

```diff
@@ -100,14 +100,9 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
 
     @Override
     public void writeBlob(String blobName, InputStream inputStream, long blobSize) throws IOException {
-        if (blobExists(blobName)) {
-            throw new FileAlreadyExistsException("blob [" + blobName + "] already exists, cannot overwrite");
-        }
         store.execute((Operation<Void>) fileContext -> {
             Path blob = new Path(path, blobName);
             // we pass CREATE, which means it fails if a blob already exists.
             // NOTE: this behavior differs from FSBlobContainer, which passes TRUNCATE_EXISTING
             // that should be fixed there, no need to bring truncation into this, give the user an error.
             EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.CREATE, CreateFlag.SYNC_BLOCK);
             CreateOpts[] opts = {CreateOpts.bufferSize(bufferSize)};
             try (FSDataOutputStream stream = fileContext.create(blob, flags, opts)) {
@@ -121,6 +116,8 @@ final class HdfsBlobContainer extends AbstractBlobContainer {
                     // if true synchronous behavior is required"
                     stream.hsync();
                 }
+            } catch (org.apache.hadoop.fs.FileAlreadyExistsException faee) {
+                throw new FileAlreadyExistsException(blob.toString(), null, faee.getMessage());
             }
             return null;
         });
```
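
Note in the hunk above that Hadoop's `FileAlreadyExistsException` is its own `IOException` subclass, unrelated to `java.nio.file.FileAlreadyExistsException`; the catch block translates it so callers see the JDK type regardless of the repository backend. A hypothetical caller-side sketch of the resulting contract (`writeIfAbsent` is an invented helper, not Elasticsearch API):

```java
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.file.FileAlreadyExistsException;

import org.elasticsearch.common.blobstore.BlobContainer;

public final class WriteOnceDemo {
    // Returns true if this call created the blob, false if it already existed.
    static boolean writeIfAbsent(BlobContainer container, String name, byte[] data) throws IOException {
        try (InputStream in = new ByteArrayInputStream(data)) {
            container.writeBlob(name, in, data.length);
            return true;
        } catch (FileAlreadyExistsException e) {
            // The create itself failed: write-once semantics, no prior existence check needed.
            return false;
        }
    }
}
```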

ESBlobStoreContainerTestCase.java

```diff
@@ -29,6 +29,7 @@ import org.elasticsearch.test.ESTestCase;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.nio.file.FileAlreadyExistsException;
 import java.nio.file.NoSuchFileException;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -149,7 +150,7 @@ public abstract class ESBlobStoreContainerTestCase extends ESTestCase {
         final BytesArray bytesArray = new BytesArray(data);
         writeBlob(container, blobName, bytesArray);
         // should not be able to overwrite existing blob
-        expectThrows(IOException.class, () -> writeBlob(container, blobName, bytesArray));
+        expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray));
         container.deleteBlob(blobName);
         writeBlob(container, blobName, bytesArray); // after deleting the previous blob, we should be able to write to it again
     }
```
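
Since `expectThrows` returns the caught exception, the test could also assert on its contents. A hypothetical extension of the test above (what `getFile()` carries depends on the container implementation; for the HDFS container it is `blob.toString()`):

```java
final FileAlreadyExistsException faee =
    expectThrows(FileAlreadyExistsException.class, () -> writeBlob(container, blobName, bytesArray));
// For HdfsBlobContainer, getFile() is blob.toString(), which includes the blob name;
// other implementations may format it differently.
assertTrue(faee.getFile().contains(blobName));
```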