[STORE] Simplify reading / writing from and to BlobContainer

BlobContainer used to provide async APIs which are not used internally. The implementations of these APIs are also not async by nature, and neither is any of the pluggable BlobContainers. This commit simplifies the API to a simple input / output stream and dramatically reduces the BlobContainer hierarchy.

NOTE: This is a breaking change!

Closes #7551
commit 7f32e8c707
parent 6a0a7afea6
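A minimal sketch of the new surface from a caller's perspective (`copyBlob`, `source`, and `target` are hypothetical names for illustration; `BlobContainer.openInput`/`createOutput` and Guava's `ByteStreams` are taken from the diff below):

    import com.google.common.io.ByteStreams;
    import org.elasticsearch.common.blobstore.BlobContainer;

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    // With the listener callbacks gone, a blob copy is a plain synchronous
    // stream copy; try-with-resources replaces onCompleted()/onFailure().
    static void copyBlob(BlobContainer source, BlobContainer target, String name) throws IOException {
        try (InputStream in = source.openInput(name);
             OutputStream out = target.createOutput(name)) {
            ByteStreams.copy(in, out);
        }
    }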
@@ -41,11 +41,6 @@ pools, but the important ones include:
     keep-alive `5m`,
     size `(# of available processors)/2`.
 
-`snapshot_data`::
-    For snapshot/restore operations on data files, defaults to `scaling`
-    with a `5m` keep-alive,
-    size `5`.
-
 `warmer`::
     For segment warm-up operations, defaults to `scaling`
     with a `5m` keep-alive.
@@ -22,6 +22,8 @@ package org.elasticsearch.common.blobstore;
 import com.google.common.collect.ImmutableMap;
 
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 
 /**
  *
@@ -35,22 +37,19 @@ public interface BlobContainer {
         boolean accept(String blobName);
     }
 
-    interface ReadBlobListener {
-
-        void onPartial(byte[] data, int offset, int size) throws IOException;
-
-        void onCompleted();
-
-        void onFailure(Throwable t);
-    }
-
     BlobPath path();
 
     boolean blobExists(String blobName);
 
-    void readBlob(String blobName, ReadBlobListener listener);
+    /**
+     * Creates a new {@link InputStream} for the given blob name
+     */
+    InputStream openInput(String blobName) throws IOException;
 
-    byte[] readBlobFully(String blobName) throws IOException;
+    /**
+     * Creates a new OutputStream for the given blob name
+     */
+    OutputStream createOutput(String blobName) throws IOException;
 
     boolean deleteBlob(String blobName) throws IOException;
 
@@ -23,7 +23,7 @@ package org.elasticsearch.common.blobstore;
  */
 public interface BlobStore {
 
-    ImmutableBlobContainer immutableBlobContainer(BlobPath path);
+    BlobContainer blobContainer(BlobPath path);
 
     void delete(BlobPath path);
 
@@ -1,39 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.blobstore;
-
-import java.io.IOException;
-import java.io.InputStream;
-
-/**
- *
- */
-public interface ImmutableBlobContainer extends BlobContainer {
-
-    interface WriterListener {
-        void onCompleted();
-
-        void onFailure(Throwable t);
-    }
-
-    void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener);
-
-    void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException;
-}
@@ -21,27 +21,26 @@ package org.elasticsearch.common.blobstore.fs;
 
 import com.google.common.collect.ImmutableMap;
 import org.apache.lucene.util.IOUtils;
+import org.elasticsearch.ElasticsearchIllegalStateException;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 import org.elasticsearch.common.blobstore.support.PlainBlobMetaData;
 import org.elasticsearch.common.collect.MapBuilder;
+import org.elasticsearch.common.io.FileSystemUtils;
 
-import java.io.File;
-import java.io.FileInputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
+import java.io.*;
 
 /**
  *
  */
-public abstract class AbstractFsBlobContainer extends AbstractBlobContainer {
+public class FsBlobContainer extends AbstractBlobContainer {
 
     protected final FsBlobStore blobStore;
 
     protected final File path;
 
-    public AbstractFsBlobContainer(FsBlobStore blobStore, BlobPath blobPath, File path) {
+    public FsBlobContainer(FsBlobStore blobStore, BlobPath blobPath, File path) {
         super(blobPath);
         this.blobStore = blobStore;
         this.path = path;
@@ -76,30 +75,20 @@ public abstract class AbstractFsBlobContainer extends AbstractBlobContainer {
     }
 
     @Override
-    public void readBlob(final String blobName, final ReadBlobListener listener) {
-        blobStore.executor().execute(new Runnable() {
+    public InputStream openInput(String name) throws IOException {
+        return new BufferedInputStream(new FileInputStream(new File(path, name)), blobStore.bufferSizeInBytes());
+    }
+
+    @Override
+    public OutputStream createOutput(String blobName) throws IOException {
+        final File file = new File(path, blobName);
+        return new BufferedOutputStream(new FilterOutputStream(new FileOutputStream(file)) {
             @Override
-            public void run() {
-                byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
-                FileInputStream is = null;
-                try {
-                    is = new FileInputStream(new File(path, blobName));
-                    int bytesRead;
-                    while ((bytesRead = is.read(buffer)) != -1) {
-                        listener.onPartial(buffer, 0, bytesRead);
-                    }
-                } catch (Throwable t) {
-                    IOUtils.closeWhileHandlingException(is);
-                    listener.onFailure(t);
-                    return;
-                }
-                try {
-                    IOUtils.closeWhileHandlingException(is);
-                    listener.onCompleted();
-                } catch (Throwable t) {
-                    listener.onFailure(t);
-                }
+            public void close() throws IOException {
+                super.close();
+                IOUtils.fsync(file, false);
+                IOUtils.fsync(path, true);
             }
-        });
+        }, blobStore.bufferSizeInBytes());
     }
 }
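The rewritten createOutput makes durability part of close(): the returned stream fsyncs the file and its parent directory before the caller's try-with-resources block exits. A standalone sketch of the same pattern under the same assumptions (Lucene's `IOUtils.fsync`, as used in the hunk above; `durableOutput` is a hypothetical helper name):

    import org.apache.lucene.util.IOUtils;

    import java.io.*;

    static OutputStream durableOutput(final File dir, String name, int bufferSize) throws IOException {
        final File file = new File(dir, name);
        return new BufferedOutputStream(new FilterOutputStream(new FileOutputStream(file)) {
            @Override
            public void close() throws IOException {
                super.close();              // flush and close the file descriptor
                IOUtils.fsync(file, false); // force file contents to disk
                IOUtils.fsync(dir, true);   // force the directory entry to disk
            }
        }, bufferSize);
    }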
@@ -19,35 +19,30 @@
 
 package org.elasticsearch.common.blobstore.fs;
 
+import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.BlobStoreException;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.io.FileSystemUtils;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
 import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.File;
-import java.util.concurrent.Executor;
 
 /**
  *
  */
 public class FsBlobStore extends AbstractComponent implements BlobStore {
 
-    private final ThreadPool threadPool;
-
     private final File path;
 
     private final int bufferSizeInBytes;
 
-    public FsBlobStore(Settings settings, ThreadPool threadPool, File path) {
+    public FsBlobStore(Settings settings, File path) {
         super(settings);
         this.path = path;
-        this.threadPool = threadPool;
         if (!path.exists()) {
             boolean b = FileSystemUtils.mkdirs(path);
             if (!b) {
@@ -73,13 +68,9 @@ public class FsBlobStore extends AbstractComponent implements BlobStore {
         return this.bufferSizeInBytes;
     }
 
-    public Executor executor() {
-        return threadPool.executor(ThreadPool.Names.SNAPSHOT_DATA);
-    }
-
     @Override
-    public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
-        return new FsImmutableBlobContainer(this, path, buildAndCreate(path));
+    public BlobContainer blobContainer(BlobPath path) {
+        return new FsBlobContainer(this, path, buildAndCreate(path));
     }
 
     @Override
@@ -1,88 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.blobstore.fs;
-
-import org.elasticsearch.ElasticsearchIllegalStateException;
-import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
-import org.elasticsearch.common.blobstore.support.BlobStores;
-import org.elasticsearch.common.io.FileSystemUtils;
-
-import java.io.File;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.RandomAccessFile;
-
-/**
- *
- */
-public class FsImmutableBlobContainer extends AbstractFsBlobContainer implements ImmutableBlobContainer {
-
-    public FsImmutableBlobContainer(FsBlobStore blobStore, BlobPath blobPath, File path) {
-        super(blobStore, blobPath, path);
-    }
-
-    @Override
-    public void writeBlob(final String blobName, final InputStream stream, final long sizeInBytes, final WriterListener listener) {
-        blobStore.executor().execute(new Runnable() {
-            @Override
-            public void run() {
-                final File file = new File(path, blobName);
-                boolean success = false;
-                try {
-                    try (final RandomAccessFile raf = new RandomAccessFile(file, "rw");
-                         final InputStream is = stream) {
-                        // clean the file if it exists
-                        raf.setLength(0);
-                        long bytesWritten = 0;
-                        final byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
-                        int bytesRead;
-                        while ((bytesRead = is.read(buffer)) != -1) {
-                            raf.write(buffer, 0, bytesRead);
-                            bytesWritten += bytesRead;
-                        }
-                        if (bytesWritten != sizeInBytes) {
-                            throw new ElasticsearchIllegalStateException("[" + blobName + "]: wrote [" + bytesWritten + "], expected to write [" + sizeInBytes + "]");
-                        }
-                        // fsync the FD we are done with writing
-                        raf.getFD().sync();
-                        // try to fsync the directory to make sure all metadata is written to
-                        // the storage device - NOTE: if it's a dir it will not throw any exception
-                        FileSystemUtils.syncFile(path, true);
-                    }
-                    success = true;
-                } catch (Throwable e) {
-                    listener.onFailure(e);
-                    // just on the safe size, try and delete it on failure
-                    FileSystemUtils.tryDeleteFile(file);
-                } finally {
-                    if (success) {
-                        listener.onCompleted();
-                    }
-                }
-            }
-        });
-    }
-
-    @Override
-    public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
-        BlobStores.syncWriteBlob(this, blobName, is, sizeInBytes);
-    }
-}
@@ -24,11 +24,7 @@ import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.InterruptedIOException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReference;
+import java.io.*;
 
 /**
  *
@@ -46,46 +42,6 @@ public abstract class AbstractBlobContainer implements BlobContainer {
         return this.path;
     }
 
-    @Override
-    public byte[] readBlobFully(String blobName) throws IOException {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final AtomicReference<Throwable> failure = new AtomicReference<>();
-        final ByteArrayOutputStream bos = new ByteArrayOutputStream();
-
-        readBlob(blobName, new ReadBlobListener() {
-            @Override
-            public void onPartial(byte[] data, int offset, int size) {
-                bos.write(data, offset, size);
-            }
-
-            @Override
-            public void onCompleted() {
-                latch.countDown();
-            }
-
-            @Override
-            public void onFailure(Throwable t) {
-                failure.set(t);
-                latch.countDown();
-            }
-        });
-
-        try {
-            latch.await();
-        } catch (InterruptedException e) {
-            throw new InterruptedIOException("Interrupted while waiting to read [" + blobName + "]");
-        }
-
-        if (failure.get() != null) {
-            if (failure.get() instanceof IOException) {
-                throw (IOException) failure.get();
-            } else {
-                throw new IOException("Failed to get [" + blobName + "]", failure.get());
-            }
-        }
-        return bos.toByteArray();
-    }
-
     @Override
     public ImmutableMap<String, BlobMetaData> listBlobsByPrefix(String blobNamePrefix) throws IOException {
         ImmutableMap<String, BlobMetaData> allBlobs = listBlobs();
@@ -117,4 +73,5 @@ public abstract class AbstractBlobContainer implements BlobContainer {
             }
         }
     }
+
 }
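With readBlobFully gone, a caller that needs the whole blob in memory simply drains the stream. A one-line sketch using the Guava helper this commit introduces elsewhere in the diff (`container` and `blobName` are hypothetical stand-ins):

    try (InputStream in = container.openInput(blobName)) {
        byte[] data = ByteStreams.toByteArray(in); // reads until EOF
    }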
@@ -1,64 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.blobstore.support;
-
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InterruptedIOException;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicReference;
-
-/**
- *
- */
-public class BlobStores {
-
-    public static void syncWriteBlob(ImmutableBlobContainer blobContainer, String blobName, InputStream is, long sizeInBytes) throws IOException {
-        final CountDownLatch latch = new CountDownLatch(1);
-        final AtomicReference<Throwable> failure = new AtomicReference<>();
-        blobContainer.writeBlob(blobName, is, sizeInBytes, new ImmutableBlobContainer.WriterListener() {
-            @Override
-            public void onCompleted() {
-                latch.countDown();
-            }
-
-            @Override
-            public void onFailure(Throwable t) {
-                failure.set(t);
-                latch.countDown();
-            }
-        });
-        try {
-            latch.await();
-        } catch (InterruptedException e) {
-            throw new InterruptedIOException("Interrupted while waiting to write [" + blobName + "]");
-        }
-
-        if (failure.get() != null) {
-            if (failure.get() instanceof IOException) {
-                throw (IOException) failure.get();
-            } else {
-                throw new IOException("Failed to get [" + blobName + "]", failure.get());
-            }
-        }
-    }
-}
@@ -20,32 +20,33 @@
 package org.elasticsearch.common.blobstore.url;
 
 import com.google.common.collect.ImmutableMap;
-import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.support.AbstractBlobContainer;
 
+import java.io.BufferedInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.net.URL;
 
 /**
  * URL blob implementation of {@link org.elasticsearch.common.blobstore.BlobContainer}
  */
-public abstract class AbstractURLBlobContainer extends AbstractBlobContainer {
+public class URLBlobContainer extends AbstractBlobContainer {
 
     protected final URLBlobStore blobStore;
 
     protected final URL path;
 
     /**
-     * Constructs new AbstractURLBlobContainer
+     * Constructs new URLBlobContainer
      *
      * @param blobStore blob store
      * @param blobPath  blob path for this container
     * @param path      URL for this container
      */
-    public AbstractURLBlobContainer(URLBlobStore blobStore, BlobPath blobPath, URL path) {
+    public URLBlobContainer(URLBlobStore blobStore, BlobPath blobPath, URL path) {
         super(blobPath);
         this.blobStore = blobStore;
         this.path = path;
@@ -61,7 +62,7 @@ public abstract class AbstractURLBlobContainer extends AbstractBlobContainer {
     }
 
     /**
-     * This operation is not supported by AbstractURLBlobContainer
+     * This operation is not supported by URLBlobContainer
      */
     @Override
    public ImmutableMap<String, BlobMetaData> listBlobs() throws IOException {
@@ -69,7 +70,7 @@ public abstract class AbstractURLBlobContainer extends AbstractBlobContainer {
     }
 
     /**
-     * This operation is not supported by AbstractURLBlobContainer
+     * This operation is not supported by URLBlobContainer
      */
     @Override
     public boolean deleteBlob(String blobName) throws IOException {
@@ -77,41 +78,20 @@ public abstract class AbstractURLBlobContainer extends AbstractBlobContainer {
     }
 
     /**
-     * This operation is not supported by AbstractURLBlobContainer
+     * This operation is not supported by URLBlobContainer
      */
     @Override
     public boolean blobExists(String blobName) {
         throw new UnsupportedOperationException("URL repository doesn't support this operation");
     }
 
-    /**
-     * {@inheritDoc}
-     */
     @Override
-    public void readBlob(final String blobName, final ReadBlobListener listener) {
-        blobStore.executor().execute(new Runnable() {
-            @Override
-            public void run() {
-                byte[] buffer = new byte[blobStore.bufferSizeInBytes()];
-                InputStream is = null;
-                try {
-                    is = new URL(path, blobName).openStream();
-                    int bytesRead;
-                    while ((bytesRead = is.read(buffer)) != -1) {
-                        listener.onPartial(buffer, 0, bytesRead);
-                    }
-                } catch (Throwable t) {
-                    IOUtils.closeWhileHandlingException(is);
-                    listener.onFailure(t);
-                    return;
-                }
-                try {
-                    IOUtils.closeWhileHandlingException(is);
-                    listener.onCompleted();
-                } catch (Throwable t) {
-                    listener.onFailure(t);
-                }
-            }
-        });
+    public InputStream openInput(String name) throws IOException {
+        return new BufferedInputStream(new URL(path, name).openStream(), blobStore.bufferSizeInBytes());
+    }
+
+    @Override
+    public OutputStream createOutput(String blobName) throws IOException {
+        throw new UnsupportedOperationException("URL repository doesn't support this operation");
     }
 }
@@ -19,10 +19,10 @@
 
 package org.elasticsearch.common.blobstore.url;
 
+import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
 import org.elasticsearch.common.blobstore.BlobStoreException;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeUnit;
@@ -38,8 +38,6 @@ import java.util.concurrent.Executor;
  */
 public class URLBlobStore extends AbstractComponent implements BlobStore {
 
-    private final ThreadPool threadPool;
-
     private final URL path;
 
     private final int bufferSizeInBytes;
@@ -54,14 +52,12 @@ public class URLBlobStore extends AbstractComponent implements BlobStore {
      * </dl>
      *
     * @param settings   settings
-     * @param threadPool thread pool for read operations
     * @param path       base URL
      */
-    public URLBlobStore(Settings settings, ThreadPool threadPool, URL path) {
+    public URLBlobStore(Settings settings, URL path) {
         super(settings);
         this.path = path;
         this.bufferSizeInBytes = (int) settings.getAsBytesSize("buffer_size", new ByteSizeValue(100, ByteSizeUnit.KB)).bytes();
-        this.threadPool = threadPool;
     }
 
     /**
@@ -90,22 +86,13 @@ public class URLBlobStore extends AbstractComponent implements BlobStore {
         return this.bufferSizeInBytes;
     }
 
-    /**
-     * Returns executor used for read operations
-     *
-     * @return executor
-     */
-    public Executor executor() {
-        return threadPool.executor(ThreadPool.Names.SNAPSHOT_DATA);
-    }
-
     /**
      * {@inheritDoc}
      */
     @Override
-    public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
+    public BlobContainer blobContainer(BlobPath path) {
         try {
-            return new URLImmutableBlobContainer(this, path, buildPath(path));
+            return new URLBlobContainer(this, path, buildPath(path));
         } catch (MalformedURLException ex) {
             throw new BlobStoreException("malformed URL " + path, ex);
         }
@@ -1,60 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.common.blobstore.url;
-
-import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
-
-import java.io.IOException;
-import java.io.InputStream;
-import java.net.URL;
-
-/**
- * Read-only URL-based implementation of {@link ImmutableBlobContainer}
- */
-public class URLImmutableBlobContainer extends AbstractURLBlobContainer implements ImmutableBlobContainer {
-
-    /**
-     * Constructs a new URLImmutableBlobContainer
-     *
-     * @param blobStore blob store
-     * @param blobPath  blob path to this container
-     * @param path      URL of this container
-     */
-    public URLImmutableBlobContainer(URLBlobStore blobStore, BlobPath blobPath, URL path) {
-        super(blobStore, blobPath, path);
-    }
-
-    /**
-     * This operation is not supported by URL Blob Container
-     */
-    @Override
-    public void writeBlob(final String blobName, final InputStream is, final long sizeInBytes, final WriterListener listener) {
-        throw new UnsupportedOperationException("URL repository is read only");
-    }
-
-    /**
-     * This operation is not supported by URL Blob Container
-     */
-    @Override
-    public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
-        throw new UnsupportedOperationException("URL repository is read only");
-    }
-}
@@ -22,16 +22,15 @@ package org.elasticsearch.index.snapshots.blobstore;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Iterables;
 import com.google.common.collect.Lists;
+import com.google.common.io.ByteStreams;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.common.blobstore.*;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.io.stream.BytesStreamInput;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
 import org.elasticsearch.common.settings.Settings;
@@ -48,14 +47,12 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.repositories.RepositoryName;
 
-import java.io.ByteArrayOutputStream;
 import java.io.FilterInputStream;
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.util.*;
 import java.util.concurrent.CopyOnWriteArrayList;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.atomic.AtomicInteger;
 
 import static com.google.common.collect.Lists.newArrayList;
 
@@ -64,6 +61,7 @@ import static com.google.common.collect.Lists.newArrayList;
  */
 public class BlobStoreIndexShardRepository extends AbstractComponent implements IndexShardRepository {
 
+    private static final int BUFFER_SIZE = 4096;
     private BlobStore blobStore;
 
     private BlobPath basePath;
@@ -144,8 +142,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
      */
     @Override
     public void restore(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId, RecoveryState recoveryState) {
-        RestoreContext snapshotContext = new RestoreContext(snapshotId, shardId, snapshotShardId, recoveryState);
-
+        final RestoreContext snapshotContext = new RestoreContext(snapshotId, shardId, snapshotShardId, recoveryState);
         try {
             recoveryState.getIndex().startTime(System.currentTimeMillis());
             snapshotContext.restore();
@@ -205,24 +202,25 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
      * Serializes snapshot to JSON
      *
      * @param snapshot snapshot
-     * @return JSON representation of the snapshot
-     * @throws IOException
+     * @param stream   the stream to output the snapshot JSON representation to
+     * @throws IOException if an IOException occurs
      */
-    public static byte[] writeSnapshot(BlobStoreIndexShardSnapshot snapshot) throws IOException {
-        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON).prettyPrint();
+    public static void writeSnapshot(BlobStoreIndexShardSnapshot snapshot, OutputStream stream) throws IOException {
+        XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream).prettyPrint();
         BlobStoreIndexShardSnapshot.toXContent(snapshot, builder, ToXContent.EMPTY_PARAMS);
-        return builder.bytes().toBytes();
+        builder.flush();
+        builder.close();
     }
 
     /**
      * Parses JSON representation of a snapshot
      *
-     * @param data JSON
+     * @param stream JSON
      * @return snapshot
-     * @throws IOException
+     * @throws IOException if an IOException occurs
      */
-    public static BlobStoreIndexShardSnapshot readSnapshot(byte[] data) throws IOException {
-        try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(data)) {
+    public static BlobStoreIndexShardSnapshot readSnapshot(InputStream stream) throws IOException {
+        try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(stream)) {
             parser.nextToken();
             return BlobStoreIndexShardSnapshot.fromXContent(parser);
         }
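Since writeSnapshot now targets any OutputStream, persisting and reloading a shard snapshot composes directly with the container API. A hedged round-trip sketch (`container`, `snapshot`, and `blobName` are hypothetical stand-ins; the two static methods are the ones changed above):

    // serialize the snapshot JSON straight into the blob...
    try (OutputStream output = container.createOutput(blobName)) {
        BlobStoreIndexShardRepository.writeSnapshot(snapshot, output);
    }
    // ...and parse it back without an intermediate byte[]
    try (InputStream input = container.openInput(blobName)) {
        BlobStoreIndexShardSnapshot restored = BlobStoreIndexShardRepository.readSnapshot(input);
    }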
@@ -237,7 +235,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
 
         protected final ShardId shardId;
 
-        protected final ImmutableBlobContainer blobContainer;
+        protected final BlobContainer blobContainer;
 
         public Context(SnapshotId snapshotId, ShardId shardId) {
             this(snapshotId, shardId, shardId);
@@ -246,7 +244,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
         public Context(SnapshotId snapshotId, ShardId shardId, ShardId snapshotShardId) {
             this.snapshotId = snapshotId;
             this.shardId = shardId;
-            blobContainer = blobStore.immutableBlobContainer(basePath.add("indices").add(snapshotShardId.getIndex()).add(Integer.toString(snapshotShardId.getId())));
+            blobContainer = blobStore.blobContainer(basePath.add("indices").add(snapshotShardId.getIndex()).add(Integer.toString(snapshotShardId.getId())));
         }
 
         /**
@@ -286,8 +284,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
         */
         public BlobStoreIndexShardSnapshot loadSnapshot() {
             BlobStoreIndexShardSnapshot snapshot;
-            try {
-                snapshot = readSnapshot(blobContainer.readBlobFully(snapshotBlobName(snapshotId)));
+            try (InputStream stream = blobContainer.openInput(snapshotBlobName(snapshotId))) {
+                snapshot = readSnapshot(stream);
             } catch (IOException ex) {
                 throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex);
             }
@@ -362,8 +360,8 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
             List<BlobStoreIndexShardSnapshot> snapshots = Lists.newArrayList();
             for (String name : blobs.keySet()) {
                 if (name.startsWith(SNAPSHOT_PREFIX)) {
-                    try {
-                        snapshots.add(readSnapshot(blobContainer.readBlobFully(name)));
+                    try (InputStream stream = blobContainer.openInput(name)) {
+                        snapshots.add(readSnapshot(stream));
                     } catch (IOException e) {
                         logger.warn("failed to read commit point [{}]", e, name);
                     }
@@ -469,28 +467,15 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
 
             snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.STARTED);
 
-            final CountDownLatch indexLatch = new CountDownLatch(filesToSnapshot.size());
-
             for (FileInfo snapshotFileInfo : filesToSnapshot) {
                 try {
-                    snapshotFile(snapshotFileInfo, indexLatch, failures);
+                    snapshotFile(snapshotFileInfo);
                 } catch (IOException e) {
-                    failures.add(e);
+                    throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", e);
                 }
             }
 
             snapshotStatus.indexVersion(snapshotIndexCommit.getGeneration());
 
-            try {
-                indexLatch.await();
-            } catch (InterruptedException e) {
-                failures.add(e);
-                Thread.currentThread().interrupt();
-            }
-            if (!failures.isEmpty()) {
-                throw new IndexShardSnapshotFailedException(shardId, "Failed to perform snapshot (index files)", failures.get(0));
-            }
-
             // now create and write the commit point
             snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE);
 
@@ -501,9 +486,10 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
                 System.currentTimeMillis() - snapshotStatus.startTime(), indexNumberOfFiles, indexTotalFilesSize);
             //TODO: The time stored in snapshot doesn't include cleanup time.
             try {
-                byte[] snapshotData = writeSnapshot(snapshot);
                 logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
-                blobContainer.writeBlob(commitPointName, new BytesStreamInput(snapshotData, false), snapshotData.length);
+                try (OutputStream output = blobContainer.createOutput(commitPointName)) {
+                    writeSnapshot(snapshot, output);
+                }
             } catch (IOException e) {
                 throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
            }
@@ -529,98 +515,32 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
          * added to the {@code failures} list
          *
          * @param fileInfo file to be snapshotted
-         * @param latch    latch that should be counted down once file is snapshoted
-         * @param failures thread-safe list of failures
          * @throws IOException
          */
-        private void snapshotFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) throws IOException {
+        private void snapshotFile(final BlobStoreIndexShardSnapshot.FileInfo fileInfo) throws IOException {
             final String file = fileInfo.physicalName();
-            IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata());
-            writeBlob(indexInput, fileInfo, 0, latch, failures);
-        }
-
-        private class BlobPartWriter implements ImmutableBlobContainer.WriterListener {
-
-            private final int part;
-
-            private final FileInfo fileInfo;
-
-            private final List<Throwable> failures;
-
-            private final CountDownLatch latch;
-
-            private final IndexInput indexInput;
-
-            private final InputStream inputStream;
-
-            private final InputStreamIndexInput inputStreamIndexInput;
-
-            private BlobPartWriter(IndexInput indexInput, FileInfo fileInfo, int part, CountDownLatch latch, List<Throwable> failures) throws IOException {
-                this.indexInput = indexInput;
-                this.part = part;
-                this.fileInfo = fileInfo;
-                this.failures = failures;
-                this.latch = latch;
-                inputStreamIndexInput = new InputStreamIndexInput(indexInput, fileInfo.partBytes());
-                InputStream inputStream = inputStreamIndexInput;
-                if (snapshotRateLimiter != null) {
-                    inputStream = new RateLimitingInputStream(inputStream, snapshotRateLimiter, snapshotThrottleListener);
-                }
-                inputStream = new AbortableInputStream(inputStream, fileInfo.physicalName());
-                this.inputStream = inputStream;
-            }
-
-            @Override
-            public void onCompleted() {
-                int nextPart = part + 1;
-                if (nextPart < fileInfo.numberOfParts()) {
-                    try {
-                        // We have more parts to go
-                        writeBlob(indexInput, fileInfo, nextPart, latch, failures);
-                    } catch (Throwable t) {
-                        onFailure(t);
-                    }
-                } else {
-                    // Last part - verify checksum
-                    try {
-                        Store.verify(indexInput);
-                        indexInput.close();
-                        snapshotStatus.addProcessedFile(fileInfo.length());
-                    } catch (Throwable t) {
-                        onFailure(t);
-                        return;
-                    }
-                    latch.countDown();
-                }
-            }
-
-            @Override
-            public void onFailure(Throwable t) {
-                cleanupFailedSnapshot(t, indexInput, latch, failures);
-            }
-
-            public void writeBlobPart() throws IOException {
-                blobContainer.writeBlob(fileInfo.partName(part), inputStream, inputStreamIndexInput.actualSizeToRead(), this);
-            }
-
-        }
-
-        private void writeBlob(IndexInput indexInput, FileInfo fileInfo, int part, CountDownLatch latch, List<Throwable> failures) {
-            try {
-                new BlobPartWriter(indexInput, fileInfo, part, latch, failures).writeBlobPart();
+            try (IndexInput indexInput = store.openVerifyingInput(file, IOContext.READONCE, fileInfo.metadata())) {
+                for (int i = 0; i < fileInfo.numberOfParts(); i++) {
+                    final InputStreamIndexInput inputStreamIndexInput = new InputStreamIndexInput(indexInput, fileInfo.partBytes());
+                    InputStream inputStream = snapshotRateLimiter == null ? inputStreamIndexInput : new RateLimitingInputStream(inputStreamIndexInput, snapshotRateLimiter, snapshotThrottleListener);
+                    inputStream = new AbortableInputStream(inputStream, fileInfo.physicalName());
+                    try (OutputStream output = blobContainer.createOutput(fileInfo.partName(i))) {
+                        int len;
+                        final byte[] buffer = new byte[BUFFER_SIZE];
+                        while ((len = inputStream.read(buffer)) > 0) {
+                            output.write(buffer, 0, len);
+                        }
+                    }
+                }
+                Store.verify(indexInput);
+                snapshotStatus.addProcessedFile(fileInfo.length());
             } catch (Throwable t) {
-                cleanupFailedSnapshot(t, indexInput, latch, failures);
+                failStoreIfCorrupted(t);
+                snapshotStatus.addProcessedFile(0);
+                throw t;
             }
         }
 
-        private void cleanupFailedSnapshot(Throwable t, IndexInput indexInput, CountDownLatch latch, List<Throwable> failures) {
-            IOUtils.closeWhileHandlingException(indexInput);
-            failStoreIfCorrupted(t);
-            snapshotStatus.addProcessedFile(0);
-            failures.add(t);
-            latch.countDown();
-        }
-
         private void failStoreIfCorrupted(Throwable t) {
             if (t instanceof CorruptIndexException) {
                 try {
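The rewritten snapshotFile catches Throwable and rethrows it from a method that only declares IOException; this compiles because of Java 7's precise rethrow, since the catch parameter is effectively final and the try block can only throw IOException, RuntimeException, or Error. A minimal illustration (`readOnePart` and `cleanup` are hypothetical helpers):

    void example() throws IOException {
        try {
            readOnePart();   // declared to throw IOException
        } catch (Throwable t) {
            cleanup();       // record the failure first
            throw t;         // precise rethrow: the compiler narrows t's type
        }
    }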
@@ -693,72 +613,42 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
          * The new logic for StoreFileMetaData reads the entire <tt>.si</tt> and <tt>segments.n</tt> files to strengthen the
          * comparison of the files on a per-segment / per-commit level.
          */
-        private static final void maybeRecalculateMetadataHash(final ImmutableBlobContainer blobContainer, final FileInfo fileInfo, Store.MetadataSnapshot snapshot) throws Throwable {
+        private static final void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final FileInfo fileInfo, Store.MetadataSnapshot snapshot) throws Throwable {
             final StoreFileMetaData metadata;
             if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
                 if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
                     // we have a hash - check if our repo has a hash too otherwise we have
                     // to calculate it.
-                    final ByteArrayOutputStream out = new ByteArrayOutputStream();
-                    final CountDownLatch latch = new CountDownLatch(1);
-                    final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<>();
                     // we might have multiple parts even though the file is small... make sure we read all of it.
-                    // TODO this API should really support a stream!
-                    blobContainer.readBlob(fileInfo.partName(0), new BlobContainer.ReadBlobListener() {
-                        final AtomicInteger partIndex = new AtomicInteger();
-
-                        @Override
-                        public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
-                            out.write(data, offset, size);
-                        }
-
-                        @Override
-                        public synchronized void onCompleted() {
-                            boolean countDown = true;
-                            try {
-                                final int part = partIndex.incrementAndGet();
-                                if (part < fileInfo.numberOfParts()) {
-                                    final String partName = fileInfo.partName(part);
-                                    // continue with the new part
-                                    blobContainer.readBlob(partName, this);
-                                    countDown = false;
-                                    return;
-                                }
-                            } finally {
-                                if (countDown) {
-                                    latch.countDown();
-                                }
-                            }
-                        }
-
-                        @Override
-                        public void onFailure(Throwable t) {
-                            try {
-                                failures.add(t);
-                            } finally {
-                                latch.countDown();
-                            }
-                        }
-                    });
-
-                    try {
-                        latch.await();
-                    } catch (InterruptedException e) {
-                        Thread.interrupted();
+                    try (final InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
+                        final byte[] bytes = ByteStreams.toByteArray(stream);
+                        assert bytes != null;
+                        assert bytes.length == fileInfo.length() : bytes.length + " != " + fileInfo.length();
+                        final BytesRef spare = new BytesRef(bytes);
+                        Store.MetadataSnapshot.hashFile(fileInfo.metadata().hash(), spare);
                     }
-
-                    if (!failures.isEmpty()) {
-                        ExceptionsHelper.rethrowAndSuppress(failures);
-                    }
-
-                    final byte[] bytes = out.toByteArray();
-                    assert bytes != null;
-                    assert bytes.length == fileInfo.length() : bytes.length + " != " + fileInfo.length();
-                    final BytesRef spare = new BytesRef(bytes);
-                    Store.MetadataSnapshot.hashFile(fileInfo.metadata().hash(), spare);
                 }
             }
         }
 
+        private static final class PartSliceStream extends SlicedInputStream {
+
+            private final BlobContainer container;
+            private final FileInfo info;
+
+            public PartSliceStream(BlobContainer container, FileInfo info) {
+                super(info.numberOfParts());
+                this.info = info;
+                this.container = container;
+            }
+
+            @Override
+            protected InputStream openSlice(long slice) throws IOException {
+                return container.openInput(info.partName(slice));
+            }
+        }
+
         /**
          * Context for restore operations
         */
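PartSliceStream above turns an N-part blob into one logical stream by opening part i only after part i-1 is exhausted; the SlicedInputStream base class it extends is not shown in this diff. A self-contained sketch of the concatenation idea, assuming nothing beyond java.io:

    import java.io.IOException;
    import java.io.InputStream;

    // Minimal stand-in for the SlicedInputStream idea: read() serves bytes from
    // the current slice and lazily opens the next slice on EOF.
    abstract class ConcatenatingStream extends InputStream {
        private final int numSlices;
        private int nextSlice = 0;
        private InputStream current;

        ConcatenatingStream(int numSlices) {
            this.numSlices = numSlices;
        }

        protected abstract InputStream openSlice(int slice) throws IOException;

        @Override
        public int read() throws IOException {
            while (true) {
                if (current == null) {
                    if (nextSlice == numSlices) {
                        return -1;       // all slices consumed
                    }
                    current = openSlice(nextSlice++);
                }
                int b = current.read();
                if (b != -1) {
                    return b;
                }
                current.close();         // this slice is done, advance
                current = null;
            }
        }

        @Override
        public void close() throws IOException {
            if (current != null) {
                current.close();
            }
        }
    }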
@ -864,25 +754,14 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
|
||||||
if (logger.isTraceEnabled()) {
|
if (logger.isTraceEnabled()) {
|
||||||
logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId, numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
|
logger.trace("[{}] [{}] recovering_files [{}] with total_size [{}], reusing_files [{}] with reused_size [{}]", shardId, snapshotId, numberOfFiles, new ByteSizeValue(totalSize), numberOfReusedFiles, new ByteSizeValue(reusedTotalSize));
|
||||||
}
|
}
|
||||||
|
|
||||||
final CountDownLatch latch = new CountDownLatch(filesToRecover.size());
|
|
||||||
final CopyOnWriteArrayList<Throwable> failures = new CopyOnWriteArrayList<>();
|
|
||||||
|
|
||||||
for (final FileInfo fileToRecover : filesToRecover) {
|
|
||||||
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
|
|
||||||
restoreFile(fileToRecover, latch, failures);
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
try {
|
||||||
latch.await();
|
for (final FileInfo fileToRecover : filesToRecover) {
|
||||||
} catch (InterruptedException e) {
|
logger.trace("[{}] [{}] restoring file [{}]", shardId, snapshotId, fileToRecover.name());
|
||||||
Thread.currentThread().interrupt();
|
restoreFile(fileToRecover);
|
||||||
|
}
|
||||||
|
} catch (IOException ex) {
|
||||||
|
throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", ex);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (!failures.isEmpty()) {
|
|
||||||
throw new IndexShardRestoreFailedException(shardId, "Failed to recover index", failures.get(0));
|
|
||||||
}
|
|
||||||
|
|
||||||
// read the snapshot data persisted
|
// read the snapshot data persisted
|
||||||
long version = -1;
|
long version = -1;
|
||||||
try {
|
try {
|
||||||
@@ -919,107 +798,52 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
          * added to the {@code failures} list
          *
          * @param fileInfo file to be restored
-         * @param latch    latch that should be counted down once file is snapshoted
-         * @param failures thread-safe list of failures
          */
-        private void restoreFile(final FileInfo fileInfo, final CountDownLatch latch, final List<Throwable> failures) {
-            final IndexOutput indexOutput;
-            try {
-                // we create an output with no checksum, this is because the pure binary data of the file is not
-                // the checksum (because of seek). We will create the checksum file once copying is done
-                indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), IOContext.DEFAULT, fileInfo.metadata());
-            } catch (IOException e) {
-                try {
-                    failures.add(e);
-                } finally {
-                    latch.countDown();
-                }
-                return;
-            }
-
-            String firstFileToRecover = fileInfo.partName(0);
-            final AtomicInteger partIndex = new AtomicInteger();
+        private void restoreFile(final FileInfo fileInfo) throws IOException {
             boolean success = false;
-            try {
-                blobContainer.readBlob(firstFileToRecover, new BlobContainer.ReadBlobListener() {
-                    @Override
-                    public synchronized void onPartial(byte[] data, int offset, int size) throws IOException {
-                        recoveryState.getIndex().addRecoveredByteCount(size);
-                        RecoveryState.File file = recoveryState.getIndex().file(fileInfo.name());
-                        if (file != null) {
-                            file.updateRecovered(size);
-                        }
-                        indexOutput.writeBytes(data, offset, size);
-                        if (restoreRateLimiter != null) {
-                            rateLimiterListener.onRestorePause(restoreRateLimiter.pause(size));
-                        }
-                    }
-
-                    @Override
-                    public synchronized void onCompleted() {
-                        int part = partIndex.incrementAndGet();
-                        if (part < fileInfo.numberOfParts()) {
-                            String partName = fileInfo.partName(part);
-                            // continue with the new part
-                            blobContainer.readBlob(partName, this);
-                            return;
-                        } else {
-                            // we are done...
-                            try {
-                                Store.verify(indexOutput);
-                                indexOutput.close();
-                                // write the checksum
-                                if (fileInfo.metadata().hasLegacyChecksum()) {
-                                    Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
-                                    legacyChecksums.add(fileInfo.metadata());
-                                    legacyChecksums.write(store);
-                                }
-                                store.directory().sync(Collections.singleton(fileInfo.physicalName()));
-                                recoveryState.getIndex().addRecoveredFileCount(1);
-                            } catch (IOException e) {
-                                onFailure(e);
-                                return;
-                            }
-                        }
-                        latch.countDown();
-                    }
-
-                    @Override
-                    public void onFailure(Throwable t) {
-                        try {
-                            failures.add(t);
-                            IOUtils.closeWhileHandlingException(indexOutput);
-                            if (t instanceof CorruptIndexException) {
-                                try {
-                                    store.markStoreCorrupted((CorruptIndexException) t);
-                                } catch (IOException e) {
-                                    logger.warn("store cannot be marked as corrupted", e);
-                                }
-                            }
-                            store.deleteQuiet(fileInfo.physicalName());
-                        } finally {
-                            latch.countDown();
-                        }
-                    }
-                });
-                success = true;
-            } finally {
-                if (!success) {
-                    try {
-                        IOUtils.closeWhileHandlingException(indexOutput);
-                        store.deleteQuiet(fileInfo.physicalName());
-                    } finally {
-                        latch.countDown();
-                    }
-                }
-            }
+            RecoveryState.File file = recoveryState.getIndex().file(fileInfo.name());
+            try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
+                try (final IndexOutput indexOutput = store.createVerifyingOutput(fileInfo.physicalName(), IOContext.DEFAULT, fileInfo.metadata())) {
+                    final byte[] buffer = new byte[BUFFER_SIZE];
+                    int length;
+                    while ((length = stream.read(buffer)) > 0) {
+                        indexOutput.writeBytes(buffer, 0, length);
+                        if (file != null) {
+                            file.updateRecovered(length);
+                        }
+                        if (restoreRateLimiter != null) {
+                            rateLimiterListener.onRestorePause(restoreRateLimiter.pause(length));
+                        }
+                    }
+                    Store.verify(indexOutput);
+                    indexOutput.close();
+                    // write the checksum
+                    if (fileInfo.metadata().hasLegacyChecksum()) {
+                        Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
+                        legacyChecksums.add(fileInfo.metadata());
+                        legacyChecksums.write(store);
+                    }
+                    store.directory().sync(Collections.singleton(fileInfo.physicalName()));
+                    recoveryState.getIndex().addRecoveredFileCount(1);
+                    success = true;
+                } catch (CorruptIndexException ex) {
+                    try {
+                        store.markStoreCorrupted(ex);
+                    } catch (IOException e) {
+                        logger.warn("store cannot be marked as corrupted", e);
+                    }
+                    throw ex;
+                } finally {
+                    if (success == false) {
+                        store.deleteQuiet(fileInfo.physicalName());
+                    }
+                }
+            }
         }
     }
 
     public interface RateLimiterListener {
 
         void onRestorePause(long nanos);
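A minimal, self-contained sketch of the copy-loop pattern the new restoreFile relies on: read into a fixed buffer, write through, and report each chunk so the caller can update recovery stats or pause on a rate limiter. ProgressListener and copyWithProgress are illustrative names, not part of this commit; note that exceptions simply propagate, which is what lets restoreFile drop the failures list and the latch.

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;

    final class StreamCopy {

        /** Invoked once per chunk, e.g. to update recovered byte counts or pause on a rate limiter. */
        interface ProgressListener {
            void onBytes(int length);
        }

        /** Copies in to out through a fixed-size buffer, reporting each chunk as it is written. */
        static long copyWithProgress(InputStream in, OutputStream out, int bufferSize,
                                     ProgressListener listener) throws IOException {
            final byte[] buffer = new byte[bufferSize];
            long total = 0;
            int length;
            while ((length = in.read(buffer)) > 0) {
                out.write(buffer, 0, length);
                listener.onBytes(length);
                total += length;
            }
            return total;
        }
    }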
@@ -0,0 +1,113 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.snapshots.blobstore;
+
+import org.apache.lucene.util.IOUtils;
+
+import java.io.IOException;
+import java.io.InputStream;
+
+/**
+ * A {@link SlicedInputStream} is a logical concatenation of one or more input streams. In
+ * contrast to the JDK's {@link java.io.SequenceInputStream} this stream doesn't require the
+ * instantiation of all logical sub-streams ahead of time. Instead, {@link #openSlice(long)}
+ * is called if a new slice is required. Each slice is closed once it's been fully consumed
+ * or if close is called before.
+ */
+public abstract class SlicedInputStream extends InputStream {
+
+    private long slice = 0;
+    private InputStream currentStream;
+    private final long numSlices;
+    private boolean initialized = false;
+
+    /**
+     * Creates a new SlicedInputStream
+     * @param numSlices the number of slices to consume
+     */
+    protected SlicedInputStream(final long numSlices) {
+        this.numSlices = numSlices;
+    }
+
+    private InputStream nextStream() throws IOException {
+        assert initialized == false || currentStream != null;
+        initialized = true;
+        IOUtils.close(currentStream);
+        if (slice < numSlices) {
+            currentStream = openSlice(slice++);
+        } else {
+            currentStream = null;
+        }
+        return currentStream;
+    }
+
+    /**
+     * Called for each logical slice given a zero based slice ordinal.
+     */
+    protected abstract InputStream openSlice(long slice) throws IOException;
+
+    private InputStream currentStream() throws IOException {
+        if (currentStream == null) {
+            return initialized ? null : nextStream();
+        }
+        return currentStream;
+    }
+
+    @Override
+    public final int read() throws IOException {
+        InputStream stream = currentStream();
+        if (stream == null) {
+            return -1;
+        }
+        final int read = stream.read();
+        if (read == -1) {
+            nextStream();
+            return read();
+        }
+        return read;
+    }
+
+    @Override
+    public final int read(byte[] buffer, int offset, int length) throws IOException {
+        final InputStream stream = currentStream();
+        if (stream == null) {
+            return -1;
+        }
+        final int read = stream.read(buffer, offset, length);
+        if (read <= 0) {
+            nextStream();
+            return read(buffer, offset, length);
+        }
+        return read;
+    }
+
+    @Override
+    public final void close() throws IOException {
+        IOUtils.close(currentStream);
+        initialized = true;
+        currentStream = null;
+    }
+
+    @Override
+    public final int available() throws IOException {
+        InputStream stream = currentStream();
+        return stream == null ? 0 : stream.available();
+    }
+
+}
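To make the slicing behaviour concrete, here is a hedged usage sketch (not part of the commit) that concatenates three in-memory slices; PartSliceStream above does exactly this, with container.openInput(info.partName(slice)) as the slice source.

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class SlicedInputStreamDemo {
        public static void main(String[] args) throws IOException {
            final byte[][] parts = { "hel".getBytes("UTF-8"), "lo ".getBytes("UTF-8"), "world".getBytes("UTF-8") };
            InputStream stream = new SlicedInputStream(parts.length) {
                @Override
                protected InputStream openSlice(long slice) throws IOException {
                    // Slices are opened lazily, one at a time, and closed once fully consumed.
                    return new ByteArrayInputStream(parts[(int) slice]);
                }
            };
            StringBuilder result = new StringBuilder();
            int b;
            while ((b = stream.read()) != -1) {
                result.append((char) b);
            }
            stream.close();
            System.out.println(result); // prints "hello world"
        }
    }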
@@ -22,16 +22,17 @@ package org.elasticsearch.repositories.blobstore;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
 import com.google.common.collect.Maps;
+import com.google.common.io.ByteStreams;
 import org.apache.lucene.store.RateLimiter;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
 import org.elasticsearch.common.compress.CompressorFactory;
@@ -52,6 +53,8 @@ import org.elasticsearch.snapshots.*;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
 import java.nio.file.NoSuchFileException;
 import java.util.ArrayList;
 import java.util.List;
@@ -103,7 +106,7 @@ import static com.google.common.collect.Lists.newArrayList;
  */
 public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository> implements Repository, RateLimiterListener {
 
-    private ImmutableBlobContainer snapshotsBlobContainer;
+    private BlobContainer snapshotsBlobContainer;
 
     protected final String repositoryName;
 
@@ -150,7 +153,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
      */
     @Override
     protected void doStart() throws ElasticsearchException {
-        this.snapshotsBlobContainer = blobStore().immutableBlobContainer(basePath());
+        this.snapshotsBlobContainer = blobStore().blobContainer(basePath());
         indexShardRepository.initialize(blobStore(), basePath(), chunkSize(), snapshotRateLimiter, restoreRateLimiter, this);
     }
 
@@ -225,16 +228,20 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
                 throw new InvalidSnapshotNameException(snapshotId, "snapshot with such name already exists");
             }
             BytesReference bRef = bStream.bytes();
-            snapshotsBlobContainer.writeBlob(snapshotBlobName, bRef.streamInput(), bRef.length());
+            try (OutputStream output = snapshotsBlobContainer.createOutput(snapshotBlobName)) {
+                bRef.writeTo(output);
+            }
             // Write Global MetaData
             // TODO: Check if metadata needs to be written
             bStream = writeGlobalMetaData(metaData);
             bRef = bStream.bytes();
-            snapshotsBlobContainer.writeBlob(metaDataBlobName(snapshotId), bRef.streamInput(), bRef.length());
+            try (OutputStream output = snapshotsBlobContainer.createOutput(metaDataBlobName(snapshotId))) {
+                bRef.writeTo(output);
+            }
             for (String index : indices) {
                 IndexMetaData indexMetaData = metaData.index(index);
                 BlobPath indexPath = basePath().add("indices").add(index);
-                ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
+                BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
                 bStream = new BytesStreamOutput();
                 StreamOutput stream = bStream;
                 if (isCompress()) {
@@ -246,7 +253,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
                 builder.endObject();
                 builder.close();
                 bRef = bStream.bytes();
-                indexMetaDataBlobContainer.writeBlob(snapshotBlobName(snapshotId), bRef.streamInput(), bRef.length());
+                try (OutputStream output = indexMetaDataBlobContainer.createOutput(snapshotBlobName(snapshotId))) {
+                    bRef.writeTo(output);
+                }
             }
         } catch (IOException ex) {
             throw new SnapshotCreationException(snapshotId, ex);
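The hunks above all apply the same replacement pattern: instead of handing the container an InputStream plus an explicit length (writeBlob), the caller opens an OutputStream via createOutput and writes into it, with try-with-resources guaranteeing the stream is closed even when the write fails. A self-contained sketch of that shape, assuming a hypothetical InMemoryContainer in place of a real BlobContainer implementation:

    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.HashMap;
    import java.util.Map;

    final class InMemoryContainer {
        private final Map<String, byte[]> blobs = new HashMap<>();

        /** Mirrors the createOutput contract: the blob becomes readable once the stream is closed. */
        OutputStream createOutput(final String blobName) {
            return new ByteArrayOutputStream() {
                @Override
                public void close() throws IOException {
                    super.close();
                    blobs.put(blobName, toByteArray());
                }
            };
        }

        byte[] read(String blobName) {
            return blobs.get(blobName);
        }

        public static void main(String[] args) throws IOException {
            InMemoryContainer container = new InMemoryContainer();
            byte[] metaDataBytes = "{\"snapshot\":\"s1\"}".getBytes("UTF-8");
            try (OutputStream output = container.createOutput("snapshot-s1")) {
                output.write(metaDataBytes); // the analogue of bRef.writeTo(output)
            }
            System.out.println(new String(container.read("snapshot-s1"), "UTF-8"));
        }
    }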
@@ -280,7 +289,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
             // Now delete all indices
             for (String index : snapshot.indices()) {
                 BlobPath indexPath = basePath().add("indices").add(index);
-                ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
+                BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
                 try {
                     indexMetaDataBlobContainer.deleteBlob(blobName);
                 } catch (IOException ex) {
@@ -327,7 +336,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
             snapshot = updatedSnapshot.build();
             BytesStreamOutput bStream = writeSnapshot(snapshot);
             BytesReference bRef = bStream.bytes();
-            snapshotsBlobContainer.writeBlob(blobName, bRef.streamInput(), bRef.length());
+            try (OutputStream output = snapshotsBlobContainer.createOutput(blobName)) {
+                bRef.writeTo(output);
+            }
             ImmutableList<SnapshotId> snapshotIds = snapshots();
             if (!snapshotIds.contains(snapshotId)) {
                 snapshotIds = ImmutableList.<SnapshotId>builder().addAll(snapshotIds).add(snapshotId).build();
@@ -381,22 +392,24 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
         String blobName = snapshotBlobName(snapshotId);
         int retryCount = 0;
         while (true) {
-            byte[] data = snapshotsBlobContainer.readBlobFully(blobName);
-            // Because we are overriding snapshot during finalization, it's possible that
-            // we can get an empty or incomplete snapshot for a brief moment
-            // retrying after some what can resolve the issue
-            // TODO: switch to atomic update after non-local gateways are removed and we switch to java 1.7
-            try {
-                return readSnapshot(data);
-            } catch (ElasticsearchParseException ex) {
-                if (retryCount++ < 3) {
-                    try {
-                        Thread.sleep(50);
-                    } catch (InterruptedException ex1) {
-                        Thread.currentThread().interrupt();
-                    }
-                } else {
-                    throw ex;
-                }
-            }
+            try (InputStream blob = snapshotsBlobContainer.openInput(blobName)) {
+                byte[] data = ByteStreams.toByteArray(blob);
+                // Because we are overriding snapshot during finalization, it's possible that
+                // we can get an empty or incomplete snapshot for a brief moment;
+                // retrying after some wait can resolve the issue
+                // TODO: switch to atomic update after non-local gateways are removed and we switch to java 1.7
+                try {
+                    return readSnapshot(data);
+                } catch (ElasticsearchParseException ex) {
+                    if (retryCount++ < 3) {
+                        try {
+                            Thread.sleep(50);
+                        } catch (InterruptedException ex1) {
+                            Thread.currentThread().interrupt();
+                        }
+                    } else {
+                        throw ex;
+                    }
+                }
+            }
         }
     }
@@ -409,8 +422,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
 
     private MetaData readSnapshotMetaData(SnapshotId snapshotId, ImmutableList<String> indices, boolean ignoreIndexErrors) {
         MetaData metaData;
-        try {
-            byte[] data = snapshotsBlobContainer.readBlobFully(metaDataBlobName(snapshotId));
+        try (InputStream blob = snapshotsBlobContainer.openInput(metaDataBlobName(snapshotId))) {
+            byte[] data = ByteStreams.toByteArray(blob);
             metaData = readMetaData(data);
         } catch (FileNotFoundException | NoSuchFileException ex) {
             throw new SnapshotMissingException(snapshotId, ex);
@@ -420,9 +433,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
         MetaData.Builder metaDataBuilder = MetaData.builder(metaData);
         for (String index : indices) {
             BlobPath indexPath = basePath().add("indices").add(index);
-            ImmutableBlobContainer indexMetaDataBlobContainer = blobStore().immutableBlobContainer(indexPath);
-            try {
-                byte[] data = indexMetaDataBlobContainer.readBlobFully(snapshotBlobName(snapshotId));
+            BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
+            try (InputStream blob = indexMetaDataBlobContainer.openInput(snapshotBlobName(snapshotId))) {
+                byte[] data = ByteStreams.toByteArray(blob);
                 try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
                     XContentParser.Token token;
                     if ((token = parser.nextToken()) == XContentParser.Token.START_OBJECT) {
@ -599,7 +612,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Rep
|
||||||
builder.endObject();
|
builder.endObject();
|
||||||
builder.close();
|
builder.close();
|
||||||
BytesReference bRef = bStream.bytes();
|
BytesReference bRef = bStream.bytes();
|
||||||
snapshotsBlobContainer.writeBlob(SNAPSHOTS_FILE, bRef.streamInput(), bRef.length());
|
try (OutputStream output = snapshotsBlobContainer.createOutput(SNAPSHOTS_FILE)) {
|
||||||
|
bRef.writeTo(output);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@@ -611,23 +626,25 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent<Repository>
      * @throws IOException I/O errors
      */
     protected ImmutableList<SnapshotId> readSnapshotList() throws IOException {
-        byte[] data = snapshotsBlobContainer.readBlobFully(SNAPSHOTS_FILE);
-        ArrayList<SnapshotId> snapshots = new ArrayList<>();
-        try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
-            if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
-                if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
-                    String currentFieldName = parser.currentName();
-                    if ("snapshots".equals(currentFieldName)) {
-                        if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
-                            while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
-                                snapshots.add(new SnapshotId(repositoryName, parser.text()));
+        try (InputStream blob = snapshotsBlobContainer.openInput(SNAPSHOTS_FILE)) {
+            final byte[] data = ByteStreams.toByteArray(blob);
+            ArrayList<SnapshotId> snapshots = new ArrayList<>();
+            try (XContentParser parser = XContentHelper.createParser(data, 0, data.length)) {
+                if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
+                    if (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
+                        String currentFieldName = parser.currentName();
+                        if ("snapshots".equals(currentFieldName)) {
+                            if (parser.nextToken() == XContentParser.Token.START_ARRAY) {
+                                while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
+                                    snapshots.add(new SnapshotId(repositoryName, parser.text()));
+                                }
                             }
                         }
                     }
                 }
             }
+            return ImmutableList.copyOf(snapshots);
         }
-        return ImmutableList.copyOf(snapshots);
     }
 
     @Override
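The read side is symmetric: openInput hands back a plain InputStream and the caller drains it, here with Guava's ByteStreams.toByteArray (imported at the top of the file), instead of calling the removed readBlobFully. A hedged, self-contained sketch of that pattern, with openInput stubbed by an in-memory stream rather than a real container:

    import com.google.common.io.ByteStreams;

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    final class ReadFullyDemo {

        // Stand-in for snapshotsBlobContainer.openInput(blobName); not the Elasticsearch API.
        static InputStream openInput(String blobName) {
            return new ByteArrayInputStream("{\"snapshots\":[\"snap-1\"]}".getBytes());
        }

        public static void main(String[] args) throws IOException {
            try (InputStream blob = openInput("index")) {
                byte[] data = ByteStreams.toByteArray(blob); // reads until EOF, like the old readBlobFully
                System.out.println(data.length + " bytes read");
            }
        }
    }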
@@ -66,7 +66,7 @@ public class FsRepository extends BlobStoreRepository {
      * @throws IOException
      */
     @Inject
-    public FsRepository(RepositoryName name, RepositorySettings repositorySettings, ThreadPool threadPool, IndexShardRepository indexShardRepository) throws IOException {
+    public FsRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
         super(name.getName(), repositorySettings, indexShardRepository);
         File locationFile;
         String location = repositorySettings.settings().get("location", componentSettings.get("location"));
@@ -76,7 +76,7 @@ public class FsRepository extends BlobStoreRepository {
         } else {
             locationFile = new File(location);
         }
-        blobStore = new FsBlobStore(componentSettings, threadPool, locationFile);
+        blobStore = new FsBlobStore(componentSettings, locationFile);
         this.chunkSize = repositorySettings.settings().getAsBytesSize("chunk_size", componentSettings.getAsBytesSize("chunk_size", null));
         this.compress = repositorySettings.settings().getAsBoolean("compress", componentSettings.getAsBoolean("compress", false));
         this.basePath = BlobPath.cleanPath();
@@ -63,7 +63,7 @@ public class URLRepository extends BlobStoreRepository {
      * @throws IOException
      */
     @Inject
-    public URLRepository(RepositoryName name, RepositorySettings repositorySettings, ThreadPool threadPool, IndexShardRepository indexShardRepository) throws IOException {
+    public URLRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
         super(name.getName(), repositorySettings, indexShardRepository);
         URL url;
         String path = repositorySettings.settings().get("url", componentSettings.get("url"));
@@ -73,7 +73,7 @@ public class URLRepository extends BlobStoreRepository {
             url = new URL(path);
         }
         listDirectories = repositorySettings.settings().getAsBoolean("list_directories", componentSettings.getAsBoolean("list_directories", true));
-        blobStore = new URLBlobStore(componentSettings, threadPool, url);
+        blobStore = new URLBlobStore(componentSettings, url);
         basePath = BlobPath.cleanPath();
     }
@@ -65,7 +65,6 @@ public class RestThreadPoolAction extends AbstractCatAction {
             ThreadPool.Names.REFRESH,
             ThreadPool.Names.SEARCH,
             ThreadPool.Names.SNAPSHOT,
-            ThreadPool.Names.SNAPSHOT_DATA,
             ThreadPool.Names.SUGGEST,
             ThreadPool.Names.WARMER
     };
@@ -78,7 +78,6 @@ public class ThreadPool extends AbstractComponent {
         public static final String REFRESH = "refresh";
         public static final String WARMER = "warmer";
         public static final String SNAPSHOT = "snapshot";
-        public static final String SNAPSHOT_DATA = "snapshot_data";
         public static final String OPTIMIZE = "optimize";
         public static final String BENCH = "bench";
     }
@@ -125,7 +124,6 @@ public class ThreadPool extends AbstractComponent {
                 .put(Names.REFRESH, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt10).build())
                 .put(Names.WARMER, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
                 .put(Names.SNAPSHOT, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
-                .put(Names.SNAPSHOT_DATA, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", 5).build())
                 .put(Names.OPTIMIZE, settingsBuilder().put("type", "fixed").put("size", 1).build())
                 .put(Names.BENCH, settingsBuilder().put("type", "scaling").put("keep_alive", "5m").put("size", halfProcMaxAt5).build())
                 .build();
@@ -0,0 +1,67 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.common.blobstore;
+
+import com.carrotsearch.randomizedtesting.LifecycleScope;
+import org.apache.lucene.util.BytesRef;
+import org.elasticsearch.common.blobstore.fs.FsBlobStore;
+import org.elasticsearch.common.settings.ImmutableSettings;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.common.unit.ByteSizeValue;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Arrays;
+import java.util.Random;
+
+public class BlobStoreTest extends ElasticsearchTestCase {
+
+    @Test
+    public void testWriteRead() throws IOException {
+        BlobContainer store = newBlobContainer();
+        int length = randomIntBetween(10, scaledRandomIntBetween(1024, 1 << 16));
+        byte[] data = new byte[length];
+        for (int i = 0; i < data.length; i++) {
+            data[i] = (byte) randomInt();
+        }
+        try (OutputStream stream = store.createOutput("foobar")) {
+            stream.write(data);
+        }
+        InputStream stream = store.openInput("foobar");
+
+        BytesRef target = new BytesRef();
+        while (target.length < data.length) {
+            byte[] buffer = new byte[scaledRandomIntBetween(1, data.length - target.length)];
+            int offset = scaledRandomIntBetween(0, buffer.length - 1);
+            int read = stream.read(buffer, offset, buffer.length - offset);
+            target.append(new BytesRef(buffer, offset, read));
+        }
+        assertEquals(data.length, target.length);
+        assertArrayEquals(data, Arrays.copyOfRange(target.bytes, target.offset, target.length));
+    }
+
+    protected BlobContainer newBlobContainer() {
+        File tempDir = newTempDir(LifecycleScope.TEST);
+        Settings settings = randomBoolean() ? ImmutableSettings.EMPTY : ImmutableSettings.builder().put("buffer_size", new ByteSizeValue(randomIntBetween(1, 100), ByteSizeUnit.KB)).build();
+        FsBlobStore store = new FsBlobStore(settings, tempDir);
+        return store.blobContainer(new BlobPath());
+    }
+}
@@ -0,0 +1,133 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.elasticsearch.index.snapshots.blobstore;
+
+import com.carrotsearch.randomizedtesting.generators.RandomInts;
+import org.elasticsearch.test.ElasticsearchTestCase;
+import org.junit.Test;
+
+import java.io.*;
+import java.util.Random;
+
+import static org.hamcrest.Matchers.equalTo;
+
+public class SlicedInputStreamTest extends ElasticsearchTestCase {
+
+    @Test
+    public void readRandom() throws IOException {
+        int parts = randomIntBetween(1, 20);
+        ByteArrayOutputStream stream = new ByteArrayOutputStream();
+        int numWriteOps = scaledRandomIntBetween(1000, 10000);
+        final long seed = randomLong();
+        Random random = new Random(seed);
+        for (int i = 0; i < numWriteOps; i++) {
+            switch (random.nextInt(5)) {
+                case 1:
+                    stream.write(random.nextInt(Byte.MAX_VALUE));
+                    break;
+                default:
+                    stream.write(randomBytes(random));
+                    break;
+            }
+        }
+
+        final CheckClosedInputStream[] streams = new CheckClosedInputStream[parts];
+        byte[] bytes = stream.toByteArray();
+        int slice = bytes.length / parts;
+        int offset = 0;
+        int length;
+        for (int i = 0; i < parts; i++) {
+            length = i == parts - 1 ? bytes.length - offset : slice;
+            streams[i] = new CheckClosedInputStream(new ByteArrayInputStream(bytes, offset, length));
+            offset += length;
+        }
+
+        SlicedInputStream input = new SlicedInputStream(parts) {
+            @Override
+            protected InputStream openSlice(long slice) throws IOException {
+                return streams[(int) slice];
+            }
+        };
+        random = new Random(seed);
+        assertThat(input.available(), equalTo(streams[0].available()));
+        for (int i = 0; i < numWriteOps; i++) {
+            switch (random.nextInt(5)) {
+                case 1:
+                    assertThat(random.nextInt(Byte.MAX_VALUE), equalTo(input.read()));
+                    break;
+                default:
+                    byte[] b = randomBytes(random);
+                    byte[] buffer = new byte[b.length];
+                    int read = readFully(input, buffer);
+                    assertThat(b.length, equalTo(read));
+                    assertArrayEquals(b, buffer);
+                    break;
+            }
+        }
+
+        assertThat(input.available(), equalTo(0));
+        for (int i = 0; i < streams.length - 1; i++) {
+            assertTrue(streams[i].closed);
+        }
+        input.close();
+
+        for (int i = 0; i < streams.length; i++) {
+            assertTrue(streams[i].closed);
+        }
+    }
+
+    private int readFully(InputStream stream, byte[] buffer) throws IOException {
+        for (int i = 0; i < buffer.length;) {
+            int read = stream.read(buffer, i, buffer.length - i);
+            if (read == -1) {
+                if (i == 0) {
+                    return -1;
+                } else {
+                    return i;
+                }
+            }
+            i += read;
+        }
+        return buffer.length;
+    }
+
+    private byte[] randomBytes(Random random) {
+        int length = RandomInts.randomIntBetween(random, 1, 10);
+        byte[] data = new byte[length];
+        random.nextBytes(data);
+        return data;
+    }
+
+    private static final class CheckClosedInputStream extends FilterInputStream {
+
+        public boolean closed = false;
+
+        public CheckClosedInputStream(InputStream in) {
+            super(in);
+        }
+
+        @Override
+        public void close() throws IOException {
+            closed = true;
+            super.close();
+        }
+    }
+}
@@ -21,31 +21,22 @@ package org.elasticsearch.snapshots.mockstore;
 import com.google.common.collect.ImmutableMap;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.blobstore.BlobContainer;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 
 /**
  *
  */
-public class ImmutableBlobContainerWrapper implements ImmutableBlobContainer {
-    private ImmutableBlobContainer delegate;
+public class BlobContainerWrapper implements BlobContainer {
+    private BlobContainer delegate;
 
-    public ImmutableBlobContainerWrapper(ImmutableBlobContainer delegate) {
+    public BlobContainerWrapper(BlobContainer delegate) {
         this.delegate = delegate;
     }
 
-    @Override
-    public void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener) {
-        delegate.writeBlob(blobName, is, sizeInBytes, listener);
-    }
-
-    @Override
-    public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
-        delegate.writeBlob(blobName, is, sizeInBytes);
-    }
-
     @Override
     public BlobPath path() {
         return delegate.path();
@@ -57,13 +48,13 @@ public class ImmutableBlobContainerWrapper implements ImmutableBlobContainer {
     }
 
     @Override
-    public void readBlob(String blobName, ReadBlobListener listener) {
-        delegate.readBlob(blobName, listener);
+    public InputStream openInput(String name) throws IOException {
+        return delegate.openInput(name);
     }
 
     @Override
-    public byte[] readBlobFully(String blobName) throws IOException {
-        return delegate.readBlobFully(blobName);
+    public OutputStream createOutput(String blobName) throws IOException {
+        return delegate.createOutput(blobName);
     }
 
     @Override
@@ -18,9 +18,9 @@
  */
 package org.elasticsearch.snapshots.mockstore;
 
+import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
 
 /**
  *
@@ -34,8 +34,8 @@ public class BlobStoreWrapper implements BlobStore {
     }
 
     @Override
-    public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
-        return delegate.immutableBlobContainer(path);
+    public BlobContainer blobContainer(BlobPath path) {
+        return delegate.blobContainer(path);
     }
 
     @Override
@@ -24,16 +24,16 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.common.blobstore.BlobMetaData;
 import org.elasticsearch.common.blobstore.BlobPath;
 import org.elasticsearch.common.blobstore.BlobStore;
-import org.elasticsearch.common.blobstore.ImmutableBlobContainer;
+import org.elasticsearch.common.blobstore.BlobContainer;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.index.snapshots.IndexShardRepository;
 import org.elasticsearch.repositories.RepositoryName;
 import org.elasticsearch.repositories.RepositorySettings;
 import org.elasticsearch.repositories.fs.FsRepository;
-import org.elasticsearch.threadpool.ThreadPool;
 
 import java.io.IOException;
 import java.io.InputStream;
+import java.io.OutputStream;
 import java.io.UnsupportedEncodingException;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
@@ -47,10 +47,6 @@ public class MockRepository extends FsRepository {
 
     private final AtomicLong failureCounter = new AtomicLong();
 
-    public void resetFailureCount() {
-        failureCounter.set(0);
-    }
-
     public long getFailureCount() {
         return failureCounter.get();
     }
@@ -72,8 +68,8 @@ public class MockRepository extends FsRepository {
     private volatile boolean blocked = false;
 
     @Inject
-    public MockRepository(RepositoryName name, ThreadPool threadPool, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
-        super(name, repositorySettings, threadPool, indexShardRepository);
+    public MockRepository(RepositoryName name, RepositorySettings repositorySettings, IndexShardRepository indexShardRepository) throws IOException {
+        super(name, repositorySettings, indexShardRepository);
         randomControlIOExceptionRate = repositorySettings.settings().getAsDouble("random_control_io_exception_rate", 0.0);
         randomDataFileIOExceptionRate = repositorySettings.settings().getAsDouble("random_data_file_io_exception_rate", 0.0);
         blockOnControlFiles = repositorySettings.settings().getAsBoolean("block_on_control", false);
@@ -134,8 +130,8 @@ public class MockRepository extends FsRepository {
     }
 
     @Override
-    public ImmutableBlobContainer immutableBlobContainer(BlobPath path) {
-        return new MockImmutableBlobContainer(super.immutableBlobContainer(path));
+    public BlobContainer blobContainer(BlobPath path) {
+        return new MockBlobContainer(super.blobContainer(path));
     }
 
     public synchronized void unblockExecution() {
@@ -166,7 +162,7 @@ public class MockRepository extends FsRepository {
         return wasBlocked;
     }
 
-    private class MockImmutableBlobContainer extends ImmutableBlobContainerWrapper {
+    private class MockBlobContainer extends BlobContainerWrapper {
         private MessageDigest digest;
 
         private boolean shouldFail(String blobName, double probability) {
@@ -232,60 +228,20 @@ public class MockRepository extends FsRepository {
             }
         }
 
-        private boolean maybeIOExceptionOrBlock(String blobName, ImmutableBlobContainer.WriterListener listener) {
-            try {
-                maybeIOExceptionOrBlock(blobName);
-                return true;
-            } catch (IOException ex) {
-                listener.onFailure(ex);
-                return false;
-            }
-        }
-
-        private boolean maybeIOExceptionOrBlock(String blobName, ImmutableBlobContainer.ReadBlobListener listener) {
-            try {
-                maybeIOExceptionOrBlock(blobName);
-                return true;
-            } catch (IOException ex) {
-                listener.onFailure(ex);
-                return false;
-            }
-        }
-
-        public MockImmutableBlobContainer(ImmutableBlobContainer delegate) {
+        public MockBlobContainer(BlobContainer delegate) {
             super(delegate);
         }
 
-        @Override
-        public void writeBlob(String blobName, InputStream is, long sizeInBytes, WriterListener listener) {
-            if (maybeIOExceptionOrBlock(blobName, listener)) {
-                super.writeBlob(blobName, is, sizeInBytes, listener);
-            }
-        }
-
-        @Override
-        public void writeBlob(String blobName, InputStream is, long sizeInBytes) throws IOException {
-            maybeIOExceptionOrBlock(blobName);
-            super.writeBlob(blobName, is, sizeInBytes);
-        }
-
         @Override
         public boolean blobExists(String blobName) {
             return super.blobExists(blobName);
         }
 
         @Override
-        public void readBlob(String blobName, ReadBlobListener listener) {
-            if (maybeIOExceptionOrBlock(blobName, listener)) {
-                super.readBlob(blobName, listener);
-            }
-        }
-
-        @Override
-        public byte[] readBlobFully(String blobName) throws IOException {
-            maybeIOExceptionOrBlock(blobName);
-            return super.readBlobFully(blobName);
+        public InputStream openInput(String name) throws IOException {
+            maybeIOExceptionOrBlock(name);
+            return super.openInput(name);
         }
 
         @Override
@@ -318,6 +274,11 @@ public class MockRepository extends FsRepository {
             return super.listBlobsByPrefix(blobNamePrefix);
         }
 
+        @Override
+        public OutputStream createOutput(String blobName) throws IOException {
+            maybeIOExceptionOrBlock(blobName);
+            return super.createOutput(blobName);
+        }
         }
     }
 }
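With the listener overloads gone, fault injection in the mock collapses to a single maybeIOExceptionOrBlock hook at the head of each stream method. A self-contained sketch of the same idea outside the test framework (FlakyContainer and its names are illustrative, not this commit's API):

    import java.io.IOException;
    import java.io.OutputStream;
    import java.util.Random;

    abstract class FlakyContainer {
        private final double failureRate;
        private final Random random = new Random();

        FlakyContainer(double failureRate) {
            this.failureRate = failureRate;
        }

        /** The real operation being wrapped, e.g. a delegate's createOutput. */
        protected abstract OutputStream delegateCreateOutput(String blobName) throws IOException;

        /** Fails a configurable fraction of calls up front, like random_data_file_io_exception_rate. */
        public OutputStream createOutput(String blobName) throws IOException {
            if (random.nextDouble() < failureRate) {
                throw new IOException("simulated failure creating [" + blobName + "]");
            }
            return delegateCreateOutput(blobName);
        }
    }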
@@ -366,7 +366,7 @@ public final class InternalTestCluster extends TestCluster {
         for (String name : Arrays.asList(ThreadPool.Names.BULK, ThreadPool.Names.FLUSH, ThreadPool.Names.GET,
                 ThreadPool.Names.INDEX, ThreadPool.Names.MANAGEMENT, ThreadPool.Names.MERGE, ThreadPool.Names.OPTIMIZE,
                 ThreadPool.Names.PERCOLATE, ThreadPool.Names.REFRESH, ThreadPool.Names.SEARCH, ThreadPool.Names.SNAPSHOT,
-                ThreadPool.Names.SNAPSHOT_DATA, ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
+                ThreadPool.Names.SUGGEST, ThreadPool.Names.WARMER)) {
             if (random.nextBoolean()) {
                 final String type = RandomPicks.randomFrom(random, Arrays.asList("fixed", "cached", "scaling"));
                 builder.put(ThreadPool.THREADPOOL_GROUP + name + ".type", type);