Clean and refactor the way the fs index gateway works; should work more nicely with NFS
parent 0586bcd003
commit 38d8fad8d0
IndexShardGateway.java
@@ -19,6 +19,7 @@
 
 package org.elasticsearch.index.gateway;
 
+import org.elasticsearch.ElasticSearchIllegalStateException;
 import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
 import org.elasticsearch.index.shard.IndexShardComponent;
 import org.elasticsearch.index.translog.Translog;
@@ -36,7 +37,7 @@ public interface IndexShardGateway extends IndexShardComponent {
     /**
      * Snapshots the given shard into the gateway.
      */
-    void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot);
+    void snapshot(Snapshot snapshot);
 
     /**
      * Returns <tt>true</tt> if this gateway requires scheduling management for snapshot
@@ -45,4 +46,68 @@ public interface IndexShardGateway extends IndexShardComponent {
     boolean requiresSnapshotScheduling();
 
     void close();
+
+    public static class Snapshot {
+        private final SnapshotIndexCommit indexCommit;
+        private final Translog.Snapshot translogSnapshot;
+
+        private final long lastIndexVersion;
+        private final long lastTranslogId;
+        private final int lastTranslogSize;
+
+        public Snapshot(SnapshotIndexCommit indexCommit, Translog.Snapshot translogSnapshot, long lastIndexVersion, long lastTranslogId, int lastTranslogSize) {
+            this.indexCommit = indexCommit;
+            this.translogSnapshot = translogSnapshot;
+            this.lastIndexVersion = lastIndexVersion;
+            this.lastTranslogId = lastTranslogId;
+            this.lastTranslogSize = lastTranslogSize;
+        }
+
+        /**
+         * Indicates that the index has changed from the latest snapshot.
+         */
+        public boolean indexChanged() {
+            return lastIndexVersion != indexCommit.getVersion();
+        }
+
+        /**
+         * Indicates that a new transaction log has been created. Note check this <b>before</b> you
+         * check {@link #sameTranslogNewOperations()}.
+         */
+        public boolean newTranslogCreated() {
+            return translogSnapshot.translogId() != lastTranslogId;
+        }
+
+        /**
+         * Indicates that the same translog exists, but new operations have been appended to it. Throws
+         * {@link ElasticSearchIllegalStateException} if {@link #newTranslogCreated()} is <tt>true</tt>, so
+         * always check that first.
+         */
+        public boolean sameTranslogNewOperations() {
+            if (newTranslogCreated()) {
+                throw new ElasticSearchIllegalStateException("Should not be called when there is a new translog");
+            }
+            return translogSnapshot.size() > lastTranslogSize;
+        }
+
+        public SnapshotIndexCommit indexCommit() {
+            return indexCommit;
+        }
+
+        public Translog.Snapshot translogSnapshot() {
+            return translogSnapshot;
+        }
+
+        public long lastIndexVersion() {
+            return lastIndexVersion;
+        }
+
+        public long lastTranslogId() {
+            return lastTranslogId;
+        }
+
+        public int lastTranslogSize() {
+            return lastTranslogSize;
+        }
+    }
 }
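Note: the sketch below is not part of the commit; it is a minimal illustration of how an IndexShardGateway implementation is expected to drive the new Snapshot holder. The class name SketchShardGateway is made up — the real consumers are FsIndexShardGateway and NoneIndexShardGateway further down. The Javadoc above requires newTranslogCreated() to be checked before sameTranslogNewOperations(), which is the order shown here.

import org.elasticsearch.index.gateway.IndexShardGateway;

// Hypothetical gateway implementation, trimmed down to the decision flow only.
public class SketchShardGateway {

    public void snapshot(IndexShardGateway.Snapshot snapshot) {
        if (snapshot.indexChanged()) {
            // the index moved past snapshot.lastIndexVersion(): copy the files referenced
            // by snapshot.indexCommit() into the gateway
        }
        if (snapshot.newTranslogCreated()) {
            // a new translog id: write the full snapshot.translogSnapshot() to a new file,
            // then drop the file for snapshot.lastTranslogId()
        } else if (snapshot.sameTranslogNewOperations()) {
            // same translog, more operations: append only what comes after snapshot.lastTranslogSize()
        }
    }
}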
IndexShardGatewayService.java
@@ -110,14 +110,9 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
         StopWatch stopWatch = new StopWatch().start();
         RecoveryStatus recoveryStatus = shardGateway.recover();
 
-        // update the last up to date values
-        indexShard.snapshot(new Engine.SnapshotHandler() {
-            @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
-                lastIndexVersion = snapshotIndexCommit.getVersion();
-                lastTranslogId = translogSnapshot.translogId();
-                lastTranslogSize = translogSnapshot.size();
-            }
-        });
+        lastIndexVersion = recoveryStatus.index().version();
+        lastTranslogId = recoveryStatus.translog().translogId();
+        lastTranslogSize = recoveryStatus.translog().numberOfOperations();
 
         // start the shard if the gateway has not started it already
         if (indexShard.state() != IndexShardState.STARTED) {
@@ -127,8 +122,8 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
             if (logger.isDebugEnabled()) {
                 StringBuilder sb = new StringBuilder();
                 sb.append("Recovery completed from ").append(shardGateway).append(", took [").append(stopWatch.totalTime()).append("]\n");
                 sb.append(" Index : numberOfFiles [").append(recoveryStatus.index().numberOfFiles()).append("] with totalSize [").append(recoveryStatus.index().totalSize()).append("]\n");
-                sb.append(" Translog : numberOfOperations [").append(recoveryStatus.translog().numberOfOperations()).append("] with totalSize [").append(recoveryStatus.translog().totalSize()).append("]");
+                sb.append(" Translog : translogId [").append(recoveryStatus.translog().translogId()).append(", numberOfOperations [").append(recoveryStatus.translog().numberOfOperations()).append("] with totalSize [").append(recoveryStatus.translog().totalSize()).append("]");
                 logger.debug(sb.toString());
             }
             // refresh the shard
@@ -156,7 +151,7 @@ public class IndexShardGatewayService extends AbstractIndexShardComponent implem
         @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
             if (lastIndexVersion != snapshotIndexCommit.getVersion() || lastTranslogId != translogSnapshot.translogId() || lastTranslogSize != translogSnapshot.size()) {
 
-                shardGateway.snapshot(snapshotIndexCommit, translogSnapshot);
+                shardGateway.snapshot(new IndexShardGateway.Snapshot(snapshotIndexCommit, translogSnapshot, lastIndexVersion, lastTranslogId, lastTranslogSize));
 
                 lastIndexVersion = snapshotIndexCommit.getVersion();
                 lastTranslogId = translogSnapshot.translogId();
RecoveryStatus.java
@@ -44,14 +44,23 @@ public class RecoveryStatus {
     }
 
     public static class Translog {
+        private long translogId;
         private int numberOfOperations;
         private SizeValue totalSize;
 
-        public Translog(int numberOfOperations, SizeValue totalSize) {
+        public Translog(long translogId, int numberOfOperations, SizeValue totalSize) {
+            this.translogId = translogId;
            this.numberOfOperations = numberOfOperations;
            this.totalSize = totalSize;
        }
 
+        /**
+         * The translog id recovered, <tt>-1</tt> indicating no translog.
+         */
+        public long translogId() {
+            return translogId;
+        }
+
        public int numberOfOperations() {
            return numberOfOperations;
        }
@@ -62,14 +71,19 @@ public class RecoveryStatus {
        }
 
    public static class Index {
+        private long version;
        private int numberOfFiles;
        private SizeValue totalSize;
 
-        public Index(int numberOfFiles, SizeValue totalSize) {
+        public Index(long version, int numberOfFiles, SizeValue totalSize) {
            this.numberOfFiles = numberOfFiles;
            this.totalSize = totalSize;
        }
 
+        public long version() {
+            return this.version;
+        }
+
        public int numberOfFiles() {
            return numberOfFiles;
        }
FsIndexShardGateway.java
@@ -20,11 +20,9 @@
 package org.elasticsearch.index.gateway.fs;
 
 import com.google.inject.Inject;
+import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.store.IndexInput;
-import org.elasticsearch.ElasticSearchIllegalStateException;
 import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
-import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.engine.EngineException;
 import org.elasticsearch.index.gateway.IndexShardGateway;
 import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
 import org.elasticsearch.index.gateway.IndexShardGatewaySnapshotFailedException;
@@ -74,12 +72,6 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
 
     private final File locationTranslog;
 
-    private volatile long lastIndexVersion;
-
-    private volatile long lastTranslogId = -1;
-
-    private volatile int lastTranslogSize;
-
     @Inject public FsIndexShardGateway(ShardId shardId, @IndexSettings Settings indexSettings, ThreadPool threadPool, FsIndexGateway fsIndexGateway, IndexShard indexShard, Store store) {
         super(shardId, indexSettings);
         this.threadPool = threadPool;
@@ -107,27 +99,24 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
     @Override public RecoveryStatus recover() throws IndexShardGatewayRecoveryException {
         RecoveryStatus.Index recoveryStatusIndex = recoverIndex();
         RecoveryStatus.Translog recoveryStatusTranslog = recoverTranslog();
-        // update the last up to date values
-        indexShard.snapshot(new Engine.SnapshotHandler() {
-            @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) throws EngineException {
-                lastIndexVersion = snapshotIndexCommit.getVersion();
-                lastTranslogId = translogSnapshot.translogId();
-                lastTranslogSize = translogSnapshot.size();
-            }
-        });
         return new RecoveryStatus(recoveryStatusIndex, recoveryStatusTranslog);
     }
 
-    @Override public void snapshot(final SnapshotIndexCommit snapshotIndexCommit, final Translog.Snapshot translogSnapshot) {
+    @Override public void snapshot(Snapshot snapshot) {
         boolean indexDirty = false;
         boolean translogDirty = false;
 
-        if (lastIndexVersion != snapshotIndexCommit.getVersion()) {
+        final SnapshotIndexCommit snapshotIndexCommit = snapshot.indexCommit();
+        final Translog.Snapshot translogSnapshot = snapshot.translogSnapshot();
+
+        if (snapshot.indexChanged()) {
            indexDirty = true;
            // snapshot into the index
            final CountDownLatch latch = new CountDownLatch(snapshotIndexCommit.getFiles().length);
            final AtomicReference<Exception> lastException = new AtomicReference<Exception>();
            for (final String fileName : snapshotIndexCommit.getFiles()) {
+                // don't copy over the segments file, it will be copied over later on as part of the
+                // final snapshot phase
                if (fileName.equals(snapshotIndexCommit.getSegmentsFileName())) {
                    latch.countDown();
                    continue;
@@ -170,21 +159,14 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
        File translogFile = new File(locationTranslog, "translog-" + translogSnapshot.translogId());
        RandomAccessFile translogRaf = null;
 
-        // if we have a different trnaslogId (or the file does not exists at all), we want to flush the full
-        // translog to a new file (based on the translogId). If we still work on existing translog, just
-        // append the latest translog operations
-
-        if (translogSnapshot.translogId() != lastTranslogId || !translogFile.exists()) {
+        // if we have a different trnaslogId we want to flush the full translog to a new file (based on the translogId).
+        // If we still work on existing translog, just append the latest translog operations
+        if (snapshot.newTranslogCreated()) {
            translogDirty = true;
            try {
                translogRaf = new RandomAccessFile(translogFile, "rw");
                StreamOutput out = new DataOutputStreamOutput(translogRaf);
                out.writeInt(-1); // write the number of operations header with -1 currently
-                // double check that we managed to read/write correctly
-                translogRaf.seek(0);
-                if (translogRaf.readInt() != -1) {
-                    throw new ElasticSearchIllegalStateException("Wrote to snapshot file [" + translogFile + "] but did not read...");
-                }
                for (Translog.Operation operation : translogSnapshot) {
                    writeTranslogOperation(out, operation);
                }
@@ -194,25 +176,27 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
                } catch (IOException e1) {
                    // ignore
                }
-                throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to snapshot translog", e);
+                throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to snapshot translog into [" + translogFile + "]", e);
            }
-        } else if (translogSnapshot.size() > lastTranslogSize) {
+        } else if (snapshot.sameTranslogNewOperations()) {
            translogDirty = true;
            try {
                translogRaf = new RandomAccessFile(translogFile, "rw");
                // seek to the end, since we append
                translogRaf.seek(translogRaf.length());
                StreamOutput out = new DataOutputStreamOutput(translogRaf);
-                for (Translog.Operation operation : translogSnapshot.skipTo(lastTranslogSize)) {
+                for (Translog.Operation operation : translogSnapshot.skipTo(snapshot.lastTranslogSize())) {
                    writeTranslogOperation(out, operation);
                }
            } catch (Exception e) {
                try {
-                    translogRaf.close();
-                } catch (IOException e1) {
+                    if (translogRaf != null) {
+                        translogRaf.close();
+                    }
+                } catch (Exception e1) {
                    // ignore
                }
-                throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to snapshot translog", e);
+                throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to append snapshot translog into [" + translogFile + "]", e);
            }
        }
 
@@ -222,6 +206,18 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
                copyFromDirectory(snapshotIndexCommit.getDirectory(), snapshotIndexCommit.getSegmentsFileName(),
                        new File(locationIndex, snapshotIndexCommit.getSegmentsFileName()));
            }
+        } catch (Exception e) {
+            try {
+                if (translogRaf != null) {
+                    translogRaf.close();
+                }
+            } catch (Exception e1) {
+                // ignore
+            }
+            throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to finalize index snapshot into [" + new File(locationIndex, snapshotIndexCommit.getSegmentsFileName()) + "]", e);
+        }
+
+        try {
            if (translogDirty) {
                translogRaf.seek(0);
                translogRaf.writeInt(translogSnapshot.size());
@@ -232,16 +228,18 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
            }
        } catch (Exception e) {
            try {
-                translogRaf.close();
-            } catch (IOException e1) {
+                if (translogRaf != null) {
+                    translogRaf.close();
+                }
+            } catch (Exception e1) {
                // ignore
            }
-            throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to finalize snapshot", e);
+            throw new IndexShardGatewaySnapshotFailedException(shardId(), "Failed to finalize snapshot into [" + translogFile + "]", e);
        }
 
        // delete the old translog
-        if (lastTranslogId != translogSnapshot.translogId()) {
-            new File(locationTranslog, "translog-" + lastTranslogId).delete();
+        if (snapshot.newTranslogCreated()) {
+            new File(locationTranslog, "translog-" + snapshot.lastTranslogId()).delete();
        }
 
        // delete files that no longer exists in the index
@@ -260,11 +258,6 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
                }
            }
        }
-
-
-        lastIndexVersion = snapshotIndexCommit.getVersion();
-        lastTranslogId = translogSnapshot.translogId();
-        lastTranslogSize = translogSnapshot.size();
    }
 
    private RecoveryStatus.Index recoverIndex() throws IndexShardGatewayRecoveryException {
@@ -297,7 +290,17 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
        for (File file : files) {
            totalSize += file.length();
        }
-        return new RecoveryStatus.Index(files.length, new SizeValue(totalSize, SizeUnit.BYTES));
+
+        long version = -1;
+        try {
+            if (IndexReader.indexExists(store.directory())) {
+                version = IndexReader.getCurrentVersion(store.directory());
+            }
+        } catch (IOException e) {
+            throw new IndexShardGatewayRecoveryException(shardId(), "Failed to fetch index version after copying it over", e);
+        }
+
+        return new RecoveryStatus.Index(version, files.length, new SizeValue(totalSize, SizeUnit.BYTES));
    }
 
    private RecoveryStatus.Translog recoverTranslog() throws IndexShardGatewayRecoveryException {
@@ -307,7 +310,7 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
        if (recoveryTranslogId == -1) {
            // no recovery file found, start the shard and bail
            indexShard.start();
-            return new RecoveryStatus.Translog(0, new SizeValue(0, SizeUnit.BYTES));
+            return new RecoveryStatus.Translog(-1, 0, new SizeValue(0, SizeUnit.BYTES));
        }
        File recoveryTranslogFile = new File(locationTranslog, "translog-" + recoveryTranslogId);
        raf = new RandomAccessFile(recoveryTranslogFile, "r");
@@ -317,7 +320,7 @@ public class FsIndexShardGateway extends AbstractIndexShardComponent implements
            operations.add(readTranslogOperation(new DataInputStreamInput(raf)));
        }
        indexShard.performRecovery(operations);
-        return new RecoveryStatus.Translog(operations.size(), new SizeValue(recoveryTranslogFile.length(), SizeUnit.BYTES));
+        return new RecoveryStatus.Translog(recoveryTranslogId, operations.size(), new SizeValue(recoveryTranslogFile.length(), SizeUnit.BYTES));
    } catch (Exception e) {
        throw new IndexShardGatewayRecoveryException(shardId(), "Failed to perform recovery of translog", e);
    } finally {
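Note: the following standalone sketch is my own simplification (plain java.io, none of the Elasticsearch types above) of the translog strategy FsIndexShardGateway now follows: a new translog id gets a fresh "translog-<id>" file written in full and the previous file deleted, while the same translog id only gets the tail of operations appended and the operation-count header refreshed.

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.util.List;

public class TranslogFileSketch {

    static void snapshot(File dir, long translogId, long lastTranslogId,
                         int lastSize, List<String> operations) throws IOException {
        File file = new File(dir, "translog-" + translogId);
        boolean newTranslog = translogId != lastTranslogId;
        try (RandomAccessFile raf = new RandomAccessFile(file, "rw")) {
            if (newTranslog) {
                raf.setLength(0);
                raf.writeInt(operations.size());          // operations count header
                for (String op : operations) {
                    raf.writeUTF(op);
                }
            } else {
                raf.seek(raf.length());                   // same translog: append only the new tail
                for (String op : operations.subList(lastSize, operations.size())) {
                    raf.writeUTF(op);
                }
                raf.seek(0);
                raf.writeInt(operations.size());          // refresh the header count
            }
        }
        // a new translog makes the previous snapshot file obsolete
        if (newTranslog && lastTranslogId != -1) {
            new File(dir, "translog-" + lastTranslogId).delete();
        }
    }
}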
NoneIndexShardGateway.java
@@ -20,7 +20,6 @@
 package org.elasticsearch.index.gateway.none;
 
 import com.google.inject.Inject;
-import org.elasticsearch.index.deletionpolicy.SnapshotIndexCommit;
 import org.elasticsearch.index.gateway.IndexShardGateway;
 import org.elasticsearch.index.gateway.IndexShardGatewayRecoveryException;
 import org.elasticsearch.index.gateway.RecoveryStatus;
@@ -29,7 +28,6 @@ import org.elasticsearch.index.shard.AbstractIndexShardComponent;
 import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.index.shard.service.IndexShard;
 import org.elasticsearch.index.shard.service.InternalIndexShard;
-import org.elasticsearch.index.translog.Translog;
 import org.elasticsearch.util.SizeUnit;
 import org.elasticsearch.util.SizeValue;
 import org.elasticsearch.util.settings.Settings;
@@ -49,10 +47,10 @@ public class NoneIndexShardGateway extends AbstractIndexShardComponent implement
     @Override public RecoveryStatus recover() throws IndexShardGatewayRecoveryException {
         // in the none case, we simply start the shard
         indexShard.start();
-        return new RecoveryStatus(new RecoveryStatus.Index(0, new SizeValue(0, SizeUnit.BYTES)), new RecoveryStatus.Translog(0, new SizeValue(0, SizeUnit.BYTES)));
+        return new RecoveryStatus(new RecoveryStatus.Index(-1, 0, new SizeValue(0, SizeUnit.BYTES)), new RecoveryStatus.Translog(-1, 0, new SizeValue(0, SizeUnit.BYTES)));
     }
 
-    @Override public void snapshot(SnapshotIndexCommit snapshotIndexCommit, Translog.Snapshot translogSnapshot) {
+    @Override public void snapshot(Snapshot snapshot) {
         // nothing to do here
     }
 
MmapFsStore.java
@@ -28,6 +28,7 @@ import org.elasticsearch.index.LocalNodeId;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.IndexShardLifecycle;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.support.ForceSyncDirectory;
 import org.elasticsearch.util.lucene.store.SwitchDirectory;
 import org.elasticsearch.util.settings.Settings;
 
@@ -80,7 +81,7 @@ public class MmapFsStore extends AbstractFsStore<Directory> {
         return suggestUseCompoundFile;
     }
 
-    private static class CustomMMapDirectory extends MMapDirectory {
+    private static class CustomMMapDirectory extends MMapDirectory implements ForceSyncDirectory {
 
         private final boolean syncToDisk;
 
@@ -95,5 +96,9 @@ public class MmapFsStore extends AbstractFsStore<Directory> {
             }
             super.sync(name);
         }
+
+        @Override public void forceSync(String name) throws IOException {
+            super.sync(name);
+        }
     }
 }
NioFsStore.java
@@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.LocalNodeId;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.support.ForceSyncDirectory;
 import org.elasticsearch.util.lucene.store.SwitchDirectory;
 import org.elasticsearch.util.settings.Settings;
 
@@ -78,7 +79,7 @@ public class NioFsStore extends AbstractFsStore<Directory> {
         return suggestUseCompoundFile;
     }
 
-    private static class CustomNioFSDirectory extends NIOFSDirectory {
+    private static class CustomNioFSDirectory extends NIOFSDirectory implements ForceSyncDirectory {
 
         private final boolean syncToDisk;
 
@@ -93,5 +94,9 @@ public class NioFsStore extends AbstractFsStore<Directory> {
             }
             super.sync(name);
         }
+
+        @Override public void forceSync(String name) throws IOException {
+            super.sync(name);
+        }
     }
 }
SimpleFsStore.java
@@ -27,6 +27,7 @@ import org.elasticsearch.env.Environment;
 import org.elasticsearch.index.LocalNodeId;
 import org.elasticsearch.index.settings.IndexSettings;
 import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.index.store.support.ForceSyncDirectory;
 import org.elasticsearch.util.lucene.store.SwitchDirectory;
 import org.elasticsearch.util.settings.Settings;
 
@@ -78,7 +79,7 @@ public class SimpleFsStore extends AbstractFsStore<Directory> {
         return suggestUseCompoundFile;
     }
 
-    private static class CustomSimpleFSDirectory extends SimpleFSDirectory {
+    private static class CustomSimpleFSDirectory extends SimpleFSDirectory implements ForceSyncDirectory {
 
         private final boolean syncToDisk;
 
@@ -93,5 +94,9 @@ public class SimpleFsStore extends AbstractFsStore<Directory> {
             }
             super.sync(name);
         }
+
+        @Override public void forceSync(String name) throws IOException {
+            super.sync(name);
+        }
     }
 }
ForceSyncDirectory.java (new file)
@@ -0,0 +1,36 @@
+/*
+ * Licensed to Elastic Search and Shay Banon under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. Elastic Search licenses this
+ * file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.index.store.support;
+
+import java.io.IOException;
+
+/**
+ * A custom directory that allows to forceSync (since the actual directory might disable it)
+ *
+ * @author kimchy (shay.banon)
+ */
+public interface ForceSyncDirectory {
+
+    /**
+     * Similar to {@link org.apache.lucene.store.Directory#sync(String)} but forces it even if its
+     * disabled.
+     */
+    void forceSync(String name) throws IOException;
+}
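Note: a minimal sketch, not from the commit, of the pattern this interface enables. BaseDirectory and NoSyncDirectory are made-up stand-ins for the Lucene FSDirectory subclasses above; the assumption (suggested by the syncToDisk flag visible in the store classes) is that regular sync() may be a no-op while forceSync() always flushes, which is what the gateway copy path relies on — presumably the NFS concern from the commit message.

import java.io.IOException;

import org.elasticsearch.index.store.support.ForceSyncDirectory;

// Made-up stand-in for a Lucene FSDirectory subclass.
abstract class BaseDirectory {
    public void sync(String name) throws IOException {
        // fsync the named file
    }
}

// Regular sync() is skipped when syncToDisk is false; forceSync() always goes to disk.
class NoSyncDirectory extends BaseDirectory implements ForceSyncDirectory {

    private final boolean syncToDisk;

    NoSyncDirectory(boolean syncToDisk) {
        this.syncToDisk = syncToDisk;
    }

    @Override public void sync(String name) throws IOException {
        if (!syncToDisk) {
            return;
        }
        super.sync(name);
    }

    @Override public void forceSync(String name) throws IOException {
        super.sync(name);
    }
}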
Directories.java
@@ -22,6 +22,7 @@ package org.elasticsearch.util.lucene;
 import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.index.IndexReader;
 import org.apache.lucene.store.*;
+import org.elasticsearch.index.store.support.ForceSyncDirectory;
 import org.elasticsearch.util.SizeValue;
 import org.elasticsearch.util.io.FileSystemUtils;
 
@@ -158,7 +159,11 @@ public class Directories {
         } else {
             copyToDirectory(new FileInputStream(copyFrom), dir.createOutput(fileName));
         }
-        dir.sync(fileName);
+        if (dir instanceof ForceSyncDirectory) {
+            ((ForceSyncDirectory) dir).forceSync(fileName);
+        } else {
+            dir.sync(fileName);
+        }
     }
 
     public static void copyToDirectory(InputStream is, IndexOutput io) throws IOException {
SwitchDirectory.java
@@ -23,6 +23,7 @@ import com.google.common.collect.ImmutableSet;
 import org.apache.lucene.store.Directory;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.IndexOutput;
+import org.elasticsearch.index.store.support.ForceSyncDirectory;
 
 import java.io.IOException;
 import java.util.Set;
@@ -37,7 +38,7 @@ import java.util.Set;
  *
  * @author kimchy (shay.banon)
 */
-public class SwitchDirectory extends Directory {
+public class SwitchDirectory extends Directory implements ForceSyncDirectory {
 
     private final Directory secondaryDir;
 
@@ -141,6 +142,15 @@ public class SwitchDirectory extends Directory {
         getDirectory(name).sync(name);
     }
 
+    @Override public void forceSync(String name) throws IOException {
+        Directory dir = getDirectory(name);
+        if (dir instanceof ForceSyncDirectory) {
+            ((ForceSyncDirectory) dir).forceSync(name);
+        } else {
+            dir.sync(name);
+        }
+    }
+
     @Override public IndexInput openInput(String name) throws IOException {
         return getDirectory(name).openInput(name);
     }
FsMetaDataGatewayTests.java
@@ -48,7 +48,6 @@ public class FsMetaDataGatewayTests extends AbstractServersTests {
     }
 
     @Test public void testIndexActions() throws Exception {
-
         buildServer("server1");
         ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset();
         server("server1").start();
@@ -67,7 +66,5 @@ public class FsMetaDataGatewayTests extends AbstractServersTests {
         } catch (IndexAlreadyExistsException e) {
             // all is well
         }
-
-        ((InternalServer) server("server1")).injector().getInstance(Gateway.class).reset();
     }
 }