Peer recovery process can sometimes fail to reuse the same index files already allocated on a possible target node, closes #1238.
commit 8a69910465
parent fa19239d44
@@ -129,7 +129,7 @@ public abstract class AbstractStore extends AbstractIndexShardComponent implemen
         doRenameFile(from, to);
         synchronized (mutex) {
             StoreFileMetaData fromMetaData = filesMetadata.get(from); // we should always find this one
-            StoreFileMetaData toMetaData = new StoreFileMetaData(fromMetaData.name(), fromMetaData.length(), fromMetaData.lastModified(), fromMetaData.checksum());
+            StoreFileMetaData toMetaData = new StoreFileMetaData(to, fromMetaData.length(), fromMetaData.lastModified(), fromMetaData.checksum());
            filesMetadata = MapBuilder.newMapBuilder(filesMetadata).remove(from).put(to, toMetaData).immutableMap();
             files = filesMetadata.keySet().toArray(new String[filesMetadata.size()]);
         }
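The one-line change in this hunk builds the renamed file's metadata with the new name, to, instead of reusing fromMetaData.name(), so the entry keyed under to no longer reports the old name. Presumably this is what defeated file reuse during peer recovery: once the store's metadata map and the on-disk names drift apart after a rename, later name/checksum comparisons stop matching. A toy, self-contained Java sketch of that mismatch (plain Java, not Elasticsearch code; file names, lengths, and the FileMeta type are made up for illustration):

    import java.util.HashMap;
    import java.util.Map;

    // Toy illustration of the bug fixed above: a file is renamed to its final name,
    // but the metadata stored under the new key still carries the old name, so any
    // later comparison that relies on the recorded name no longer lines up.
    public class RenameMetaDataDemo {

        // Simplified stand-in for StoreFileMetaData: just a name and a length.
        record FileMeta(String name, long length) {
        }

        public static void main(String[] args) {
            Map<String, FileMeta> filesMetadata = new HashMap<>();
            filesMetadata.put("recovery.tmp._0.cfs", new FileMeta("recovery.tmp._0.cfs", 1024));

            // Buggy rename: new key, but the value keeps the old name (the old fromMetaData.name()).
            FileMeta from = filesMetadata.remove("recovery.tmp._0.cfs");
            filesMetadata.put("_0.cfs", new FileMeta(from.name(), from.length()));
            System.out.println(filesMetadata.get("_0.cfs").name()); // still "recovery.tmp._0.cfs" -> mismatch

            // Fixed rename: the value is rebuilt with the new name, mirroring the "to" argument above.
            filesMetadata.put("_0.cfs", new FileMeta("_0.cfs", from.length()));
            System.out.println(filesMetadata.get("_0.cfs").name()); // "_0.cfs" -> names line up again
        }
    }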
@@ -23,7 +23,11 @@ import org.apache.lucene.store.FSDirectory;
 import org.elasticsearch.ElasticSearchException;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.FailedNodeException;
-import org.elasticsearch.action.support.nodes.*;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -166,8 +170,9 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
         Map<String, StoreFileMetaData> files = Maps.newHashMap();
         // read the checksums file
         FSDirectory directory = FSDirectory.open(indexFile);
+        Map<String, String> checksums = null;
         try {
-            Map<String, String> checksums = AbstractStore.readChecksums(directory);
+            checksums = AbstractStore.readChecksums(directory);
             for (File file : indexFile.listFiles()) {
                 // BACKWARD CKS SUPPORT
                 if (file.getName().endsWith(".cks")) {
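The checksums declaration moves out of the try block and starts as null, so the per-file guard added in the next hunk (checksums == null || !checksums.containsKey(...)) can distinguish "no new-style checksums were read" from "this file has no entry". A minimal, self-contained sketch of that declare-before-try / null-guard pattern, with a stand-in for AbstractStore.readChecksums(directory) (method body and file names here are illustrative, not the Elasticsearch API):

    import java.util.Map;

    // Minimal sketch of the pattern used above: declare the map before the try block,
    // fill it inside, and null-guard any later reads. Names are illustrative only.
    public class HoistedChecksumsSketch {

        // Stand-in for AbstractStore.readChecksums(directory); could return null or throw.
        static Map<String, String> readChecksums() {
            return Map.of("_0.cfs", "abc123");
        }

        public static void main(String[] args) {
            Map<String, String> checksums = null;
            try {
                checksums = readChecksums();
            } finally {
                // resource cleanup would go here in real code
            }
            String fileName = "_0.fdt";
            boolean listedInChecksumsFile = checksums != null && checksums.containsKey(fileName);
            System.out.println(listedInChecksumsFile); // false: fall back to the legacy ".cks" lookup
        }
    }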
@@ -193,13 +198,13 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
                 // try and load the checksum
                 String checksum = null;
                 File checksumFile = new File(file.getParentFile(), file.getName() + ".cks");
-                if (checksumFile.exists()) {
+                if (checksumFile.exists() && (checksums == null || !checksums.containsKey(file.getName()))) {
                     byte[] checksumBytes = Streams.copyToByteArray(checksumFile);
                     if (checksumBytes.length > 0) {
                         checksum = Unicode.fromBytes(checksumBytes);
                     }
+                    files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), file.lastModified(), checksum));
                 }
-                files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), file.lastModified(), checksum));
             }

             return new StoreFilesMetaData(false, shardId, files);
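The widened condition turns the legacy per-file ".cks" checksum into a fallback: it is only consulted when the new-style checksums map is absent or has no entry for the file. How a checksum taken from that map is attached to the file's metadata is not visible in this hunk, so that half of the sketch below is an assumption; the guard itself mirrors the condition above. Self-contained Java sketch (file names and checksum values are made up):

    import java.util.Map;

    // Sketch of the precedence the new condition encodes: prefer an entry from the
    // new-style checksums map; only fall back to a legacy ".cks" checksum when the
    // map is missing or does not list the file. Illustrative only, not ES API.
    public class ChecksumFallbackSketch {

        static String resolveChecksum(String fileName, Map<String, String> checksums, String legacyCksChecksum) {
            if (checksums == null || !checksums.containsKey(fileName)) {
                return legacyCksChecksum; // may itself be null if no ".cks" file exists
            }
            return checksums.get(fileName); // assumed: the map entry is what ends up in StoreFileMetaData
        }

        public static void main(String[] args) {
            Map<String, String> checksums = Map.of("_0.cfs", "new-style-checksum");
            System.out.println(resolveChecksum("_0.cfs", checksums, "legacy")); // new-style-checksum
            System.out.println(resolveChecksum("_0.fdt", checksums, "legacy")); // legacy (not in the map)
            System.out.println(resolveChecksum("_0.fdt", null, "legacy"));      // legacy (no map at all)
        }
    }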