Peer recovery can sometimes fail to reuse index files that already exist on the node a shard is being allocated to, closes #1238.

Shay Banon 2011-08-12 04:06:20 +03:00
parent fa19239d44
commit 8a69910465
2 changed files with 10 additions and 5 deletions


@@ -129,7 +129,7 @@ public abstract class AbstractStore extends AbstractIndexShardComponent implemen
         doRenameFile(from, to);
         synchronized (mutex) {
             StoreFileMetaData fromMetaData = filesMetadata.get(from); // we should always find this one
-            StoreFileMetaData toMetaData = new StoreFileMetaData(fromMetaData.name(), fromMetaData.length(), fromMetaData.lastModified(), fromMetaData.checksum());
+            StoreFileMetaData toMetaData = new StoreFileMetaData(to, fromMetaData.length(), fromMetaData.lastModified(), fromMetaData.checksum());
             filesMetadata = MapBuilder.newMapBuilder(filesMetadata).remove(from).put(to, toMetaData).immutableMap();
             files = filesMetadata.keySet().toArray(new String[filesMetadata.size()]);
         }
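The only functional change in this hunk is the first constructor argument: the renamed entry is re-keyed under to, but it still carried fromMetaData.name(), so the stored metadata reported the old name and no longer matched either its key or the file on disk, which is presumably why peer recovery could not recognise and reuse the file. A minimal, self-contained sketch of that mismatch, using a plain record instead of the real StoreFileMetaData and invented file names:

import java.util.HashMap;
import java.util.Map;

public class RenameMismatchSketch {
    // Plain stand-in for the store metadata; not the Elasticsearch class.
    record FileMeta(String name, long length, String checksum) {}

    public static void main(String[] args) {
        Map<String, FileMeta> filesMetadata = new HashMap<>();
        String from = "recovery.segments_1";   // hypothetical temporary recovery name
        String to = "segments_1";              // final name after the rename
        FileMeta fromMetaData = new FileMeta(from, 128L, "abc");

        // Old behaviour: re-keyed under "to", but the name field still says "from".
        filesMetadata.put(to, new FileMeta(fromMetaData.name(), fromMetaData.length(), fromMetaData.checksum()));
        System.out.println(filesMetadata.get(to).name()); // recovery.segments_1 -- key and name disagree

        // Fixed behaviour: the stored name matches the key and the file on disk.
        filesMetadata.put(to, new FileMeta(to, fromMetaData.length(), fromMetaData.checksum()));
        System.out.println(filesMetadata.get(to).name()); // segments_1
    }
}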


@@ -23,7 +23,11 @@ import org.apache.lucene.store.FSDirectory;
 import org.elasticsearch.ElasticSearchException;
 import org.elasticsearch.action.ActionFuture;
 import org.elasticsearch.action.FailedNodeException;
-import org.elasticsearch.action.support.nodes.*;
+import org.elasticsearch.action.support.nodes.NodeOperationRequest;
+import org.elasticsearch.action.support.nodes.NodeOperationResponse;
+import org.elasticsearch.action.support.nodes.NodesOperationRequest;
+import org.elasticsearch.action.support.nodes.NodesOperationResponse;
+import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -166,8 +170,9 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
         Map<String, StoreFileMetaData> files = Maps.newHashMap();
         // read the checksums file
         FSDirectory directory = FSDirectory.open(indexFile);
+        Map<String, String> checksums = null;
         try {
-            Map<String, String> checksums = AbstractStore.readChecksums(directory);
+            checksums = AbstractStore.readChecksums(directory);
             for (File file : indexFile.listFiles()) {
                 // BACKWARD CKS SUPPORT
                 if (file.getName().endsWith(".cks")) {
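Hoisting the checksums declaration out of the try block and starting it at null means that when no usable checksums file is read, later code simply sees a null map; the condition added in the next hunk checks for exactly that. A small stand-alone sketch of the pattern, with a hypothetical reader method standing in for AbstractStore.readChecksums:

import java.util.Map;

public class HoistedChecksumsSketch {
    // Hypothetical reader; stands in for loading the shared checksums file.
    static Map<String, String> readChecksumsOrNull(boolean present) {
        return present ? Map.of("segments_1", "abc") : null;
    }

    public static void main(String[] args) {
        Map<String, String> checksums = null;  // declared up front, stays null if nothing is read
        try {
            checksums = readChecksumsOrNull(false);
        } catch (RuntimeException e) {
            // a broken checksums file is treated like a missing one
        }
        String fileName = "segments_1";
        // The guard added in the next hunk: fall back to the per-file .cks
        // only when the shared checksums do not cover this file.
        boolean useLegacyCks = checksums == null || !checksums.containsKey(fileName);
        System.out.println(useLegacyCks); // true in this run
    }
}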
@@ -193,13 +198,13 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesOperatio
                 // try and load the checksum
                 String checksum = null;
                 File checksumFile = new File(file.getParentFile(), file.getName() + ".cks");
-                if (checksumFile.exists()) {
+                if (checksumFile.exists() && (checksums == null || !checksums.containsKey(file.getName()))) {
                     byte[] checksumBytes = Streams.copyToByteArray(checksumFile);
                     if (checksumBytes.length > 0) {
                         checksum = Unicode.fromBytes(checksumBytes);
                     }
-                    files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), file.lastModified(), checksum));
                 }
+                files.put(file.getName(), new StoreFileMetaData(file.getName(), file.length(), file.lastModified(), checksum));
             }
             return new StoreFilesMetaData(false, shardId, files);
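Two things change in this final hunk: the legacy per-file .cks checksum is now read only when the shared checksums file has no entry for that file, and files.put(...) moved outside the if, so every index file is listed even when no checksum is available for it, which is presumably what lets the recovery source reuse such files (the point of #1238). A rough, self-contained sketch of the resulting listing shape (file names and data are invented, and where the shared checksum value itself gets attached is outside this diff):

import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class ChecksumFallbackSketch {
    public static void main(String[] args) {
        // Entries read from the shared checksums file (may be null in the real code).
        Map<String, String> checksums = Map.of("segments_1", "abc");
        // Files that still have a legacy per-file "<name>.cks" next to them.
        Set<String> legacyCks = Set.of("_0.fdt");
        String[] indexFiles = {"segments_1", "_0.fdt", "_0.fdx"};

        Map<String, String> files = new TreeMap<>();
        for (String name : indexFiles) {
            String checksum = null;
            // Change 1: consult the legacy .cks only if the shared checksums do not cover the file.
            if (legacyCks.contains(name) && (checksums == null || !checksums.containsKey(name))) {
                checksum = "from " + name + ".cks";
            }
            // Change 2: list the file unconditionally, with or without a checksum.
            files.put(name, checksum == null ? "<none>" : checksum);
        }
        System.out.println(files); // {_0.fdt=from _0.fdt.cks, _0.fdx=<none>, segments_1=<none>}
    }
}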