[STORE] Fold two hashFile implementations into one

Simon Willnauer 2014-09-15 11:03:49 +02:00
parent 723a40ef34
commit ec28d7c465
2 changed files with 19 additions and 25 deletions

Changed file: BlobStoreIndexShardRepository.java

@@ -26,6 +26,7 @@ import com.google.common.io.ByteStreams;
 import org.apache.lucene.index.CorruptIndexException;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
 import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.common.blobstore.*;
@@ -621,11 +622,12 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
                 // to calculate it.
                 // we might have multiple parts even though the file is small... make sure we read all of it.
                 try (final InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
-                    final byte[] bytes = ByteStreams.toByteArray(stream);
-                    assert bytes != null;
-                    assert bytes.length == fileInfo.length() : bytes.length + " != " + fileInfo.length();
-                    final BytesRef spare = new BytesRef(bytes);
-                    Store.MetadataSnapshot.hashFile(fileInfo.metadata().hash(), spare);
+                    BytesRefBuilder builder = new BytesRefBuilder();
+                    Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
+                    BytesRef hash = metadata.hash();
+                    hash.bytes = builder.bytes();
+                    hash.offset = 0;
+                    hash.length = builder.length();
                 }
             }
         }
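For reference, the new call site fills a BytesRefBuilder from the stream and then points the metadata's existing BytesRef at the builder's backing array instead of allocating a fresh byte[]. Below is a minimal, self-contained sketch of that rewiring pattern using only Lucene's BytesRef/BytesRefBuilder; the class name and example content are illustrative, not part of the commit.

import java.nio.charset.StandardCharsets;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;

public class BytesRefRewireSketch {
    public static void main(String[] args) {
        // Stand-in for the hash produced by Store.MetadataSnapshot.hashFile(...)
        BytesRefBuilder builder = new BytesRefBuilder();
        builder.copyBytes(new BytesRef("example file content".getBytes(StandardCharsets.UTF_8)));

        // Stand-in for the pre-existing hash BytesRef held by the file metadata
        BytesRef hash = new BytesRef();

        // Same rewiring as in the diff above: share the builder's backing array
        hash.bytes = builder.bytes();
        hash.offset = 0;
        hash.length = builder.length();

        System.out.println("hash now holds " + hash.length + " bytes");
    }
}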

Changed file: Store.java

@@ -27,6 +27,7 @@ import org.apache.lucene.codecs.lucene46.Lucene46SegmentInfoFormat;
 import org.apache.lucene.index.*;
 import org.apache.lucene.store.*;
 import org.apache.lucene.util.BytesRef;
+import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.Version;
 import org.elasticsearch.ExceptionsHelper;
@@ -35,9 +36,11 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.compress.Compressor;
 import org.elasticsearch.common.compress.CompressorFactory;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.Streams;
 import org.elasticsearch.common.logging.ESLogger;
 import org.elasticsearch.common.lucene.Directories;
 import org.elasticsearch.common.lucene.Lucene;
+import org.elasticsearch.common.lucene.store.InputStreamIndexInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.CloseableIndexComponent;
 import org.elasticsearch.index.codec.CodecService;
@@ -561,15 +564,15 @@ public class Store extends AbstractIndexShardComponent implements CloseableIndex
     private static void checksumFromLuceneFile(Directory directory, String file, ImmutableMap.Builder<String, StoreFileMetaData> builder, ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
         final String checksum;
-        final BytesRef fileHash = new BytesRef();
-        try (IndexInput in = directory.openInput(file, IOContext.READONCE)) {
+        final BytesRefBuilder fileHash = new BytesRefBuilder();
+        try (final IndexInput in = directory.openInput(file, IOContext.READONCE)) {
             try {
                 if (in.length() < CodecUtil.footerLength()) {
                     // truncated files trigger IAE if we seek negative... these files are really corrupted though
                     throw new CorruptIndexException("Can't retrieve checksum from file: " + file + " file length must be >= " + CodecUtil.footerLength() + " but was: " + in.length());
                 }
                 if (readFileAsHash) {
-                    hashFile(fileHash, in);
+                    hashFile(fileHash, new InputStreamIndexInput(in, in.length()), in.length());
                 }
                 checksum = digestToString(CodecUtil.retrieveChecksum(in));
@@ -577,30 +580,19 @@ public class Store extends AbstractIndexShardComponent implements CloseableIndex
                 logger.debug("Can retrieve checksum from file [{}]", ex, file);
                 throw ex;
             }
-            builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), checksum, version, fileHash));
+            builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), checksum, version, fileHash.get()));
         }
     }

     /**
      * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
      */
-    public static void hashFile(BytesRef fileHash, IndexInput in) throws IOException {
-        final int len = (int)Math.min(1024 * 1024, in.length()); // for safety we limit this to 1MB
-        fileHash.offset = 0;
+    public static void hashFile(BytesRefBuilder fileHash, InputStream in, long size) throws IOException {
+        final int len = (int)Math.min(1024 * 1024, size); // for safety we limit this to 1MB
         fileHash.grow(len);
-        fileHash.length = len;
-        in.readBytes(fileHash.bytes, 0, len);
-    }
-
-    /**
-     * Computes a strong hash value for small files. Note that this method should only be used for files < 1MB
-     */
-    public static void hashFile(BytesRef fileHash, BytesRef source) throws IOException {
-        final int len = Math.min(1024 * 1024, source.length); // for safety we limit this to 1MB
-        fileHash.offset = 0;
-        fileHash.grow(len);
-        fileHash.length = len;
-        System.arraycopy(source.bytes, source.offset, fileHash.bytes, 0, len);
+        fileHash.setLength(len);
+        Streams.readFully(in, fileHash.bytes(), 0, len);
+        assert fileHash.length() == len;
     }

     @Override
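Taken together, the two former hashFile variants (one reading from an IndexInput, one copying from an in-memory BytesRef) collapse into the single stream-based method above. A rough, self-contained sketch of that consolidated shape follows, with java.io.DataInputStream.readFully standing in for Elasticsearch's Streams.readFully helper; class and variable names are illustrative, not the actual Store API.

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.lucene.util.BytesRefBuilder;

public class HashFileSketch {

    // Mirrors the consolidated method: read at most 1MB from any InputStream
    // into the supplied BytesRefBuilder.
    static void hashFile(BytesRefBuilder fileHash, InputStream in, long size) throws IOException {
        final int len = (int) Math.min(1024 * 1024, size); // for safety we limit this to 1MB
        fileHash.grow(len);
        fileHash.setLength(len);
        // stand-in for org.elasticsearch.common.io.Streams.readFully(in, fileHash.bytes(), 0, len)
        new DataInputStream(in).readFully(fileHash.bytes(), 0, len);
        assert fileHash.length() == len;
    }

    public static void main(String[] args) throws IOException {
        byte[] content = "some small segment file".getBytes(StandardCharsets.UTF_8);

        // The former BytesRef-based call site now simply wraps its bytes in a stream.
        BytesRefBuilder fromBytes = new BytesRefBuilder();
        try (InputStream in = new ByteArrayInputStream(content)) {
            hashFile(fromBytes, in, content.length);
        }
        System.out.println("hashed " + fromBytes.length() + " bytes");

        // The former IndexInput-based call site instead wraps its IndexInput, e.g.
        //   hashFile(builder, new InputStreamIndexInput(indexInput, indexInput.length()), indexInput.length());
        // using Elasticsearch's InputStreamIndexInput adapter imported in the diff above.
    }
}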