Remove support for legacy checksums
Elasticsearch 5.0 no longer supports indices with legacy checksums. The last time we wrote legacy checksums was in 1.3.0, which was already based on Lucene 4.9, which means that all files have CRC32 checksums. All indices that Elasticsearch can read today must have been written with Lucene version >= 4.8 anyway, so we can drop this layer of backwards compatibility entirely. Since we are close to upgrading to Lucene 6.0, we should get rid of this in a more contained change than the Lucene upgrade itself.
parent 4504a58643
commit 5008694ba1
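For background on what replaces the removed Adler32 path: since Lucene 4.8 every index file ends with a codec footer containing a CRC32 checksum, so verification can rely entirely on Lucene. Below is a minimal, hypothetical sketch of that check; the helper class, directory, file name, and expected value are illustrative and not part of this commit, but it uses the same CodecUtil.checksumEntireFile call that the remaining Store code relies on.

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.index.CorruptIndexException;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;

import java.io.IOException;

// Hypothetical helper, not part of this commit: verifies a store file the way the
// remaining Lucene-only code path does, with no Adler32/length-only branches.
class LuceneChecksumCheck {

    // expectedChecksum is the CRC32 value recorded in the snapshot/recovery metadata
    static void verify(Directory dir, String fileName, long expectedChecksum) throws IOException {
        try (IndexInput input = dir.openInput(fileName, IOContext.READONCE)) {
            // checksumEntireFile re-reads the file, validates the CRC32 footer that
            // Lucene has written for every file since 4.8, and returns the checksum value
            long actual = CodecUtil.checksumEntireFile(input);
            if (actual != expectedChecksum) {
                throw new CorruptIndexException("inconsistent metadata: expected checksum="
                        + expectedChecksum + " but got=" + actual, input);
            }
        }
    }
}

By contrast, the deleted LegacyVerification classes had to track Adler32 checksums and file lengths manually, because pre-4.8 files carry no such footer.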
@@ -926,13 +926,6 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
 }
 Store.verify(indexOutput);
 indexOutput.close();
-// write the checksum
-if (fileInfo.metadata().hasLegacyChecksum()) {
-Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
-legacyChecksums.add(fileInfo.metadata());
-legacyChecksums.write(store);
-
-}
 store.directory().sync(Collections.singleton(fileInfo.physicalName()));
 success = true;
 } catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
@@ -180,7 +180,6 @@ public class BlobStoreIndexShardSnapshot implements ToXContent, FromXContentBuil
 *
 * @return file checksum
 */
-@Nullable
 public String checksum() {
 return metadata.checksum();
 }
@@ -1,123 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.store;
-
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.store.BufferedChecksum;
-import org.apache.lucene.store.IndexOutput;
-
-import java.io.IOException;
-import java.util.zip.Adler32;
-import java.util.zip.Checksum;
-
-/**
- * Implements verification checks to the best extent possible
- * against legacy segments.
- * <p>
- * For files since ES 1.3, we have a lucene checksum, and
- * we verify both CRC32 + length from that.
- * For older segment files, we have an elasticsearch Adler32 checksum
- * and a length, except for commit points.
- * For older commit points, we only have the length in metadata,
- * but lucene always wrote a CRC32 checksum we can verify in the future, too.
- * For (Jurassic?) files, we dont have an Adler32 checksum at all,
- * since its optional in the protocol. But we always know the length.
- * @deprecated only to support old segments
- */
-@Deprecated
-class LegacyVerification {
-
-// TODO: add a verifier for old lucene segments_N that also checks CRC.
-// but for now, at least truncation is detected here (as length will be checked)
-
-/**
- * verifies Adler32 + length for index files before lucene 4.8
- */
-static class Adler32VerifyingIndexOutput extends VerifyingIndexOutput {
-final String adler32;
-final long length;
-final Checksum checksum = new BufferedChecksum(new Adler32());
-long written;
-
-public Adler32VerifyingIndexOutput(IndexOutput out, String adler32, long length) {
-super(out);
-this.adler32 = adler32;
-this.length = length;
-}
-
-@Override
-public void verify() throws IOException {
-if (written != length) {
-throw new CorruptIndexException("expected length=" + length + " != actual length: " + written + " : file truncated?", out.toString());
-}
-final String actualChecksum = Store.digestToString(checksum.getValue());
-if (!adler32.equals(actualChecksum)) {
-throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + adler32 +
-" actual=" + actualChecksum, out.toString());
-}
-}
-
-@Override
-public void writeByte(byte b) throws IOException {
-out.writeByte(b);
-checksum.update(b);
-written++;
-}
-
-@Override
-public void writeBytes(byte[] bytes, int offset, int length) throws IOException {
-out.writeBytes(bytes, offset, length);
-checksum.update(bytes, offset, length);
-written += length;
-}
-}
-
-/**
- * verifies length for index files before lucene 4.8
- */
-static class LengthVerifyingIndexOutput extends VerifyingIndexOutput {
-final long length;
-long written;
-
-public LengthVerifyingIndexOutput(IndexOutput out, long length) {
-super(out);
-this.length = length;
-}
-
-@Override
-public void verify() throws IOException {
-if (written != length) {
-throw new CorruptIndexException("expected length=" + length + " != actual length: " + written + " : file truncated?", out.toString());
-}
-}
-
-@Override
-public void writeByte(byte b) throws IOException {
-out.writeByte(b);
-written++;
-}
-
-@Override
-public void writeBytes(byte[] bytes, int offset, int length) throws IOException {
-out.writeBytes(bytes, offset, length);
-written += length;
-}
-}
-}
@@ -118,8 +118,6 @@ import static java.util.Collections.unmodifiableMap;
 * </pre>
 */
 public class Store extends AbstractIndexShardComponent implements Closeable, RefCounted {
-private static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;
-
 static final String CODEC = "store";
 static final int VERSION_WRITE_THROWABLE= 2; // we write throwable since 2.0
 static final int VERSION_STACK_TRACE = 1; // we write the stack trace too since 1.4.0
@@ -456,19 +454,9 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 IndexOutput output = directory().createOutput(fileName, context);
 boolean success = false;
 try {
-if (metadata.hasLegacyChecksum()) {
-logger.debug("create legacy adler32 output for {}", fileName);
-output = new LegacyVerification.Adler32VerifyingIndexOutput(output, metadata.checksum(), metadata.length());
-} else if (metadata.checksum() == null) {
-// TODO: when the file is a segments_N, we can still CRC-32 + length for more safety
-// its had that checksum forever.
-logger.debug("create legacy length-only output for {}", fileName);
-output = new LegacyVerification.LengthVerifyingIndexOutput(output, metadata.length());
-} else {
-assert metadata.writtenBy() != null;
-assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
-output = new LuceneVerifyingIndexOutput(metadata, output);
-}
+assert metadata.writtenBy() != null;
+assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
+output = new LuceneVerifyingIndexOutput(metadata, output);
 success = true;
 } finally {
 if (success == false) {
@@ -485,12 +473,8 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 }
 
 public IndexInput openVerifyingInput(String filename, IOContext context, StoreFileMetaData metadata) throws IOException {
-if (metadata.hasLegacyChecksum() || metadata.checksum() == null) {
-logger.debug("open legacy input for {}", filename);
-return directory().openInput(filename, context);
-}
 assert metadata.writtenBy() != null;
-assert metadata.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION);
+assert metadata.writtenBy().onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION);
 return new VerifyingIndexInput(directory().openInput(filename, context));
 }
 
@@ -518,32 +502,12 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 if (input.length() != md.length()) { // first check the length no matter how old this file is
 throw new CorruptIndexException("expected length=" + md.length() + " != actual length: " + input.length() + " : file truncated?", input);
 }
-if (md.writtenBy() != null && md.writtenBy().onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
-// throw exception if the file is corrupt
-String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
-// throw exception if metadata is inconsistent
-if (!checksum.equals(md.checksum())) {
-throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum +
-", metadata checksum=" + md.checksum(), input);
-}
-} else if (md.hasLegacyChecksum()) {
-// legacy checksum verification - no footer that we need to omit in the checksum!
-final Checksum checksum = new Adler32();
-final byte[] buffer = new byte[md.length() > 4096 ? 4096 : (int) md.length()];
-final long len = input.length();
-long read = 0;
-while (len > read) {
-final long bytesLeft = len - read;
-final int bytesToRead = bytesLeft < buffer.length ? (int) bytesLeft : buffer.length;
-input.readBytes(buffer, 0, bytesToRead, false);
-checksum.update(buffer, 0, bytesToRead);
-read += bytesToRead;
-}
-String adler32 = Store.digestToString(checksum.getValue());
-if (!adler32.equals(md.checksum())) {
-throw new CorruptIndexException("checksum failed (hardware problem?) : expected=" + md.checksum() +
-" actual=" + adler32, input);
-}
-}
+// throw exception if the file is corrupt
+String checksum = Store.digestToString(CodecUtil.checksumEntireFile(input));
+// throw exception if metadata is inconsistent
+if (!checksum.equals(md.checksum())) {
+throw new CorruptIndexException("inconsistent metadata: lucene checksum=" + checksum +
+", metadata checksum=" + md.checksum(), input);
+}
 }
 }
 }
@@ -799,7 +763,7 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 final int size = in.readVInt();
 Map<String, StoreFileMetaData> metadata = new HashMap<>();
 for (int i = 0; i < size; i++) {
-StoreFileMetaData meta = StoreFileMetaData.readStoreFileMetaData(in);
+StoreFileMetaData meta = new StoreFileMetaData(in);
 metadata.put(meta.name(), meta);
 }
 Map<String, String> commitUserData = new HashMap<>();
@@ -836,14 +800,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 static LoadedMetadata loadMetadata(IndexCommit commit, Directory directory, ESLogger logger) throws IOException {
 long numDocs;
 Map<String, StoreFileMetaData> builder = new HashMap<>();
-Map<String, String> checksumMap = readLegacyChecksums(directory).v1();
 Map<String, String> commitUserDataBuilder = new HashMap<>();
 try {
 final SegmentInfos segmentCommitInfos = Store.readSegmentsInfo(commit, directory);
 numDocs = Lucene.getNumDocs(segmentCommitInfos);
 commitUserDataBuilder.putAll(segmentCommitInfos.getUserData());
 @SuppressWarnings("deprecation")
-Version maxVersion = Version.LUCENE_4_0; // we don't know which version was used to write so we take the max version.
+Version maxVersion = segmentCommitInfos.getMinSegmentLuceneVersion(); // we don't know which version was used to write so we take the max version.
 for (SegmentCommitInfo info : segmentCommitInfos) {
 final Version version = info.info.getVersion();
 if (version == null) {
@@ -854,26 +817,21 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 maxVersion = version;
 }
 for (String file : info.files()) {
-String legacyChecksum = checksumMap.get(file);
-if (version.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
+if (version.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
 checksumFromLuceneFile(directory, file, builder, logger, version, SEGMENT_INFO_EXTENSION.equals(IndexFileNames.getExtension(file)));
 } else {
-builder.put(file, new StoreFileMetaData(file, directory.fileLength(file), legacyChecksum, version));
+throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + version);
 }
 }
 }
+if (maxVersion == null) {
+maxVersion = StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION;
+}
 final String segmentsFile = segmentCommitInfos.getSegmentsFileName();
-String legacyChecksum = checksumMap.get(segmentsFile);
-if (maxVersion.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION)) {
+if (maxVersion.onOrAfter(StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION)) {
 checksumFromLuceneFile(directory, segmentsFile, builder, logger, maxVersion, true);
 } else {
-final BytesRefBuilder fileHash = new BytesRefBuilder();
-final long length;
-try (final IndexInput in = directory.openInput(segmentsFile, IOContext.READONCE)) {
-length = in.length();
-hashFile(fileHash, new InputStreamIndexInput(in, length), length);
-}
-builder.put(segmentsFile, new StoreFileMetaData(segmentsFile, length, legacyChecksum, maxVersion, fileHash.get()));
+throw new IllegalStateException("version must be onOrAfter: " + StoreFileMetaData.FIRST_LUCENE_CHECKSUM_VERSION + " but was: " + maxVersion);
 }
 } catch (CorruptIndexException | IndexNotFoundException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
 // we either know the index is corrupted or it's just not there
@@ -898,61 +856,6 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 return new LoadedMetadata(unmodifiableMap(builder), unmodifiableMap(commitUserDataBuilder), numDocs);
 }
 
-/**
- * Reads legacy checksum files found in the directory.
- * <p>
- * Files are expected to start with _checksums- prefix
- * followed by long file version. Only file with the highest version is read, all other files are ignored.
- *
- * @param directory the directory to read checksums from
- * @return a map of file checksums and the checksum file version
- */
-@SuppressWarnings("deprecation") // Legacy checksum needs legacy methods
-static Tuple<Map<String, String>, Long> readLegacyChecksums(Directory directory) throws IOException {
-synchronized (directory) {
-long lastFound = -1;
-for (String name : directory.listAll()) {
-if (!isChecksum(name)) {
-continue;
-}
-long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
-if (current > lastFound) {
-lastFound = current;
-}
-}
-if (lastFound > -1) {
-try (IndexInput indexInput = directory.openInput(CHECKSUMS_PREFIX + lastFound, IOContext.READONCE)) {
-indexInput.readInt(); // version
-return new Tuple<>(indexInput.readStringStringMap(), lastFound);
-}
-}
-return new Tuple<>(new HashMap<>(), -1L);
-}
-}
-
-/**
- * Deletes all checksum files with version lower than newVersion.
- *
- * @param directory the directory to clean
- * @param newVersion the latest checksum file version
- */
-static void cleanLegacyChecksums(Directory directory, long newVersion) throws IOException {
-synchronized (directory) {
-for (String name : directory.listAll()) {
-if (isChecksum(name)) {
-long current = Long.parseLong(name.substring(CHECKSUMS_PREFIX.length()));
-if (current < newVersion) {
-try {
-directory.deleteFile(name);
-} catch (IOException ex) {
-logger.debug("can't delete old checksum file [{}]", ex, name);
-}
-}
-}
-}
-}
-}
-
 private static void checksumFromLuceneFile(Directory directory, String file, Map<String, StoreFileMetaData> builder,
 ESLogger logger, Version version, boolean readFileAsHash) throws IOException {
 final String checksum;
@@ -1221,64 +1124,13 @@ public class Store extends AbstractIndexShardComponent implements Closeable, Ref
 }
 }
 
-public final static class LegacyChecksums {
-private final Map<String, String> legacyChecksums = new HashMap<>();
-
-public void add(StoreFileMetaData metaData) throws IOException {
-
-if (metaData.hasLegacyChecksum()) {
-synchronized (this) {
-// we don't add checksums if they were written by LUCENE_48... now we are using the build in mechanism.
-legacyChecksums.put(metaData.name(), metaData.checksum());
-}
-}
-}
-
-public synchronized void write(Store store) throws IOException {
-synchronized (store.directory) {
-Tuple<Map<String, String>, Long> tuple = MetadataSnapshot.readLegacyChecksums(store.directory);
-tuple.v1().putAll(legacyChecksums);
-if (!tuple.v1().isEmpty()) {
-writeChecksums(store.directory, tuple.v1(), tuple.v2());
-}
-}
-}
-
-@SuppressWarnings("deprecation") // Legacy checksum uses legacy methods
-synchronized void writeChecksums(Directory directory, Map<String, String> checksums, long lastVersion) throws IOException {
-// Make sure if clock goes backwards we still move version forwards:
-long nextVersion = Math.max(lastVersion+1, System.currentTimeMillis());
-final String checksumName = CHECKSUMS_PREFIX + nextVersion;
-try (IndexOutput output = directory.createOutput(checksumName, IOContext.DEFAULT)) {
-output.writeInt(0); // version
-output.writeStringStringMap(checksums);
-}
-directory.sync(Collections.singleton(checksumName));
-MetadataSnapshot.cleanLegacyChecksums(directory, nextVersion);
-}
-
-public void clear() {
-this.legacyChecksums.clear();
-}
-
-public void remove(String name) {
-legacyChecksums.remove(name);
-}
-}
-
-public static final String CHECKSUMS_PREFIX = "_checksums-";
-
-public static boolean isChecksum(String name) {
-// TODO can we drowp .cks
-return name.startsWith(CHECKSUMS_PREFIX) || name.endsWith(".cks"); // bwcomapt - .cks used to be a previous checksum file
-}
-
 /**
 * Returns true if the file is auto-generated by the store and shouldn't be deleted during cleanup.
 * This includes write lock and checksum files
 */
 public static boolean isAutogenerated(String name) {
-return IndexWriter.WRITE_LOCK_NAME.equals(name) || isChecksum(name);
+return IndexWriter.WRITE_LOCK_NAME.equals(name);
 }
 
 /**
@@ -25,35 +25,42 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
+import org.elasticsearch.common.io.stream.Writeable;
 import org.elasticsearch.common.lucene.Lucene;
 
 import java.io.IOException;
+import java.util.Objects;
 
 /**
 *
 */
-public class StoreFileMetaData implements Streamable {
+public class StoreFileMetaData implements Writeable {
 
-private String name;
+public static final Version FIRST_LUCENE_CHECKSUM_VERSION = Version.LUCENE_4_8_0;
 
+private final String name;
 
 // the actual file size on "disk", if compressed, the compressed size
-private long length;
+private final long length;
 
-private String checksum;
+private final String checksum;
 
-private Version writtenBy;
+private final Version writtenBy;
 
-private BytesRef hash;
+private final BytesRef hash;
 
-private StoreFileMetaData() {
-}
-
-public StoreFileMetaData(String name, long length) {
-this(name, length, null);
+public StoreFileMetaData(StreamInput in) throws IOException {
+name = in.readString();
+length = in.readVLong();
+checksum = in.readString();
+String versionString = in.readString();
+assert versionString != null;
+writtenBy = Lucene.parseVersionLenient(versionString, FIRST_LUCENE_CHECKSUM_VERSION);
+hash = in.readBytesRef();
 }
 
 public StoreFileMetaData(String name, long length, String checksum) {
-this(name, length, checksum, null, null);
+this(name, length, checksum, FIRST_LUCENE_CHECKSUM_VERSION);
 }
 
 public StoreFileMetaData(String name, long length, String checksum, Version writtenBy) {
@@ -61,6 +68,10 @@ public class StoreFileMetaData implements Streamable {
 }
 
 public StoreFileMetaData(String name, long length, String checksum, Version writtenBy, BytesRef hash) {
+assert writtenBy != null && writtenBy.onOrAfter(FIRST_LUCENE_CHECKSUM_VERSION) : "index version less that "
++ FIRST_LUCENE_CHECKSUM_VERSION + " are not supported but got: " + writtenBy;
+Objects.requireNonNull(writtenBy, "writtenBy must not be null");
+Objects.requireNonNull(checksum, "checksum must not be null");
 this.name = name;
 this.length = length;
 this.checksum = checksum;
@@ -85,10 +96,8 @@ public class StoreFileMetaData implements Streamable {
 
 /**
 * Returns a string representation of the files checksum. Since Lucene 4.8 this is a CRC32 checksum written
-* by lucene. Previously we use Adler32 on top of Lucene as the checksum algorithm, if {@link #hasLegacyChecksum()} returns
-* <code>true</code> this is a Adler32 checksum.
+* by lucene.
 */
-@Nullable
 public String checksum() {
 return this.checksum;
 }
@@ -104,33 +113,22 @@ public class StoreFileMetaData implements Streamable {
 return length == other.length && checksum.equals(other.checksum) && hash.equals(other.hash);
 }
 
-public static StoreFileMetaData readStoreFileMetaData(StreamInput in) throws IOException {
-StoreFileMetaData md = new StoreFileMetaData();
-md.readFrom(in);
-return md;
-}
-
 @Override
 public String toString() {
 return "name [" + name + "], length [" + length + "], checksum [" + checksum + "], writtenBy [" + writtenBy + "]" ;
 }
 
 @Override
-public void readFrom(StreamInput in) throws IOException {
-name = in.readString();
-length = in.readVLong();
-checksum = in.readOptionalString();
-String versionString = in.readOptionalString();
-writtenBy = Lucene.parseVersionLenient(versionString, null);
-hash = in.readBytesRef();
+public StoreFileMetaData readFrom(StreamInput in) throws IOException {
+return new StoreFileMetaData(in);
 }
 
 @Override
 public void writeTo(StreamOutput out) throws IOException {
 out.writeString(name);
 out.writeVLong(length);
-out.writeOptionalString(checksum);
-out.writeOptionalString(writtenBy == null ? null : writtenBy.toString());
+out.writeString(checksum);
+out.writeString(writtenBy.toString());
 out.writeBytesRef(hash);
 }
 
@@ -141,14 +139,6 @@ public class StoreFileMetaData implements Streamable {
 return writtenBy;
 }
 
-/**
-* Returns <code>true</code> iff the checksum is not <code>null</code> and if the file has NOT been written by
-* a Lucene version greater or equal to Lucene 4.8
-*/
-public boolean hasLegacyChecksum() {
-return checksum != null && (writtenBy == null || writtenBy.onOrAfter(Version.LUCENE_4_8) == false);
-}
-
 /**
 * Returns a variable length hash of the file represented by this metadata object. This can be the file
 * itself if the file is small enough. If the length of the hash is <tt>0</tt> no hash value is available
@@ -76,8 +76,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
 private final AtomicBoolean finished = new AtomicBoolean();
 
 private final ConcurrentMap<String, IndexOutput> openIndexOutputs = ConcurrentCollections.newConcurrentMap();
-private final Store.LegacyChecksums legacyChecksums = new Store.LegacyChecksums();
-
 private final CancellableThreads cancellableThreads = new CancellableThreads();
 
 // last time this status was accessed
@@ -145,10 +143,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
 return state().getStage();
 }
 
-public Store.LegacyChecksums legacyChecksums() {
-return legacyChecksums;
-}
-
 /** renames all temporary files to their true name, potentially overriding existing files */
 public void renameAllTempFiles() throws IOException {
 ensureRefCount();
@@ -281,7 +275,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
 logger.trace("cleaning temporary file [{}]", file);
 store.deleteQuiet(file);
 }
-legacyChecksums.clear();
 }
 
 @Override
|
@ -344,8 +337,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
|
||||||
// to recover from in case of a full cluster shutdown just when this code executes...
|
// to recover from in case of a full cluster shutdown just when this code executes...
|
||||||
renameAllTempFiles();
|
renameAllTempFiles();
|
||||||
final Store store = store();
|
final Store store = store();
|
||||||
// now write checksums
|
|
||||||
legacyChecksums().write(store);
|
|
||||||
try {
|
try {
|
||||||
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
|
store.cleanupAndVerify("recovery CleanFilesRequestHandler", sourceMetaData);
|
||||||
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
|
} catch (CorruptIndexException | IndexFormatTooNewException | IndexFormatTooOldException ex) {
|
||||||
|
@@ -399,8 +390,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
 // we are done
 indexOutput.close();
 }
-// write the checksum
-legacyChecksums().add(fileMetaData);
 final String temporaryFileName = getTempNameForFile(name);
 assert Arrays.asList(store.directory().listAll()).contains(temporaryFileName);
 store.directory().sync(Collections.singleton(temporaryFileName));
@@ -109,6 +109,8 @@ public class FileInfoTests extends ESTestCase {
 builder.field(Fields.NAME, name);
 builder.field(Fields.PHYSICAL_NAME, physicalName);
 builder.field(Fields.LENGTH, length);
+builder.field(Fields.WRITTEN_BY, Version.LATEST.toString());
+builder.field(Fields.CHECKSUM, "666");
 builder.endObject();
 byte[] xContent = builder.bytes().toBytes();
 
@@ -122,9 +124,9 @@ public class FileInfoTests extends ESTestCase {
 assertThat(name, equalTo(parsedInfo.name()));
 assertThat(physicalName, equalTo(parsedInfo.physicalName()));
 assertThat(length, equalTo(parsedInfo.length()));
-assertNull(parsedInfo.checksum());
-assertNull(parsedInfo.metadata().checksum());
-assertNull(parsedInfo.metadata().writtenBy());
+assertEquals("666", parsedInfo.checksum());
+assertEquals("666", parsedInfo.metadata().checksum());
+assertEquals(Version.LATEST, parsedInfo.metadata().writtenBy());
 } else {
 try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(xContent)) {
 parser.nextToken();
@@ -139,14 +141,14 @@ public class FileInfoTests extends ESTestCase {
 }
 
 public void testGetPartSize() {
-BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36), new ByteSizeValue(6));
+BlobStoreIndexShardSnapshot.FileInfo info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 36, "666"), new ByteSizeValue(6));
 int numBytes = 0;
 for (int i = 0; i < info.numberOfParts(); i++) {
 numBytes += info.partBytes(i);
 }
 assertEquals(numBytes, 36);
 
-info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35), new ByteSizeValue(6));
+info = new BlobStoreIndexShardSnapshot.FileInfo("foo", new StoreFileMetaData("foo", 35, "666"), new ByteSizeValue(6));
 numBytes = 0;
 for (int i = 0; i < info.numberOfParts(); i++) {
 numBytes += info.partBytes(i);
@@ -154,7 +156,7 @@ public class FileInfoTests extends ESTestCase {
 assertEquals(numBytes, 35);
 final int numIters = randomIntBetween(10, 100);
 for (int j = 0; j < numIters; j++) {
-StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000));
+StoreFileMetaData metaData = new StoreFileMetaData("foo", randomIntBetween(0, 1000), "666");
 info = new BlobStoreIndexShardSnapshot.FileInfo("foo", metaData, new ByteSizeValue(randomIntBetween(1, 1000)));
 numBytes = 0;
 for (int i = 0; i < info.numberOfParts(); i++) {
@@ -1,124 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.index.store;
-
-import org.apache.lucene.index.CorruptIndexException;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.store.IOContext;
-import org.apache.lucene.store.IndexOutput;
-import org.elasticsearch.test.ESTestCase;
-
-import java.nio.charset.StandardCharsets;
-import java.util.zip.Adler32;
-
-/**
- * Simple tests for LegacyVerification (old segments)
- * @deprecated remove this test when support for lucene 4.x
- * segments is not longer needed.
- */
-@Deprecated
-public class LegacyVerificationTests extends ESTestCase {
-
-public void testAdler32() throws Exception {
-Adler32 expected = new Adler32();
-byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
-expected.update(bytes);
-String expectedString = Store.digestToString(expected.getValue());
-
-Directory dir = newDirectory();
-
-IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
-VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
-out.writeBytes(bytes, 0, bytes.length);
-out.verify();
-out.close();
-out.verify();
-
-dir.close();
-}
-
-public void testAdler32Corrupt() throws Exception {
-Adler32 expected = new Adler32();
-byte bytes[] = "abcdefgh".getBytes(StandardCharsets.UTF_8);
-expected.update(bytes);
-String expectedString = Store.digestToString(expected.getValue());
-
-byte corruptBytes[] = "abcdefch".getBytes(StandardCharsets.UTF_8);
-Directory dir = newDirectory();
-
-IndexOutput o = dir.createOutput("legacy", IOContext.DEFAULT);
-VerifyingIndexOutput out = new LegacyVerification.Adler32VerifyingIndexOutput(o, expectedString, 8);
-out.writeBytes(corruptBytes, 0, bytes.length);
-try {
-out.verify();
-fail();
-} catch (CorruptIndexException e) {
-// expected exception
-}
-out.close();
-
-try {
-out.verify();
-fail();
-} catch (CorruptIndexException e) {
-// expected exception
-}
-
-dir.close();
-}
-
-public void testLengthOnlyOneByte() throws Exception {
-Directory dir = newDirectory();
-
-IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
-VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 1);
-out.writeByte((byte) 3);
-out.verify();
-out.close();
-out.verify();
-
-dir.close();
-}
-
-public void testLengthOnlyCorrupt() throws Exception {
-Directory dir = newDirectory();
-
-IndexOutput o = dir.createOutput("oneByte", IOContext.DEFAULT);
-VerifyingIndexOutput out = new LegacyVerification.LengthVerifyingIndexOutput(o, 2);
-out.writeByte((byte) 3);
-try {
-out.verify();
-fail();
-} catch (CorruptIndexException expected) {
-// expected exception
-}
-
-out.close();
-
-try {
-out.verify();
-fail();
-} catch (CorruptIndexException expected) {
-// expected exception
-}
-
-dir.close();
-}
-}
@@ -355,95 +355,6 @@ public class StoreTests extends ESTestCase {
 }
 }
 
-// IF THIS TEST FAILS ON UPGRADE GO LOOK AT THE OldSIMockingCodec!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-@AwaitsFix(bugUrl="Fails with seed E1394B038144F6E")
-// The test currently fails because the segment infos and the index don't
-// agree on the oldest version of a segment. We should fix this test by
-// switching to a static bw index
-public void testWriteLegacyChecksums() throws IOException {
-final ShardId shardId = new ShardId("index", "_na_", 1);
-DirectoryService directoryService = new LuceneManagedDirectoryService(random());
-Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
-// set default codec - all segments need checksums
-final boolean usesOldCodec = randomBoolean();
-IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(usesOldCodec ? new OldSIMockingCodec() : TestUtil.getDefaultCodec()));
-int docs = 1 + random().nextInt(100);
-
-for (int i = 0; i < docs; i++) {
-Document doc = new Document();
-doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
-writer.addDocument(doc);
-}
-if (random().nextBoolean()) {
-for (int i = 0; i < docs; i++) {
-if (random().nextBoolean()) {
-Document doc = new Document();
-doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-writer.updateDocument(new Term("id", "" + i), doc);
-}
-}
-}
-if (random().nextBoolean()) {
-DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
-}
-Store.MetadataSnapshot metadata;
-// check before we committed
-try {
-store.getMetadata();
-fail("no index present - expected exception");
-} catch (IndexNotFoundException ex) {
-// expected
-}
-assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
-
-writer.close();
-Store.LegacyChecksums checksums = new Store.LegacyChecksums();
-Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
-for (String file : store.directory().listAll()) {
-if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
-continue;
-}
-BytesRef hash = new BytesRef();
-if (file.startsWith("segments")) {
-hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
-}
-StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
-legacyMeta.put(file, storeFileMetaData);
-checksums.add(storeFileMetaData);
-}
-checksums.write(store);
-
-metadata = store.getMetadata();
-Map<String, StoreFileMetaData> stringStoreFileMetaDataMap = metadata.asMap();
-assertThat(legacyMeta.size(), equalTo(stringStoreFileMetaDataMap.size()));
-if (usesOldCodec) {
-for (StoreFileMetaData meta : legacyMeta.values()) {
-assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
-assertEquals(meta.name() + "checksum", meta.checksum());
-assertTrue(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
-}
-} else {
-
-// even if we have a legacy checksum - if we use a new codec we should reuse
-for (StoreFileMetaData meta : legacyMeta.values()) {
-assertTrue(meta.toString(), stringStoreFileMetaDataMap.containsKey(meta.name()));
-assertFalse(meta + " vs. " + stringStoreFileMetaDataMap.get(meta.name()), stringStoreFileMetaDataMap.get(meta.name()).isSame(meta));
-StoreFileMetaData storeFileMetaData = metadata.get(meta.name());
-try (IndexInput input = store.openVerifyingInput(meta.name(), IOContext.DEFAULT, storeFileMetaData)) {
-assertTrue(storeFileMetaData.toString(), input instanceof Store.VerifyingIndexInput);
-input.seek(meta.length());
-Store.verify(input);
-}
-}
-}
-assertDeleteContent(store, directoryService);
-IOUtils.close(store);
-
-}
-
 public void testNewChecksums() throws IOException {
 final ShardId shardId = new ShardId("index", "_na_", 1);
 DirectoryService directoryService = new LuceneManagedDirectoryService(random());
@@ -489,7 +400,6 @@ public class StoreTests extends ESTestCase {
 try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
 String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
 assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
-assertThat(meta.hasLegacyChecksum(), equalTo(false));
 assertThat(meta.writtenBy(), equalTo(Version.LATEST));
 if (meta.name().endsWith(".si") || meta.name().startsWith("segments_")) {
 assertThat(meta.hash().length, greaterThan(0));
@@ -503,97 +413,6 @@ public class StoreTests extends ESTestCase {
 IOUtils.close(store);
 }
 
-public void testMixedChecksums() throws IOException {
-final ShardId shardId = new ShardId("index", "_na_", 1);
-DirectoryService directoryService = new LuceneManagedDirectoryService(random());
-Store store = new Store(shardId, INDEX_SETTINGS, directoryService, new DummyShardLock(shardId));
-// this time random codec....
-IndexWriter writer = new IndexWriter(store.directory(), newIndexWriterConfig(random(), new MockAnalyzer(random())).setCodec(TestUtil.getDefaultCodec()));
-int docs = 1 + random().nextInt(100);
-
-for (int i = 0; i < docs; i++) {
-Document doc = new Document();
-doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-doc.add(new SortedDocValuesField("dv", new BytesRef(TestUtil.randomRealisticUnicodeString(random()))));
-writer.addDocument(doc);
-}
-if (random().nextBoolean()) {
-for (int i = 0; i < docs; i++) {
-if (random().nextBoolean()) {
-Document doc = new Document();
-doc.add(new TextField("id", "" + i, random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-doc.add(new TextField("body", TestUtil.randomRealisticUnicodeString(random()), random().nextBoolean() ? Field.Store.YES : Field.Store.NO));
-writer.updateDocument(new Term("id", "" + i), doc);
-}
-}
-}
-if (random().nextBoolean()) {
-DirectoryReader.open(writer, random().nextBoolean()).close(); // flush
-}
-Store.MetadataSnapshot metadata;
-// check before we committed
-try {
-store.getMetadata();
-fail("no index present - expected exception");
-} catch (IndexNotFoundException ex) {
-// expected
-}
-assertThat(store.getMetadataOrEmpty(), is(Store.MetadataSnapshot.EMPTY)); // nothing committed
-writer.commit();
-writer.close();
-Store.LegacyChecksums checksums = new Store.LegacyChecksums();
-metadata = store.getMetadata();
-assertThat(metadata.asMap().isEmpty(), is(false));
-for (StoreFileMetaData meta : metadata) {
-try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
-if (meta.checksum() == null) {
-String checksum = null;
-try {
-CodecUtil.retrieveChecksum(input);
-fail("expected a corrupt index - posting format has not checksums");
-} catch (CorruptIndexException | IndexFormatTooOldException | IndexFormatTooNewException ex) {
-try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
-checksumIndexInput.seek(meta.length());
-checksum = Store.digestToString(checksumIndexInput.getChecksum());
-}
-// fine - it's a postings format without checksums
-checksums.add(new StoreFileMetaData(meta.name(), meta.length(), checksum, null));
-}
-} else {
-String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
-assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
-assertThat(meta.hasLegacyChecksum(), equalTo(false));
-assertThat(meta.writtenBy(), equalTo(Version.LATEST));
-}
-}
-}
-assertConsistent(store, metadata);
-checksums.write(store);
-metadata = store.getMetadata();
-assertThat(metadata.asMap().isEmpty(), is(false));
-for (StoreFileMetaData meta : metadata) {
-assertThat("file: " + meta.name() + " has a null checksum", meta.checksum(), not(nullValue()));
-if (meta.hasLegacyChecksum()) {
-try (ChecksumIndexInput checksumIndexInput = store.directory().openChecksumInput(meta.name(), IOContext.DEFAULT)) {
-checksumIndexInput.seek(meta.length());
-assertThat(meta.checksum(), equalTo(Store.digestToString(checksumIndexInput.getChecksum())));
-}
-} else {
-try (IndexInput input = store.directory().openInput(meta.name(), IOContext.DEFAULT)) {
-String checksum = Store.digestToString(CodecUtil.retrieveChecksum(input));
-assertThat("File: " + meta.name() + " has a different checksum", meta.checksum(), equalTo(checksum));
-assertThat(meta.hasLegacyChecksum(), equalTo(false));
-assertThat(meta.writtenBy(), equalTo(Version.LATEST));
-}
-}
-}
-assertConsistent(store, metadata);
-TestUtil.checkIndex(store.directory());
-assertDeleteContent(store, directoryService);
-IOUtils.close(store);
-}
-
 public void testRenameFile() throws IOException {
 final ShardId shardId = new ShardId("index", "_na_", 1);
 DirectoryService directoryService = new LuceneManagedDirectoryService(random(), false);
@@ -654,18 +473,7 @@ public class StoreTests extends ESTestCase {
         }
 
         final Adler32 adler32 = new Adler32();
-        long legacyFileLength = 0;
-        try (IndexOutput output = dir.createOutput("legacy.bin", IOContext.DEFAULT)) {
-            int iters = scaledRandomIntBetween(10, 100);
-            for (int i = 0; i < iters; i++) {
-                BytesRef bytesRef = new BytesRef(TestUtil.randomRealisticUnicodeString(random(), 10, 1024));
-                output.writeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length);
-                adler32.update(bytesRef.bytes, bytesRef.offset, bytesRef.length);
-                legacyFileLength += bytesRef.length;
-            }
-        }
         final long luceneChecksum;
-        final long adler32LegacyChecksum = adler32.getValue();
         try (IndexInput indexInput = dir.openInput("lucene_checksum.bin", IOContext.DEFAULT)) {
             assertEquals(luceneFileLength, indexInput.length());
             luceneChecksum = CodecUtil.retrieveChecksum(indexInput);
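
The "legacy.bin" file in the hunk above is checksummed with java.util.zip.Adler32, which is what the pre-Lucene-4.8 legacy checksum format stored; the test converts the value to a string via Store.digestToString before putting it into StoreFileMetaData. A minimal illustrative sketch of that calculation (class name and sample bytes are made up, not part of the commit):

import java.nio.charset.StandardCharsets;
import java.util.zip.Adler32;

// Illustrative only: the removed legacy path checksummed the raw file bytes with
// Adler32 instead of relying on a Lucene CRC32 footer.
public class LegacyAdler32Sketch {
    public static void main(String[] args) {
        byte[] bytes = "some file content".getBytes(StandardCharsets.UTF_8);
        Adler32 adler32 = new Adler32();
        adler32.update(bytes, 0, bytes.length);
        // The test above stringifies this value (Store.digestToString) and stores it
        // in the per-file metadata; that is the "legacy checksum" being removed.
        System.out.println(adler32.getValue());
    }
}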
@@ -673,38 +481,22 @@ public class StoreTests extends ESTestCase {
 
         { // positive check
             StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
-            StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
-            assertTrue(legacy.hasLegacyChecksum());
-            assertFalse(lucene.hasLegacyChecksum());
             assertTrue(Store.checkIntegrityNoException(lucene, dir));
-            assertTrue(Store.checkIntegrityNoException(legacy, dir));
         }
 
         { // negative check - wrong checksum
             StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength, Store.digestToString(luceneChecksum + 1), Version.LUCENE_4_8_0);
-            StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum + 1));
-            assertTrue(legacy.hasLegacyChecksum());
-            assertFalse(lucene.hasLegacyChecksum());
             assertFalse(Store.checkIntegrityNoException(lucene, dir));
-            assertFalse(Store.checkIntegrityNoException(legacy, dir));
         }
 
         { // negative check - wrong length
             StoreFileMetaData lucene = new StoreFileMetaData("lucene_checksum.bin", luceneFileLength + 1, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
-            StoreFileMetaData legacy = new StoreFileMetaData("legacy.bin", legacyFileLength + 1, Store.digestToString(adler32LegacyChecksum));
-            assertTrue(legacy.hasLegacyChecksum());
-            assertFalse(lucene.hasLegacyChecksum());
             assertFalse(Store.checkIntegrityNoException(lucene, dir));
-            assertFalse(Store.checkIntegrityNoException(legacy, dir));
         }
 
         { // negative check - wrong file
             StoreFileMetaData lucene = new StoreFileMetaData("legacy.bin", luceneFileLength, Store.digestToString(luceneChecksum), Version.LUCENE_4_8_0);
-            StoreFileMetaData legacy = new StoreFileMetaData("lucene_checksum.bin", legacyFileLength, Store.digestToString(adler32LegacyChecksum));
-            assertTrue(legacy.hasLegacyChecksum());
-            assertFalse(lucene.hasLegacyChecksum());
             assertFalse(Store.checkIntegrityNoException(lucene, dir));
-            assertFalse(Store.checkIntegrityNoException(legacy, dir));
         }
         dir.close();
 
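
With the legacy path gone, integrity checking rests entirely on the Lucene CRC32 footer, along the lines of the "lucene_checksum.bin" assertions kept above. A minimal sketch of that mechanism using plain Lucene APIs (file and codec names are illustrative, not from the commit):

import org.apache.lucene.codecs.CodecUtil;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.IOContext;
import org.apache.lucene.store.IndexInput;
import org.apache.lucene.store.IndexOutput;
import org.apache.lucene.store.RAMDirectory;

// Illustrative sketch: write a file that ends with the standard Lucene footer,
// then compare the CRC32 recorded in the footer against a checksum recomputed
// over the whole file.
public class CrcFooterSketch {
    public static void main(String[] args) throws Exception {
        try (Directory dir = new RAMDirectory()) {
            try (IndexOutput out = dir.createOutput("demo.bin", IOContext.DEFAULT)) {
                CodecUtil.writeHeader(out, "demo", 0);
                out.writeInt(42);
                CodecUtil.writeFooter(out); // appends magic, algorithm id and CRC32
            }
            try (IndexInput in = dir.openInput("demo.bin", IOContext.DEFAULT)) {
                long stored = CodecUtil.retrieveChecksum(in);        // reads the footer value
                long recomputed = CodecUtil.checksumEntireFile(in);  // re-reads and validates the file
                System.out.println(stored == recomputed);            // true for an intact file
            }
        }
    }
}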
@@ -827,7 +619,7 @@ public class StoreTests extends ESTestCase {
 
     public static void assertConsistent(Store store, Store.MetadataSnapshot metadata) throws IOException {
         for (String file : store.directory().listAll()) {
-            if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && !Store.isChecksum(file) && file.startsWith("extra") == false) {
+            if (!IndexWriter.WRITE_LOCK_NAME.equals(file) && !IndexFileNames.OLD_SEGMENTS_GEN.equals(file) && file.startsWith("extra") == false) {
                 assertTrue(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
             } else {
                 assertFalse(file + " is not in the map: " + metadata.asMap().size() + " vs. " + store.directory().listAll().length, metadata.asMap().containsKey(file));
@@ -835,21 +627,6 @@ public class StoreTests extends ESTestCase {
         }
     }
 
-    /**
-     * Legacy indices without lucene CRC32 did never write or calculate checksums for segments_N files
-     * but for other files
-     */
-    public void testRecoveryDiffWithLegacyCommit() {
-        Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
-        metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
-        metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
-        Store.MetadataSnapshot first = new Store.MetadataSnapshot(unmodifiableMap(new HashMap<>(metaDataMap)), emptyMap(), 0);
-
-        Store.MetadataSnapshot second = new Store.MetadataSnapshot(unmodifiableMap(new HashMap<>(metaDataMap)), emptyMap(), 0);
-        Store.RecoveryDiff recoveryDiff = first.recoveryDiff(second);
-        assertEquals(recoveryDiff.toString(), recoveryDiff.different.size(), 2);
-    }
-
     public void testRecoveryDiff() throws IOException, InterruptedException {
         int numDocs = 2 + random().nextInt(100);
         List<Document> docs = new ArrayList<>();
@@ -1043,21 +820,6 @@ public class StoreTests extends ESTestCase {
 
         Store.MetadataSnapshot secondMeta = store.getMetadata();
 
-        Store.LegacyChecksums checksums = new Store.LegacyChecksums();
-        Map<String, StoreFileMetaData> legacyMeta = new HashMap<>();
-        for (String file : store.directory().listAll()) {
-            if (file.equals("write.lock") || file.equals(IndexFileNames.OLD_SEGMENTS_GEN) || file.startsWith("extra")) {
-                continue;
-            }
-            BytesRef hash = new BytesRef();
-            if (file.startsWith("segments")) {
-                hash = Store.MetadataSnapshot.hashFile(store.directory(), file);
-            }
-            StoreFileMetaData storeFileMetaData = new StoreFileMetaData(file, store.directory().fileLength(file), file + "checksum", null, hash);
-            legacyMeta.put(file, storeFileMetaData);
-            checksums.add(storeFileMetaData);
-        }
-        checksums.write(store); // write one checksum file here - we expect it to survive all the cleanups
 
         if (randomBoolean()) {
             store.cleanupAndVerify("test", firstMeta);
@@ -1068,16 +830,13 @@ public class StoreTests extends ESTestCase {
                 if (file.startsWith("extra")) {
                     continue;
                 }
-                assertTrue(firstMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
+                assertTrue(firstMeta.contains(file) || file.equals("write.lock"));
-                if (Store.isChecksum(file)) {
-                    numChecksums++;
-                } else if (secondMeta.contains(file) == false) {
+                if (secondMeta.contains(file) == false) {
                     numNotFound++;
                 }
 
             }
             assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
-            assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
         } else {
             store.cleanupAndVerify("test", secondMeta);
             String[] strings = store.directory().listAll();
@@ -1087,16 +846,13 @@ public class StoreTests extends ESTestCase {
                 if (file.startsWith("extra")) {
                     continue;
                 }
-                assertTrue(file, secondMeta.contains(file) || Store.isChecksum(file) || file.equals("write.lock"));
+                assertTrue(file, secondMeta.contains(file) || file.equals("write.lock"));
-                if (Store.isChecksum(file)) {
-                    numChecksums++;
-                } else if (firstMeta.contains(file) == false) {
+                if (firstMeta.contains(file) == false) {
                     numNotFound++;
                 }
 
             }
             assertTrue("at least one file must not be in here since we have two commits?", numNotFound > 0);
-            assertEquals("we wrote one checksum but it's gone now? - checksums are supposed to be kept", numChecksums, 1);
         }
 
         deleteContent(store.directory());
@@ -1105,8 +861,8 @@ public class StoreTests extends ESTestCase {
 
     public void testCleanUpWithLegacyChecksums() throws IOException {
         Map<String, StoreFileMetaData> metaDataMap = new HashMap<>();
-        metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, null, null, new BytesRef(new byte[]{1})));
+        metaDataMap.put("segments_1", new StoreFileMetaData("segments_1", 50, "foobar", Version.LUCENE_4_8_0, new BytesRef(new byte[]{1})));
-        metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", null, new BytesRef()));
+        metaDataMap.put("_0_1.del", new StoreFileMetaData("_0_1.del", 42, "foobarbaz", Version.LUCENE_4_8_0, new BytesRef()));
         Store.MetadataSnapshot snapshot = new Store.MetadataSnapshot(unmodifiableMap(metaDataMap), emptyMap(), 0);
 
         final ShardId shardId = new ShardId("index", "_na_", 1);
@@ -1232,8 +988,8 @@ public class StoreTests extends ESTestCase {
     }
 
     protected Store.MetadataSnapshot createMetaDataSnapshot() {
-        StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1);
+        StoreFileMetaData storeFileMetaData1 = new StoreFileMetaData("segments", 1, "666");
-        StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1);
+        StoreFileMetaData storeFileMetaData2 = new StoreFileMetaData("no_segments", 1, "666");
         Map<String, StoreFileMetaData> storeFileMetaDataMap = new HashMap<>();
         storeFileMetaDataMap.put(storeFileMetaData1.name(), storeFileMetaData1);
         storeFileMetaDataMap.put(storeFileMetaData2.name(), storeFileMetaData2);
@@ -18,6 +18,8 @@
  */
 package org.elasticsearch.indices.recovery;
 
+import org.apache.lucene.codecs.CodecUtil;
+import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||||
import org.elasticsearch.common.util.set.Sets;
|
import org.elasticsearch.common.util.set.Sets;
|
||||||
import org.elasticsearch.index.IndexService;
|
import org.elasticsearch.index.IndexService;
|
||||||
import org.elasticsearch.index.shard.IndexShard;
|
import org.elasticsearch.index.shard.IndexShard;
|
||||||
|
import org.elasticsearch.index.store.Store;
|
||||||
import org.elasticsearch.index.store.StoreFileMetaData;
|
import org.elasticsearch.index.store.StoreFileMetaData;
|
||||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||||
|
|
||||||
|
@@ -50,14 +53,16 @@ public class RecoveryStatusTests extends ESSingleNodeTestCase {
             public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
             }
         });
-        try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store())) {
+        try (IndexOutput indexOutput = status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store())) {
             indexOutput.writeInt(1);
             IndexOutput openIndexOutput = status.getOpenIndexOutput("foo.bar");
             assertSame(openIndexOutput, indexOutput);
             openIndexOutput.writeInt(1);
+            CodecUtil.writeFooter(indexOutput);
         }
 
         try {
-            status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8), status.store());
+            status.openAndPutIndexOutput("foo.bar", new StoreFileMetaData("foo.bar", 8 + CodecUtil.footerLength(), "9z51nw"), status.store());
             fail("file foo.bar is already opened and registered");
         } catch (IllegalStateException ex) {
             assertEquals("output for file [foo.bar] has already been created", ex.getMessage());
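
Since every file now has to carry a Lucene footer, the test above writes one explicitly and sizes the metadata accordingly. A small illustrative sketch of where the "8 + CodecUtil.footerLength()" length comes from (the class name is made up, not part of the commit):

import org.apache.lucene.codecs.CodecUtil;

// Illustrative: the test writes two ints (4 bytes each) and then a Lucene footer,
// so the expected file length is the payload plus the fixed-size footer
// (magic + algorithm id + CRC32 = 16 bytes).
public class ExpectedLengthSketch {
    public static void main(String[] args) {
        int payload = 2 * Integer.BYTES;                         // indexOutput.writeInt(1) is called twice
        System.out.println(payload + CodecUtil.footerLength());  // 8 + 16 = 24
    }
}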
@@ -62,10 +62,10 @@
 
   - match: { test_index.shards.0.type: SNAPSHOT }
   - match: { test_index.shards.0.stage: DONE }
-  - match: { test_index.shards.0.index.files.recovered: 1}
+  - match: { test_index.shards.0.index.files.recovered: 0}
-  - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0}
+  - match: { test_index.shards.0.index.size.recovered_in_bytes: 0}
-  - match: { test_index.shards.0.index.files.reused: 0}
+  - match: { test_index.shards.0.index.files.reused: 1}
-  - match: { test_index.shards.0.index.size.reused_in_bytes: 0}
+  - gt: { test_index.shards.0.index.size.reused_in_bytes: 0}
 
 # Remove our snapshot
   - do:
@@ -53,7 +53,7 @@ setup:
 
   - match: { test_index.shards.0.type: SNAPSHOT }
   - match: { test_index.shards.0.stage: DONE }
-  - match: { test_index.shards.0.index.files.recovered: 1}
+  - match: { test_index.shards.0.index.files.recovered: 0}
-  - gt: { test_index.shards.0.index.size.recovered_in_bytes: 0}
+  - match: { test_index.shards.0.index.size.recovered_in_bytes: 0}
-  - match: { test_index.shards.0.index.files.reused: 0}
+  - match: { test_index.shards.0.index.files.reused: 1}
-  - match: { test_index.shards.0.index.size.reused_in_bytes: 0}
+  - gt: { test_index.shards.0.index.size.reused_in_bytes: 0}
 