diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 22567f1313c..b71b472a253 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -26,8 +26,6 @@ import org.apache.lucene.index.IndexCommit;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexInput;
 import org.apache.lucene.store.RateLimiter;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.apache.lucene.util.SetOnce;
 import org.elasticsearch.ElasticsearchParseException;
 import org.elasticsearch.ExceptionsHelper;
@@ -988,8 +986,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                          final Map<String, BlobMetaData> blobs,
                          final String reason) {
         final String indexGeneration = Integer.toString(fileListGeneration);
-        final String currentIndexGen = indexShardSnapshotsFormat.blobName(indexGeneration);
-
         final BlobStoreIndexShardSnapshots updatedSnapshots = new BlobStoreIndexShardSnapshots(snapshots);
         try {
             // Delete temporary index files first, as we might otherwise fail in the next step creating the new index file if an earlier
@@ -1032,7 +1028,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                         snapshotId, shardId, orphanedBlobs), e);
                 }
             } catch (IOException e) {
-                String message = "Failed to finalize " + reason + " with shard index [" + currentIndexGen + "]";
+                String message =
+                    "Failed to finalize " + reason + " with shard index [" + indexShardSnapshotsFormat.blobName(indexGeneration) + "]";
                 throw new IndexShardSnapshotFailedException(shardId, message, e);
             }
         }
@@ -1169,16 +1166,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
                 List<BlobStoreIndexShardSnapshot.FileInfo> filesInfo = snapshots.findPhysicalIndexFiles(fileName);
                 if (filesInfo != null) {
                     for (BlobStoreIndexShardSnapshot.FileInfo fileInfo : filesInfo) {
-                        try {
-                            // in 1.3.3 we added additional hashes for .si / segments_N files
-                            // to ensure we don't double the space in the repo since old snapshots
-                            // don't have this hash we try to read that hash from the blob store
-                            // in a bwc compatible way.
-                            maybeRecalculateMetadataHash(blobContainer, fileInfo, metadata);
-                        } catch (Exception e) {
-                            logger.warn(() -> new ParameterizedMessage("{} Can't calculate hash from blob for file [{}] [{}]",
-                                shardId, fileInfo.physicalName(), fileInfo.metadata()), e);
-                        }
                         if (fileInfo.isSame(md) && snapshotFileExistsInBlobs(fileInfo, blobs)) {
                             // a commit point file with the same name, size and checksum was already copied to repository
                             // we will reuse it for this snapshot
@@ -1349,32 +1336,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
         }
     }
 
-    /**
-     * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
-     * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
-     * comparison of the files on a per-segment / per-commit level.
-     */
-    private static void maybeRecalculateMetadataHash(final BlobContainer blobContainer, final BlobStoreIndexShardSnapshot.FileInfo fileInfo,
-                                                     Store.MetadataSnapshot snapshot) throws Exception {
-        final StoreFileMetaData metadata;
-        if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
-            if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
-                // we have a hash - check if our repo has a hash too otherwise we have
-                // to calculate it.
-                // we might have multiple parts even though the file is small... make sure we read all of it.
-                try (InputStream stream = new PartSliceStream(blobContainer, fileInfo)) {
-                    BytesRefBuilder builder = new BytesRefBuilder();
-                    Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
-                    BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
-                    assert hash.length == 0;
-                    hash.bytes = builder.bytes();
-                    hash.offset = 0;
-                    hash.length = builder.length();
-                }
-            }
-        }
-    }
-
     private static final class PartSliceStream extends SlicedInputStream {
 
         private final BlobContainer container;
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
index f78ddab9ee4..3abe4d7b507 100644
--- a/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
+++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/FileRestoreContext.java
@@ -27,8 +27,6 @@ import org.apache.lucene.index.IndexFormatTooOldException;
 import org.apache.lucene.index.SegmentInfos;
 import org.apache.lucene.store.IOContext;
 import org.apache.lucene.store.IndexOutput;
-import org.apache.lucene.util.BytesRef;
-import org.apache.lucene.util.BytesRefBuilder;
 import org.elasticsearch.common.lucene.Lucene;
 import org.elasticsearch.common.util.iterable.Iterables;
 import org.elasticsearch.index.shard.ShardId;
@@ -127,17 +125,6 @@ public abstract class FileRestoreContext {
         final Map<String, StoreFileMetaData> snapshotMetaData = new HashMap<>();
         final Map<String, BlobStoreIndexShardSnapshot.FileInfo> fileInfos = new HashMap<>();
         for (final BlobStoreIndexShardSnapshot.FileInfo fileInfo : snapshotFiles.indexFiles()) {
-            try {
-                // in 1.3.3 we added additional hashes for .si / segments_N files
-                // to ensure we don't double the space in the repo since old snapshots
-                // don't have this hash we try to read that hash from the blob store
-                // in a bwc compatible way.
-                maybeRecalculateMetadataHash(fileInfo, recoveryTargetMetadata);
-            } catch (Exception e) {
-                // if the index is broken we might not be able to read it
-                logger.warn(new ParameterizedMessage("[{}] Can't calculate hash from blog for file [{}] [{}]", shardId,
-                    fileInfo.physicalName(), fileInfo.metadata()), e);
-            }
             snapshotMetaData.put(fileInfo.metadata().name(), fileInfo.metadata());
             fileInfos.put(fileInfo.metadata().name(), fileInfo);
         }
@@ -237,7 +224,7 @@
     protected abstract InputStream fileInputStream(BlobStoreIndexShardSnapshot.FileInfo fileInfo);
 
     @SuppressWarnings("unchecked")
-    private Iterable<StoreFileMetaData> concat(Store.RecoveryDiff diff) {
+    private static Iterable<StoreFileMetaData> concat(Store.RecoveryDiff diff) {
         return Iterables.concat(diff.different, diff.missing);
     }
 
@@ -276,29 +263,4 @@ public abstract class FileRestoreContext {
         }
     }
 
-    /**
-     * This is a BWC layer to ensure we update the snapshots metadata with the corresponding hashes before we compare them.
-     * The new logic for StoreFileMetaData reads the entire {@code .si} and {@code segments.n} files to strengthen the
-     * comparison of the files on a per-segment / per-commit level.
-     */
-    private void maybeRecalculateMetadataHash(final BlobStoreIndexShardSnapshot.FileInfo fileInfo, Store.MetadataSnapshot snapshot)
-        throws IOException {
-        final StoreFileMetaData metadata;
-        if (fileInfo != null && (metadata = snapshot.get(fileInfo.physicalName())) != null) {
-            if (metadata.hash().length > 0 && fileInfo.metadata().hash().length == 0) {
-                // we have a hash - check if our repo has a hash too otherwise we have
-                // to calculate it.
-                // we might have multiple parts even though the file is small... make sure we read all of it.
-                try (InputStream stream = fileInputStream(fileInfo)) {
-                    BytesRefBuilder builder = new BytesRefBuilder();
-                    Store.MetadataSnapshot.hashFile(builder, stream, fileInfo.length());
-                    BytesRef hash = fileInfo.metadata().hash(); // reset the file infos metadata hash
-                    assert hash.length == 0;
-                    hash.bytes = builder.bytes();
-                    hash.offset = 0;
-                    hash.length = builder.length();
-                }
-            }
-        }
-    }
 }
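Note on the removed helpers: both maybeRecalculateMetadataHash implementations covered the same pre-1.3.3 BWC case, where old snapshots carry no content hash for .si / segments_N files, by streaming the snapshotted blob in full through Store.MetadataSnapshot.hashFile and copying the result into the empty hash of the snapshot's file metadata. A minimal sketch of that pattern follows; it returns the recomputed hash instead of mutating the metadata in place, and the class and method names are illustrative only, not part of this change.

    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.lucene.util.BytesRef;
    import org.apache.lucene.util.BytesRefBuilder;
    import org.elasticsearch.index.store.Store;

    final class LegacyHashSketch {

        /**
         * Recompute the legacy content hash for a snapshotted file by reading the stream in full,
         * mirroring the BWC helpers removed above. The removed call sites supplied the stream via
         * PartSliceStream (snapshot path) or fileInputStream (restore path).
         */
        static BytesRef recalculateHash(InputStream stream, long fileLength) throws IOException {
            BytesRefBuilder builder = new BytesRefBuilder();
            Store.MetadataSnapshot.hashFile(builder, stream, fileLength); // reads the entire file into the hash
            return builder.toBytesRef();
        }
    }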