diff --git a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java index 7d4df49aeaf..7f7f5d55e72 100644 --- a/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java +++ b/server/src/main/java/org/elasticsearch/repositories/RepositoryData.java @@ -123,6 +123,20 @@ public final class RepositoryData { return indices; } + /** + * Returns the list of {@link IndexId} that have their snapshots updated but not removed (because they are still referenced by other + * snapshots) after removing the given snapshot from the repository. + * + * @param snapshotId SnapshotId to remove + * @return List of indices that are changed but not removed + */ + public List<IndexId> indicesToUpdateAfterRemovingSnapshot(SnapshotId snapshotId) { + return indexSnapshots.entrySet().stream() + .filter(entry -> entry.getValue().size() > 1 && entry.getValue().contains(snapshotId)) + .map(Map.Entry::getKey) + .collect(Collectors.toList()); + } + /** + * Add a snapshot and its indices to the repository; returns a new instance. If the snapshot + * already exists in the repository data, this method throws an IllegalArgumentException. 
diff --git a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java index 520b4d7a5be..c32ee835091 100644 --- a/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java +++ b/server/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java @@ -27,7 +27,6 @@ import org.apache.lucene.store.IOContext; import org.apache.lucene.store.IndexInput; import org.apache.lucene.store.RateLimiter; import org.apache.lucene.util.SetOnce; -import org.elasticsearch.ElasticsearchParseException; import org.elasticsearch.ExceptionsHelper; import org.elasticsearch.Version; import org.elasticsearch.action.ActionListener; @@ -61,7 +60,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.unit.ByteSizeUnit; import org.elasticsearch.common.unit.ByteSizeValue; import org.elasticsearch.common.util.concurrent.AbstractRunnable; -import org.elasticsearch.common.util.set.Sets; import org.elasticsearch.common.xcontent.LoggingDeprecationHandler; import org.elasticsearch.common.xcontent.NamedXContentRegistry; import org.elasticsearch.common.xcontent.XContentFactory; @@ -101,17 +99,13 @@ import java.io.IOException; import java.io.InputStream; import java.nio.file.NoSuchFileException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collection; import java.util.Collections; -import java.util.HashSet; import java.util.List; import java.util.Map; -import java.util.Optional; import java.util.Set; import java.util.concurrent.Executor; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Function; import java.util.stream.Collectors; import static org.elasticsearch.index.snapshots.blobstore.BlobStoreIndexShardSnapshot.FileInfo.canonicalName; @@ -407,6 +401,8 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp try { final Map<String, BlobMetaData> 
rootBlobs = blobContainer().listBlobs(); final RepositoryData repositoryData = getRepositoryData(latestGeneration(rootBlobs.keySet())); + // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never + // delete an index that was created by another master node after writing this index-N blob. final Map<String, BlobContainer> foundIndices = blobStore().blobContainer(indicesPath()).children(); doDeleteShardSnapshots(snapshotId, repositoryStateId, foundIndices, rootBlobs, repositoryData, listener); } catch (Exception ex) { @@ -432,36 +428,13 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp Map<String, BlobMetaData> rootBlobs, RepositoryData repositoryData, ActionListener<Void> listener) throws IOException { final RepositoryData updatedRepositoryData = repositoryData.removeSnapshot(snapshotId); - // Cache the indices that were found before writing out the new index-N blob so that a stuck master will never - // delete an index that was created by another master node after writing this index-N blob. 
writeIndexGen(updatedRepositoryData, repositoryStateId); - SnapshotInfo snapshot = null; - try { - snapshot = getSnapshotInfo(snapshotId); - } catch (SnapshotMissingException ex) { - listener.onFailure(ex); - return; - } catch (IllegalStateException | SnapshotException | ElasticsearchParseException ex) { - logger.warn(() -> new ParameterizedMessage("cannot read snapshot file [{}]", snapshotId), ex); - } - final List<String> snapMetaFilesToDelete = - Arrays.asList(snapshotFormat.blobName(snapshotId.getUUID()), globalMetaDataFormat.blobName(snapshotId.getUUID())); - try { - blobContainer().deleteBlobsIgnoringIfNotExists(snapMetaFilesToDelete); - } catch (IOException e) { - logger.warn(() -> new ParameterizedMessage("[{}] Unable to delete global metadata files", snapshotId), e); - } - final Map<String, IndexId> survivingIndices = updatedRepositoryData.getIndices(); deleteIndices( updatedRepositoryData, - Optional.ofNullable(snapshot).map(info -> info.indices().stream().filter(survivingIndices::containsKey) - .map(updatedRepositoryData::resolveIndexId).collect(Collectors.toList())).orElse(Collections.emptyList()), + repositoryData.indicesToUpdateAfterRemovingSnapshot(snapshotId), snapshotId, ActionListener.delegateFailure(listener, - (l, v) -> cleanupStaleBlobs(foundIndices, - Sets.difference(rootBlobs.keySet(), new HashSet<>(snapMetaFilesToDelete)).stream().collect( - Collectors.toMap(Function.identity(), rootBlobs::get)), - updatedRepositoryData, ActionListener.map(l, ignored -> null)))); + (l, v) -> cleanupStaleBlobs(foundIndices, rootBlobs, updatedRepositoryData, ActionListener.map(l, ignored -> null)))); } /** diff --git a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java index 880bafd2f23..ee0e91e144b 100644 --- a/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java +++ b/server/src/test/java/org/elasticsearch/repositories/RepositoryDataTests.java @@ -42,6 
+42,7 @@ import java.util.Map; import java.util.Set; import static org.elasticsearch.repositories.RepositoryData.EMPTY_REPO_GEN; +import static org.hamcrest.Matchers.containsInAnyOrder; import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; @@ -57,6 +58,17 @@ public class RepositoryDataTests extends ESTestCase { assertEquals(repositoryData1.hashCode(), repositoryData2.hashCode()); } + public void testIndicesToUpdateAfterRemovingSnapshot() { + final RepositoryData repositoryData = generateRandomRepoData(); + final List<IndexId> indicesBefore = new ArrayList<>(repositoryData.getIndices().values()); + final SnapshotId randomSnapshot = randomFrom(repositoryData.getSnapshotIds()); + final IndexId[] indicesToUpdate = indicesBefore.stream().filter(index -> { + final Set<SnapshotId> snapshotIds = repositoryData.getSnapshots(index); + return snapshotIds.contains(randomSnapshot) && snapshotIds.size() > 1; + }).toArray(IndexId[]::new); + assertThat(repositoryData.indicesToUpdateAfterRemovingSnapshot(randomSnapshot), containsInAnyOrder(indicesToUpdate)); + } + public void testXContent() throws IOException { RepositoryData repositoryData = generateRandomRepoData(); XContentBuilder builder = JsonXContent.contentBuilder();