From b720216395b39b350e027882efb051a22ecf5b16 Mon Sep 17 00:00:00 2001
From: Ali Beyad
Date: Thu, 2 Jun 2016 17:01:14 -0400
Subject: [PATCH] Adds UUIDs to snapshots
This commit adds a UUID to each snapshot, alongside the already existing
repository and snapshot name. Snapshot UUIDs uniquely identify each snapshot
and enable more robust handling of deleting previous snapshots and of
lingering files left behind by partially failed delete operations.
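The identity model assumed throughout the diff is a repository-scoped pair: a SnapshotId carries the user-facing name plus a generated UUID, and a Snapshot binds a SnapshotId to its repository. The hunks below rely only on the accessors getName(), getUUID(), getRepository() and getSnapshotId(); the following is a minimal illustrative sketch of that shape, not the classes added by this patch (those also implement wire serialization via readFrom/writeTo):

    import java.util.Objects;
    import java.util.UUID;

    // Sketch only: equality includes the UUID, so two snapshots that reuse a
    // name remain distinct and can be tracked (and deleted) independently.
    final class SnapshotIdSketch {
        private final String name;
        private final String uuid;

        SnapshotIdSketch(String name, String uuid) {
            this.name = Objects.requireNonNull(name);
            this.uuid = Objects.requireNonNull(uuid);
        }

        static SnapshotIdSketch create(String name) {
            return new SnapshotIdSketch(name, UUID.randomUUID().toString());
        }

        String getName() { return name; }
        String getUUID() { return uuid; }

        @Override public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            SnapshotIdSketch that = (SnapshotIdSketch) o;
            return name.equals(that.name) && uuid.equals(that.uuid);
        }

        @Override public int hashCode() { return Objects.hash(name, uuid); }
    }

    // A Snapshot pairs a repository name with a SnapshotId, replacing the old
    // repository+name SnapshotId from cluster.metadata.
    final class SnapshotSketch {
        private final String repository;
        private final SnapshotIdSketch snapshotId;

        SnapshotSketch(String repository, SnapshotIdSketch snapshotId) {
            this.repository = Objects.requireNonNull(repository);
            this.snapshotId = Objects.requireNonNull(snapshotId);
        }

        String getRepository() { return repository; }
        SnapshotIdSketch getSnapshotId() { return snapshotId; }

        @Override public String toString() {
            return repository + ":" + snapshotId.getName() + "/" + snapshotId.getUUID();
        }
    }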
Closes #18228
Relates #18156
---
.../create/TransportCreateSnapshotAction.java | 18 +-
.../delete/TransportDeleteSnapshotAction.java | 4 +-
.../snapshots/get/GetSnapshotsResponse.java | 2 +-
.../get/TransportGetSnapshotsAction.java | 56 ++-
.../TransportRestoreSnapshotAction.java | 15 +-
.../snapshots/status/SnapshotStatus.java | 45 +-
.../status/TransportNodesSnapshotsStatus.java | 38 +-
.../TransportSnapshotsStatusAction.java | 99 +++--
.../cluster/RestoreInProgress.java | 103 ++---
.../cluster/SnapshotsInProgress.java | 99 +++--
.../cluster/metadata/SnapshotId.java | 127 ------
.../cluster/routing/IndexRoutingTable.java | 10 +-
.../cluster/routing/RestoreSource.java | 53 ++-
.../elasticsearch/index/shard/IndexShard.java | 3 +-
.../index/shard/StoreRecovery.java | 4 +-
.../index/snapshots/IndexShardRepository.java | 2 +-
.../BlobStoreIndexShardRepository.java | 14 +-
.../cluster/IndicesClusterStateService.java | 4 +-
.../repositories/Repository.java | 12 +-
.../blobstore/BlobStoreRepository.java | 185 ++++++---
.../repositories/uri/URLRepository.java | 3 +-
.../rest/action/cat/RestRecoveryAction.java | 4 +-
.../rest/action/cat/RestSnapshotAction.java | 4 +-
.../ConcurrentSnapshotExecutionException.java | 8 +-
.../InvalidSnapshotNameException.java | 5 +-
.../snapshots/RestoreService.java | 220 +++++-----
.../org/elasticsearch/snapshots/Snapshot.java | 102 +++++
.../snapshots/SnapshotCreationException.java | 17 +-
.../snapshots/SnapshotException.java | 63 ++-
.../elasticsearch/snapshots/SnapshotId.java | 118 ++++++
.../elasticsearch/snapshots/SnapshotInfo.java | 79 ++--
.../snapshots/SnapshotMissingException.java | 13 +-
.../snapshots/SnapshotRestoreException.java | 13 +-
.../snapshots/SnapshotShardsService.java | 90 ++--
.../snapshots/SnapshotsService.java | 387 ++++++++++--------
.../ExceptionSerializationTests.java | 16 +-
.../cluster/snapshots/SnapshotBlocksIT.java | 2 +-
.../bwcompat/RepositoryUpgradabilityIT.java | 195 +++++++++
.../bwcompat/RestoreBackwardsCompatIT.java | 1 -
.../cluster/ClusterStateDiffIT.java | 7 +-
.../cluster/routing/ShardRoutingTests.java | 13 +-
.../cluster/routing/UnassignedInfoTests.java | 8 +-
.../NodeVersionAllocationDeciderTests.java | 7 +-
.../gateway/PrimaryShardAllocatorTests.java | 8 +-
.../index/shard/IndexShardTests.java | 7 +-
.../blobstore/BlobStoreRepositoryTests.java | 230 +++++++++++
.../action/cat/RestRecoveryActionTests.java | 15 +-
.../AbstractSnapshotIntegTestCase.java | 33 +-
.../DedicatedClusterSnapshotRestoreIT.java | 8 +-
.../SharedClusterSnapshotRestoreIT.java | 271 +++++++++---
.../SnapshotBackwardsCompatibilityIT.java | 2 +-
.../snapshots/SnapshotTests.java | 57 +++
.../snapshots/mockstore/MockRepository.java | 6 +-
.../repositories/azure/AzureRepository.java | 4 +-
.../storage/AzureStorageServiceMock.java | 11 +-
.../common/io/FileTestUtils.java | 44 ++
.../test/ESSingleNodeTestCase.java | 6 +-
57 files changed, 2019 insertions(+), 951 deletions(-)
delete mode 100644 core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java
create mode 100644 core/src/main/java/org/elasticsearch/snapshots/Snapshot.java
create mode 100644 core/src/main/java/org/elasticsearch/snapshots/SnapshotId.java
create mode 100644 core/src/test/java/org/elasticsearch/bwcompat/RepositoryUpgradabilityIT.java
create mode 100644 core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
create mode 100644 core/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java
index 2654ac0c269..53b1bf86f66 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/create/TransportCreateSnapshotAction.java
@@ -26,10 +26,10 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotInfo;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
@@ -72,7 +72,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction
protected void masterOperation(final CreateSnapshotRequest request, ClusterState state, final ActionListener<CreateSnapshotResponse> listener) {
SnapshotsService.SnapshotRequest snapshotRequest =
- new SnapshotsService.SnapshotRequest("create_snapshot [" + request.snapshot() + "]", request.snapshot(), request.repository())
+ new SnapshotsService.SnapshotRequest(request.repository(), request.snapshot(), "create_snapshot [" + request.snapshot() + "]")
.indices(request.indices())
.indicesOptions(request.indicesOptions())
.partial(request.partial())
@@ -84,19 +84,19 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction listener) {
- SnapshotId snapshotIds = new SnapshotId(request.repository(), request.snapshot());
- snapshotsService.deleteSnapshot(snapshotIds, new SnapshotsService.DeleteSnapshotListener() {
+ snapshotsService.deleteSnapshot(request.repository(), request.snapshot(), new SnapshotsService.DeleteSnapshotListener() {
@Override
public void onResponse() {
listener.onResponse(new DeleteSnapshotResponse(true));
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
index ec996e6d366..924f5a90d42 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/GetSnapshotsResponse.java
@@ -42,7 +42,7 @@ public class GetSnapshotsResponse extends ActionResponse implements ToXContent {
}
GetSnapshotsResponse(List<SnapshotInfo> snapshots) {
- this.snapshots = snapshots;
+ this.snapshots = Collections.unmodifiableList(snapshots);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java
index 833b1a62289..f734201e7bd 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/get/TransportGetSnapshotsAction.java
@@ -26,20 +26,22 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.ArrayList;
-import java.util.Collections;
+import java.util.HashMap;
import java.util.LinkedHashSet;
import java.util.List;
+import java.util.Map;
import java.util.Set;
/**
@@ -52,7 +54,8 @@ public class TransportGetSnapshotsAction extends TransportMasterNodeAction
- protected void masterOperation(final GetSnapshotsRequest request, ClusterState state, final ActionListener<GetSnapshotsResponse> listener) {
+ protected void masterOperation(final GetSnapshotsRequest request, ClusterState state,
+ final ActionListener<GetSnapshotsResponse> listener) {
try {
+ final String repository = request.repository();
List<SnapshotInfo> snapshotInfoBuilder = new ArrayList<>();
if (isAllSnapshots(request.snapshots())) {
- snapshotInfoBuilder.addAll(snapshotsService.snapshots(request.repository(), request.ignoreUnavailable()));
+ snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository));
+ snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository,
+ snapshotsService.snapshotIds(repository),
+ request.ignoreUnavailable()));
} else if (isCurrentSnapshots(request.snapshots())) {
- snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(request.repository()));
+ snapshotInfoBuilder.addAll(snapshotsService.currentSnapshots(repository));
} else {
- Set<String> snapshotsToGet = new LinkedHashSet<>(); // to keep insertion order
- List<SnapshotInfo> snapshots = null;
+ final Map<String, SnapshotId> allSnapshotIds = new HashMap<>();
+ for (SnapshotInfo snapshotInfo : snapshotsService.currentSnapshots(repository)) {
+ SnapshotId snapshotId = snapshotInfo.snapshotId();
+ allSnapshotIds.put(snapshotId.getName(), snapshotId);
+ }
+ for (SnapshotId snapshotId : snapshotsService.snapshotIds(repository)) {
+ allSnapshotIds.put(snapshotId.getName(), snapshotId);
+ }
+ final Set<SnapshotId> toResolve = new LinkedHashSet<>(); // maintain order
for (String snapshotOrPattern : request.snapshots()) {
if (Regex.isSimpleMatchPattern(snapshotOrPattern) == false) {
- snapshotsToGet.add(snapshotOrPattern);
- } else {
- if (snapshots == null) { // lazily load snapshots
- snapshots = snapshotsService.snapshots(request.repository(), request.ignoreUnavailable());
+ if (allSnapshotIds.containsKey(snapshotOrPattern)) {
+ toResolve.add(allSnapshotIds.get(snapshotOrPattern));
+ } else if (request.ignoreUnavailable() == false) {
+ throw new SnapshotMissingException(repository, snapshotOrPattern);
}
- for (SnapshotInfo snapshot : snapshots) {
- if (Regex.simpleMatch(snapshotOrPattern, snapshot.name())) {
- snapshotsToGet.add(snapshot.name());
+ } else {
+ for (Map.Entry<String, SnapshotId> entry : allSnapshotIds.entrySet()) {
+ if (Regex.simpleMatch(snapshotOrPattern, entry.getKey())) {
+ toResolve.add(entry.getValue());
}
}
}
}
- for (String snapshot : snapshotsToGet) {
- SnapshotId snapshotId = new SnapshotId(request.repository(), snapshot);
- snapshotInfoBuilder.add(snapshotsService.snapshot(snapshotId));
+
+ if (toResolve.isEmpty() && request.ignoreUnavailable() == false) {
+ throw new SnapshotMissingException(repository, request.snapshots()[0]);
}
+
+ snapshotInfoBuilder.addAll(snapshotsService.snapshots(repository, new ArrayList<>(toResolve), request.ignoreUnavailable()));
}
- listener.onResponse(new GetSnapshotsResponse(Collections.unmodifiableList(snapshotInfoBuilder)));
+ listener.onResponse(new GetSnapshotsResponse(snapshotInfoBuilder));
} catch (Throwable t) {
listener.onFailure(t);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
index cbbc195370c..a38fbce46c2 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/restore/TransportRestoreSnapshotAction.java
@@ -26,12 +26,12 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.snapshots.RestoreInfo;
import org.elasticsearch.snapshots.RestoreService;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -72,23 +72,22 @@ public class TransportRestoreSnapshotAction extends TransportMasterNodeAction
- protected void masterOperation(final RestoreSnapshotRequest request, ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) {
- RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(
- "restore_snapshot[" + request.snapshot() + "]", request.repository(), request.snapshot(),
+ protected void masterOperation(final RestoreSnapshotRequest request, final ClusterState state, final ActionListener<RestoreSnapshotResponse> listener) {
+ RestoreService.RestoreRequest restoreRequest = new RestoreService.RestoreRequest(request.repository(), request.snapshot(),
request.indices(), request.indicesOptions(), request.renamePattern(), request.renameReplacement(),
request.settings(), request.masterNodeTimeout(), request.includeGlobalState(), request.partial(), request.includeAliases(),
- request.indexSettings(), request.ignoreIndexSettings());
+ request.indexSettings(), request.ignoreIndexSettings(), "restore_snapshot[" + request.snapshot() + "]");
restoreService.restoreSnapshot(restoreRequest, new ActionListener<RestoreInfo>() {
@Override
public void onResponse(RestoreInfo restoreInfo) {
if (restoreInfo == null && request.waitForCompletion()) {
restoreService.addListener(new ActionListener<RestoreService.RestoreCompletionResponse>() {
- SnapshotId snapshotId = new SnapshotId(request.repository(), request.snapshot());
-
@Override
public void onResponse(RestoreService.RestoreCompletionResponse restoreCompletionResponse) {
- if (this.snapshotId.equals(restoreCompletionResponse.getSnapshotId())) {
+ final Snapshot snapshot = restoreCompletionResponse.getSnapshot();
+ if (snapshot.getRepository().equals(request.repository()) &&
+ snapshot.getSnapshotId().getName().equals(request.snapshot())) {
listener.onResponse(new RestoreSnapshotResponse(restoreCompletionResponse.getRestoreInfo()));
restoreService.removeListener(this);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
index 54f87f3c5fc..1a5ef9ab933 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/SnapshotStatus.java
@@ -20,7 +20,7 @@
package org.elasticsearch.action.admin.cluster.snapshots.status;
import org.elasticsearch.cluster.SnapshotsInProgress.State;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -35,6 +35,7 @@ import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
+import java.util.Objects;
import java.util.Set;
import static java.util.Collections.unmodifiableMap;
@@ -44,7 +45,7 @@ import static java.util.Collections.unmodifiableMap;
*/
public class SnapshotStatus implements ToXContent, Streamable {
- private SnapshotId snapshotId;
+ private Snapshot snapshot;
private State state;
@@ -56,11 +57,10 @@ public class SnapshotStatus implements ToXContent, Streamable {
private SnapshotStats stats;
-
- SnapshotStatus(SnapshotId snapshotId, State state, List<SnapshotIndexShardStatus> shards) {
- this.snapshotId = snapshotId;
- this.state = state;
- this.shards = shards;
+ SnapshotStatus(final Snapshot snapshot, final State state, final List<SnapshotIndexShardStatus> shards) {
+ this.snapshot = Objects.requireNonNull(snapshot);
+ this.state = Objects.requireNonNull(state);
+ this.shards = Objects.requireNonNull(shards);
shardsStats = new SnapshotShardsStats(shards);
updateShardStats();
}
@@ -69,10 +69,10 @@ public class SnapshotStatus implements ToXContent, Streamable {
}
/**
- * Returns snapshot id
+ * Returns snapshot
*/
- public SnapshotId getSnapshotId() {
- return snapshotId;
+ public Snapshot getSnapshot() {
+ return snapshot;
}
/**
@@ -124,7 +124,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
@Override
public void readFrom(StreamInput in) throws IOException {
- snapshotId = SnapshotId.readSnapshotId(in);
+ snapshot = new Snapshot(in);
state = State.fromValue(in.readByte());
int size = in.readVInt();
List<SnapshotIndexShardStatus> builder = new ArrayList<>();
@@ -137,7 +137,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
@Override
public void writeTo(StreamOutput out) throws IOException {
- snapshotId.writeTo(out);
+ snapshot.writeTo(out);
out.writeByte(state.value());
out.writeVInt(shards.size());
for (SnapshotIndexShardStatus shard : shards) {
@@ -170,7 +170,6 @@ public class SnapshotStatus implements ToXContent, Streamable {
}
}
-
/**
* Returns number of files in the snapshot
*/
@@ -178,22 +177,22 @@ public class SnapshotStatus implements ToXContent, Streamable {
return stats;
}
- static final class Fields {
- static final String SNAPSHOT = "snapshot";
- static final String REPOSITORY = "repository";
- static final String STATE = "state";
- static final String INDICES = "indices";
- }
+ private static final String SNAPSHOT = "snapshot";
+ private static final String REPOSITORY = "repository";
+ private static final String UUID = "uuid";
+ private static final String STATE = "state";
+ private static final String INDICES = "indices";
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
- builder.field(Fields.SNAPSHOT, snapshotId.getSnapshot());
- builder.field(Fields.REPOSITORY, snapshotId.getRepository());
- builder.field(Fields.STATE, state.name());
+ builder.field(SNAPSHOT, snapshot.getSnapshotId().getName());
+ builder.field(REPOSITORY, snapshot.getRepository());
+ builder.field(UUID, snapshot.getSnapshotId().getUUID());
+ builder.field(STATE, state.name());
shardsStats.toXContent(builder, params);
stats.toXContent(builder, params);
- builder.startObject(Fields.INDICES);
+ builder.startObject(INDICES);
for (SnapshotIndexStatus indexStatus : getIndices().values()) {
indexStatus.toXContent(builder, params);
}
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
index bc139389460..8e7361c1928 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportNodesSnapshotsStatus.java
@@ -29,7 +29,7 @@ import org.elasticsearch.action.support.nodes.BaseNodesResponse;
import org.elasticsearch.action.support.nodes.TransportNodesAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
@@ -94,11 +94,11 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction
- Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
+ Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>();
try {
String nodeId = clusterService.localNode().getId();
- for (SnapshotId snapshotId : request.snapshotIds) {
- Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshotId);
+ for (Snapshot snapshot : request.snapshots) {
+ Map<ShardId, IndexShardSnapshotStatus> shardsStatus = snapshotShardsService.currentSnapshotShards(snapshot);
if (shardsStatus == null) {
continue;
}
@@ -114,7 +114,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction {
- private SnapshotId[] snapshotIds;
+ private Snapshot[] snapshots;
public Request() {
}
@@ -138,8 +138,8 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction
- private List<SnapshotId> snapshotIds;
+ private List<Snapshot> snapshots;
public NodeRequest() {
}
NodeRequest(String nodeId, TransportNodesSnapshotsStatus.Request request) {
super(nodeId);
- snapshotIds = Arrays.asList(request.snapshotIds);
+ snapshots = Arrays.asList(request.snapshots);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- snapshotIds = in.readList(SnapshotId::readSnapshotId);
+ snapshots = in.readList(Snapshot::new);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
- out.writeStreamableList(snapshotIds);
+ out.writeList(snapshots);
}
}
public static class NodeSnapshotStatus extends BaseNodeResponse {
- private Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status;
+ private Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> status;
NodeSnapshotStatus() {
}
- public NodeSnapshotStatus(DiscoveryNode node, Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status) {
+ public NodeSnapshotStatus(DiscoveryNode node, Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> status) {
super(node);
this.status = status;
}
- public Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> status() {
+ public Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> status() {
return status;
}
@@ -222,9 +222,9 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction
- Map<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>(numberOfSnapshots);
+ Map<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> snapshotMapBuilder = new HashMap<>(numberOfSnapshots);
for (int i = 0; i < numberOfSnapshots; i++) {
- SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
+ Snapshot snapshot = new Snapshot(in);
int numberOfShards = in.readVInt();
Map<ShardId, SnapshotIndexShardStatus> shardMapBuilder = new HashMap<>(numberOfShards);
for (int j = 0; j < numberOfShards; j++) {
@@ -232,7 +232,7 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction
- for (Map.Entry<SnapshotId, Map<ShardId, SnapshotIndexShardStatus>> entry : status.entrySet()) {
+ for (Map.Entry<Snapshot, Map<ShardId, SnapshotIndexShardStatus>> entry : status.entrySet()) {
entry.getKey().writeTo(out);
out.writeVInt(entry.getValue().size());
for (Map.Entry<ShardId, SnapshotIndexShardStatus> shardEntry : entry.getValue().entrySet()) {
diff --git a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
index ee60a919da6..afbb2179c0a 100644
--- a/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
+++ b/core/src/main/java/org/elasticsearch/action/admin/cluster/snapshots/status/TransportSnapshotsStatusAction.java
@@ -29,26 +29,32 @@ import org.elasticsearch.cluster.SnapshotsInProgress;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
+import org.elasticsearch.snapshots.Snapshot;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;
+import org.elasticsearch.snapshots.SnapshotMissingException;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.function.Function;
+import java.util.stream.Collectors;
/**
*/
@@ -87,8 +93,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction
protected void masterOperation(final SnapshotsStatusRequest request, ClusterState state, final ActionListener<SnapshotsStatusResponse> listener) throws Exception {
- List<SnapshotsInProgress.Entry> currentSnapshots = snapshotsService.currentSnapshots(request.repository(), request.snapshots());
-
+ List<SnapshotsInProgress.Entry> currentSnapshots =
+ snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
if (currentSnapshots.isEmpty()) {
listener.onResponse(buildResponse(request, currentSnapshots, null));
return;
@@ -105,19 +111,19 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction() {
@Override
public void onResponse(TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) {
try {
List<SnapshotsInProgress.Entry> currentSnapshots =
- snapshotsService.currentSnapshots(request.repository(), request.snapshots());
+ snapshotsService.currentSnapshots(request.repository(), Arrays.asList(request.snapshots()));
listener.onResponse(buildResponse(request, currentSnapshots, nodeSnapshotStatuses));
} catch (Throwable e) {
listener.onFailure(e);
@@ -136,12 +142,12 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction
- private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List<SnapshotsInProgress.Entry> currentSnapshots,
+ private SnapshotsStatusResponse buildResponse(SnapshotsStatusRequest request, List<SnapshotsInProgress.Entry> currentSnapshotEntries,
TransportNodesSnapshotsStatus.NodesSnapshotStatus nodeSnapshotStatuses) throws IOException {
// First process snapshot that are currently processed
List<SnapshotStatus> builder = new ArrayList<>();
- Set<SnapshotId> currentSnapshotIds = new HashSet<>();
- if (!currentSnapshots.isEmpty()) {
+ Set<String> currentSnapshotNames = new HashSet<>();
+ if (!currentSnapshotEntries.isEmpty()) {
Map<String, TransportNodesSnapshotsStatus.NodeSnapshotStatus> nodeSnapshotStatusMap;
if (nodeSnapshotStatuses != null) {
nodeSnapshotStatusMap = nodeSnapshotStatuses.getNodesMap();
@@ -149,8 +155,8 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction();
}
- for (SnapshotsInProgress.Entry entry : currentSnapshots) {
- currentSnapshotIds.add(entry.snapshotId());
+ for (SnapshotsInProgress.Entry entry : currentSnapshotEntries) {
+ currentSnapshotNames.add(entry.snapshot().getSnapshotId().getName());
List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
for (ObjectObjectCursor<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shardEntry : entry.shards()) {
SnapshotsInProgress.ShardSnapshotStatus status = shardEntry.value;
@@ -158,7 +164,7 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction
- Map<ShardId, SnapshotIndexShardStatus> shardStatues = nodeStatus.status().get(entry.snapshotId());
+ Map<ShardId, SnapshotIndexShardStatus> shardStatues = nodeStatus.status().get(entry.snapshot());
if (shardStatues != null) {
SnapshotIndexShardStatus shardStatus = shardStatues.get(shardEntry.key);
if (shardStatus != null) {
@@ -190,41 +196,50 @@ public class TransportSnapshotsStatusAction extends TransportMasterNodeAction
- if (Strings.hasText(request.repository()) && request.snapshots() != null && request.snapshots().length > 0) {
- for (String snapshotName : request.snapshots()) {
- SnapshotId snapshotId = new SnapshotId(request.repository(), snapshotName);
- if (currentSnapshotIds.contains(snapshotId)) {
- // This is a snapshot the is currently running - skipping
+ final String repositoryName = request.repository();
+ if (Strings.hasText(repositoryName) && request.snapshots() != null && request.snapshots().length > 0) {
+ final Set<String> requestedSnapshotNames = Sets.newHashSet(request.snapshots());
+ final Map<String, SnapshotId> matchedSnapshotIds = snapshotsService.snapshotIds(repositoryName).stream()
+ .filter(s -> requestedSnapshotNames.contains(s.getName()))
+ .collect(Collectors.toMap(SnapshotId::getName, Function.identity()));
+ for (final String snapshotName : request.snapshots()) {
+ SnapshotId snapshotId = matchedSnapshotIds.get(snapshotName);
+ if (snapshotId == null) {
+ if (currentSnapshotNames.contains(snapshotName)) {
+ // we've already found this snapshot in the current snapshot entries, so skip over
continue;
+ } else {
+ // neither in the current snapshot entries nor found in the repository
+ throw new SnapshotMissingException(repositoryName, snapshotName);
}
- SnapshotInfo snapshot = snapshotsService.snapshot(snapshotId);
- List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
- if (snapshot.state().completed()) {
- Map<ShardId, IndexShardSnapshotStatus> shardStatues = snapshotsService.snapshotShards(snapshotId);
- for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) {
- shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue()));
- }
- final SnapshotsInProgress.State state;
- switch (snapshot.state()) {
- case FAILED:
- state = SnapshotsInProgress.State.FAILED;
- break;
- case SUCCESS:
- case PARTIAL:
- // Translating both PARTIAL and SUCCESS to SUCCESS for now
- // TODO: add the differentiation on the metadata level in the next major release
- state = SnapshotsInProgress.State.SUCCESS;
- break;
- default:
- throw new IllegalArgumentException("Unknown snapshot state " + snapshot.state());
- }
- builder.add(new SnapshotStatus(snapshotId, state, Collections.unmodifiableList(shardStatusBuilder)));
+ }
+ SnapshotInfo snapshotInfo = snapshotsService.snapshot(repositoryName, snapshotId);
+ List<SnapshotIndexShardStatus> shardStatusBuilder = new ArrayList<>();
+ if (snapshotInfo.state().completed()) {
+ Map<ShardId, IndexShardSnapshotStatus> shardStatues =
+ snapshotsService.snapshotShards(request.repository(), snapshotInfo);
+ for (Map.Entry<ShardId, IndexShardSnapshotStatus> shardStatus : shardStatues.entrySet()) {
+ shardStatusBuilder.add(new SnapshotIndexShardStatus(shardStatus.getKey(), shardStatus.getValue()));
}
+ final SnapshotsInProgress.State state;
+ switch (snapshotInfo.state()) {
+ case FAILED:
+ state = SnapshotsInProgress.State.FAILED;
+ break;
+ case SUCCESS:
+ case PARTIAL:
+ // Translating both PARTIAL and SUCCESS to SUCCESS for now
+ // TODO: add the differentiation on the metadata level in the next major release
+ state = SnapshotsInProgress.State.SUCCESS;
+ break;
+ default:
+ throw new IllegalArgumentException("Unknown snapshot state " + snapshotInfo.state());
+ }
+ builder.add(new SnapshotStatus(new Snapshot(repositoryName, snapshotInfo.snapshotId()), state, Collections.unmodifiableList(shardStatusBuilder)));
}
}
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
index a083476ea2f..55a09f87f75 100644
--- a/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
+++ b/core/src/main/java/org/elasticsearch/cluster/RestoreInProgress.java
@@ -21,7 +21,7 @@ package org.elasticsearch.cluster;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.ClusterState.Custom;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -34,6 +34,7 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
/**
* Meta data about restore processes that are currently executing
@@ -73,22 +74,6 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
return this.entries;
}
- /**
- * Returns currently running restore process with corresponding snapshot id or null if this snapshot is not being
- * restored
- *
- * @param snapshotId snapshot id
- * @return restore metadata or null
- */
- public Entry snapshot(SnapshotId snapshotId) {
- for (Entry entry : entries) {
- if (snapshotId.equals(entry.snapshotId())) {
- return entry;
- }
- }
- return null;
- }
-
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -111,22 +96,22 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
*/
public static class Entry {
private final State state;
- private final SnapshotId snapshotId;
+ private final Snapshot snapshot;
private final ImmutableOpenMap<ShardId, ShardRestoreStatus> shards;
private final List<String> indices;
/**
* Creates new restore metadata
*
- * @param snapshotId snapshot id
+ * @param snapshot snapshot
* @param state current state of the restore process
* @param indices list of indices being restored
* @param shards map of shards being restored to their current restore status
*/
- public Entry(SnapshotId snapshotId, State state, List<String> indices, ImmutableOpenMap<ShardId, ShardRestoreStatus> shards) {
- this.snapshotId = snapshotId;
- this.state = state;
- this.indices = indices;
+ public Entry(Snapshot snapshot, State state, List<String> indices, ImmutableOpenMap<ShardId, ShardRestoreStatus> shards) {
+ this.snapshot = Objects.requireNonNull(snapshot);
+ this.state = Objects.requireNonNull(state);
+ this.indices = Objects.requireNonNull(indices);
if (shards == null) {
this.shards = ImmutableOpenMap.of();
} else {
@@ -135,12 +120,12 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
}
/**
- * Returns snapshot id
+ * Returns snapshot
*
- * @return snapshot id
+ * @return snapshot
*/
- public SnapshotId snapshotId() {
- return this.snapshotId;
+ public Snapshot snapshot() {
+ return this.snapshot;
}
/**
@@ -172,26 +157,22 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
@Override
public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
-
- Entry entry = (Entry) o;
-
- if (!indices.equals(entry.indices)) return false;
- if (!snapshotId.equals(entry.snapshotId)) return false;
- if (!shards.equals(entry.shards)) return false;
- if (state != entry.state) return false;
-
- return true;
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ @SuppressWarnings("unchecked") Entry entry = (Entry) o;
+ return snapshot.equals(entry.snapshot) &&
+ state == entry.state &&
+ indices.equals(entry.indices) &&
+ shards.equals(entry.shards);
}
@Override
public int hashCode() {
- int result = state.hashCode();
- result = 31 * result + snapshotId.hashCode();
- result = 31 * result + shards.hashCode();
- result = 31 * result + indices.hashCode();
- return result;
+ return Objects.hash(snapshot, state, indices, shards);
}
}
@@ -301,31 +282,29 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
@Override
public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
- ShardRestoreStatus status = (ShardRestoreStatus) o;
-
- if (nodeId != null ? !nodeId.equals(status.nodeId) : status.nodeId != null) return false;
- if (reason != null ? !reason.equals(status.reason) : status.reason != null) return false;
- if (state != status.state) return false;
-
- return true;
+ @SuppressWarnings("unchecked") ShardRestoreStatus status = (ShardRestoreStatus) o;
+ return state == status.state &&
+ Objects.equals(nodeId, status.nodeId) &&
+ Objects.equals(reason, status.reason);
}
@Override
public int hashCode() {
- int result = state != null ? state.hashCode() : 0;
- result = 31 * result + (nodeId != null ? nodeId.hashCode() : 0);
- result = 31 * result + (reason != null ? reason.hashCode() : 0);
- return result;
+ return Objects.hash(state, nodeId, reason);
}
}
/**
* Shard restore process state
*/
- public static enum State {
+ public enum State {
/**
* Initializing state
*/
@@ -409,7 +388,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
public RestoreInProgress readFrom(StreamInput in) throws IOException {
Entry[] entries = new Entry[in.readVInt()];
for (int i = 0; i < entries.length; i++) {
- SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
+ Snapshot snapshot = new Snapshot(in);
State state = State.fromValue(in.readByte());
int indices = in.readVInt();
List<String> indexBuilder = new ArrayList<>();
@@ -423,7 +402,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
ShardRestoreStatus shardState = ShardRestoreStatus.readShardRestoreStatus(in);
builder.put(shardId, shardState);
}
- entries[i] = new Entry(snapshotId, state, Collections.unmodifiableList(indexBuilder), builder.build());
+ entries[i] = new Entry(snapshot, state, Collections.unmodifiableList(indexBuilder), builder.build());
}
return new RestoreInProgress(entries);
}
@@ -435,7 +414,7 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(entries.size());
for (Entry entry : entries) {
- entry.snapshotId().writeTo(out);
+ entry.snapshot().writeTo(out);
out.writeByte(entry.state().value());
out.writeVInt(entry.indices().size());
for (String index : entry.indices()) {
@@ -471,8 +450,8 @@ public class RestoreInProgress extends AbstractDiffable implements Custo
*/
public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
- builder.field("snapshot", entry.snapshotId().getSnapshot());
- builder.field("repository", entry.snapshotId().getRepository());
+ builder.field("snapshot", entry.snapshot().getSnapshotId().getName());
+ builder.field("repository", entry.snapshot().getRepository());
builder.field("state", entry.state());
builder.startArray("indices");
{
diff --git a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
index 5432c1f0f19..f0a0fdec665 100644
--- a/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
+++ b/core/src/main/java/org/elasticsearch/cluster/SnapshotsInProgress.java
@@ -23,13 +23,13 @@ import com.carrotsearch.hppc.ObjectContainer;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.cluster.ClusterState.Custom;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.snapshots.Snapshot;
import java.io.IOException;
import java.util.ArrayList;
@@ -66,7 +66,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
public static class Entry {
private final State state;
- private final SnapshotId snapshotId;
+ private final Snapshot snapshot;
private final boolean includeGlobalState;
private final boolean partial;
private final ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards;
@@ -74,9 +74,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
private final ImmutableOpenMap<String, List<ShardId>> waitingIndices;
private final long startTime;
- public Entry(SnapshotId snapshotId, boolean includeGlobalState, boolean partial, State state, List<String> indices, long startTime, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
+ public Entry(Snapshot snapshot, boolean includeGlobalState, boolean partial, State state, List<String> indices, long startTime,
+ ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
this.state = state;
- this.snapshotId = snapshotId;
+ this.snapshot = snapshot;
this.includeGlobalState = includeGlobalState;
this.partial = partial;
this.indices = indices;
@@ -91,15 +92,15 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
}
public Entry(Entry entry, State state, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
- this(entry.snapshotId, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
+ this(entry.snapshot, entry.includeGlobalState, entry.partial, state, entry.indices, entry.startTime, shards);
}
public Entry(Entry entry, ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
this(entry, entry.state, shards);
}
- public SnapshotId snapshotId() {
- return this.snapshotId;
+ public Snapshot snapshot() {
+ return this.snapshot;
}
public ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards() {
@@ -142,7 +143,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
if (startTime != entry.startTime) return false;
if (!indices.equals(entry.indices)) return false;
if (!shards.equals(entry.shards)) return false;
- if (!snapshotId.equals(entry.snapshotId)) return false;
+ if (!snapshot.equals(entry.snapshot)) return false;
if (state != entry.state) return false;
if (!waitingIndices.equals(entry.waitingIndices)) return false;
@@ -152,7 +153,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
@Override
public int hashCode() {
int result = state.hashCode();
- result = 31 * result + snapshotId.hashCode();
+ result = 31 * result + snapshot.hashCode();
result = 31 * result + (includeGlobalState ? 1 : 0);
result = 31 * result + (partial ? 1 : 0);
result = 31 * result + shards.hashCode();
@@ -162,6 +163,11 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
return result;
}
+ @Override
+ public String toString() {
+ return snapshot.toString();
+ }
+
private ImmutableOpenMap<String, List<ShardId>> findWaitingIndices(ImmutableOpenMap<ShardId, ShardSnapshotStatus> shards) {
Map<String, List<ShardId>> waitingIndicesMap = new HashMap<>();
for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> entry : shards) {
@@ -277,7 +283,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
}
}
- public static enum State {
+ public enum State {
INIT((byte) 0, false, false),
STARTED((byte) 1, false, false),
SUCCESS((byte) 2, true, false),
@@ -347,9 +353,10 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
return this.entries;
}
- public Entry snapshot(SnapshotId snapshotId) {
+ public Entry snapshot(final Snapshot snapshot) {
for (Entry entry : entries) {
- if (snapshotId.equals(entry.snapshotId())) {
+ final Snapshot curr = entry.snapshot();
+ if (curr.equals(snapshot)) {
return entry;
}
}
@@ -365,7 +372,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
public SnapshotsInProgress readFrom(StreamInput in) throws IOException {
Entry[] entries = new Entry[in.readVInt()];
for (int i = 0; i < entries.length; i++) {
- SnapshotId snapshotId = SnapshotId.readSnapshotId(in);
+ Snapshot snapshot = new Snapshot(in);
boolean includeGlobalState = in.readBoolean();
boolean partial = in.readBoolean();
State state = State.fromValue(in.readByte());
@@ -383,7 +390,13 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
State shardState = State.fromValue(in.readByte());
builder.put(shardId, new ShardSnapshotStatus(nodeId, shardState));
}
- entries[i] = new Entry(snapshotId, includeGlobalState, partial, state, Collections.unmodifiableList(indexBuilder), startTime, builder.build());
+ entries[i] = new Entry(snapshot,
+ includeGlobalState,
+ partial,
+ state,
+ Collections.unmodifiableList(indexBuilder),
+ startTime,
+ builder.build());
}
return new SnapshotsInProgress(entries);
}
@@ -392,7 +405,7 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(entries.size());
for (Entry entry : entries) {
- entry.snapshotId().writeTo(out);
+ entry.snapshot().writeTo(out);
out.writeBoolean(entry.includeGlobalState());
out.writeBoolean(entry.partial());
out.writeByte(entry.state().value());
@@ -410,25 +423,24 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
}
}
- static final class Fields {
- static final String REPOSITORY = "repository";
- static final String SNAPSHOTS = "snapshots";
- static final String SNAPSHOT = "snapshot";
- static final String INCLUDE_GLOBAL_STATE = "include_global_state";
- static final String PARTIAL = "partial";
- static final String STATE = "state";
- static final String INDICES = "indices";
- static final String START_TIME_MILLIS = "start_time_millis";
- static final String START_TIME = "start_time";
- static final String SHARDS = "shards";
- static final String INDEX = "index";
- static final String SHARD = "shard";
- static final String NODE = "node";
- }
+ private static final String REPOSITORY = "repository";
+ private static final String SNAPSHOTS = "snapshots";
+ private static final String SNAPSHOT = "snapshot";
+ private static final String UUID = "uuid";
+ private static final String INCLUDE_GLOBAL_STATE = "include_global_state";
+ private static final String PARTIAL = "partial";
+ private static final String STATE = "state";
+ private static final String INDICES = "indices";
+ private static final String START_TIME_MILLIS = "start_time_millis";
+ private static final String START_TIME = "start_time";
+ private static final String SHARDS = "shards";
+ private static final String INDEX = "index";
+ private static final String SHARD = "shard";
+ private static final String NODE = "node";
@Override
public XContentBuilder toXContent(XContentBuilder builder, ToXContent.Params params) throws IOException {
- builder.startArray(Fields.SNAPSHOTS);
+ builder.startArray(SNAPSHOTS);
for (Entry entry : entries) {
toXContent(entry, builder, params);
}
@@ -438,30 +450,31 @@ public class SnapshotsInProgress extends AbstractDiffable implements Cus
public void toXContent(Entry entry, XContentBuilder builder, ToXContent.Params params) throws IOException {
builder.startObject();
- builder.field(Fields.REPOSITORY, entry.snapshotId().getRepository());
- builder.field(Fields.SNAPSHOT, entry.snapshotId().getSnapshot());
- builder.field(Fields.INCLUDE_GLOBAL_STATE, entry.includeGlobalState());
- builder.field(Fields.PARTIAL, entry.partial());
- builder.field(Fields.STATE, entry.state());
- builder.startArray(Fields.INDICES);
+ builder.field(REPOSITORY, entry.snapshot().getRepository());
+ builder.field(SNAPSHOT, entry.snapshot().getSnapshotId().getName());
+ builder.field(UUID, entry.snapshot().getSnapshotId().getUUID());
+ builder.field(INCLUDE_GLOBAL_STATE, entry.includeGlobalState());
+ builder.field(PARTIAL, entry.partial());
+ builder.field(STATE, entry.state());
+ builder.startArray(INDICES);
{
for (String index : entry.indices()) {
builder.value(index);
}
}
builder.endArray();
- builder.timeValueField(Fields.START_TIME_MILLIS, Fields.START_TIME, entry.startTime());
- builder.startArray(Fields.SHARDS);
+ builder.timeValueField(START_TIME_MILLIS, START_TIME, entry.startTime());
+ builder.startArray(SHARDS);
{
for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardEntry : entry.shards) {
ShardId shardId = shardEntry.key;
ShardSnapshotStatus status = shardEntry.value;
builder.startObject();
{
- builder.field(Fields.INDEX, shardId.getIndex());
- builder.field(Fields.SHARD, shardId.getId());
- builder.field(Fields.STATE, status.state());
- builder.field(Fields.NODE, status.nodeId());
+ builder.field(INDEX, shardId.getIndex());
+ builder.field(SHARD, shardId.getId());
+ builder.field(STATE, status.state());
+ builder.field(NODE, status.nodeId());
}
builder.endObject();
}
diff --git a/core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java b/core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java
deleted file mode 100644
index 88c60f1f07e..00000000000
--- a/core/src/main/java/org/elasticsearch/cluster/metadata/SnapshotId.java
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.cluster.metadata;
-
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.io.stream.Streamable;
-
-import java.io.IOException;
-
-/**
- * Snapshot ID - repository name + snapshot name
- */
-public class SnapshotId implements Streamable {
-
- private String repository;
-
- private String snapshot;
-
- // Caching hash code
- private int hashCode;
-
- private SnapshotId() {
- }
-
- /**
- * Constructs new snapshot id
- *
- * @param repository repository name
- * @param snapshot snapshot name
- */
- public SnapshotId(String repository, String snapshot) {
- this.repository = repository;
- this.snapshot = snapshot;
- this.hashCode = computeHashCode();
- }
-
- /**
- * Returns repository name
- *
- * @return repository name
- */
- public String getRepository() {
- return repository;
- }
-
- /**
- * Returns snapshot name
- *
- * @return snapshot name
- */
- public String getSnapshot() {
- return snapshot;
- }
-
- @Override
- public String toString() {
- return repository + ":" + snapshot;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null) return false;
- SnapshotId snapshotId = (SnapshotId) o;
- return snapshot.equals(snapshotId.snapshot) && repository.equals(snapshotId.repository);
- }
-
- @Override
- public int hashCode() {
- return hashCode;
- }
-
- private int computeHashCode() {
- int result = repository != null ? repository.hashCode() : 0;
- result = 31 * result + snapshot.hashCode();
- return result;
- }
-
- /**
- * Reads snapshot id from stream input
- *
- * @param in stream input
- * @return snapshot id
- */
- public static SnapshotId readSnapshotId(StreamInput in) throws IOException {
- SnapshotId snapshot = new SnapshotId();
- snapshot.readFrom(in);
- return snapshot;
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void readFrom(StreamInput in) throws IOException {
- repository = in.readString();
- snapshot = in.readString();
- hashCode = computeHashCode();
- }
-
- /**
- * {@inheritDoc}
- */
- @Override
- public void writeTo(StreamOutput out) throws IOException {
- out.writeString(repository);
- out.writeString(snapshot);
- }
-}
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
index 8bb0f51e22e..0fe49369177 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/IndexRoutingTable.java
@@ -376,14 +376,20 @@ public class IndexRoutingTable extends AbstractDiffable imple
* Initializes a new empty index, to be restored from a snapshot
*/
public Builder initializeAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) {
- return initializeAsRestore(indexMetaData, restoreSource, ignoreShards, true, new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED, "restore_source[" + restoreSource.snapshotId().getRepository() + "/" + restoreSource.snapshotId().getSnapshot() + "]"));
+ final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.NEW_INDEX_RESTORED,
+ "restore_source[" + restoreSource.snapshot().getRepository() + "/" +
+ restoreSource.snapshot().getSnapshotId().getName() + "]");
+ return initializeAsRestore(indexMetaData, restoreSource, ignoreShards, true, unassignedInfo);
}
/**
* Initializes an existing index, to be restored from a snapshot
*/
public Builder initializeAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
- return initializeAsRestore(indexMetaData, restoreSource, null, false, new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED, "restore_source[" + restoreSource.snapshotId().getRepository() + "/" + restoreSource.snapshotId().getSnapshot() + "]"));
+ final UnassignedInfo unassignedInfo = new UnassignedInfo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED,
+ "restore_source[" + restoreSource.snapshot().getRepository() + "/" +
+ restoreSource.snapshot().getSnapshotId().getName() + "]");
+ return initializeAsRestore(indexMetaData, restoreSource, null, false, unassignedInfo);
}
/**
diff --git a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
index c091f71798b..f80e55ed8b3 100644
--- a/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
+++ b/core/src/main/java/org/elasticsearch/cluster/routing/RestoreSource.java
@@ -20,7 +20,7 @@
package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
@@ -28,13 +28,14 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import java.io.IOException;
+import java.util.Objects;
/**
* Represents snapshot and index from which a recovering index should be restored
*/
public class RestoreSource implements Streamable, ToXContent {
- private SnapshotId snapshotId;
+ private Snapshot snapshot;
private String index;
@@ -43,14 +44,14 @@ public class RestoreSource implements Streamable, ToXContent {
RestoreSource() {
}
- public RestoreSource(SnapshotId snapshotId, Version version, String index) {
- this.snapshotId = snapshotId;
- this.version = version;
- this.index = index;
+ public RestoreSource(Snapshot snapshot, Version version, String index) {
+ this.snapshot = Objects.requireNonNull(snapshot);
+ this.version = Objects.requireNonNull(version);
+ this.index = Objects.requireNonNull(index);
}
- public SnapshotId snapshotId() {
- return snapshotId;
+ public Snapshot snapshot() {
+ return snapshot;
}
public String index() {
@@ -61,26 +62,20 @@ public class RestoreSource implements Streamable, ToXContent {
return version;
}
- public static RestoreSource readRestoreSource(StreamInput in) throws IOException {
- RestoreSource restoreSource = new RestoreSource();
- restoreSource.readFrom(in);
- return restoreSource;
- }
-
public static RestoreSource readOptionalRestoreSource(StreamInput in) throws IOException {
return in.readOptionalStreamable(RestoreSource::new);
}
@Override
public void readFrom(StreamInput in) throws IOException {
- snapshotId = SnapshotId.readSnapshotId(in);
+ snapshot = new Snapshot(in);
version = Version.readVersion(in);
index = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
- snapshotId.writeTo(out);
+ snapshot.writeTo(out);
Version.writeVersion(version, out);
out.writeString(index);
}
@@ -88,8 +83,8 @@ public class RestoreSource implements Streamable, ToXContent {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
return builder.startObject()
- .field("repository", snapshotId.getRepository())
- .field("snapshot", snapshotId.getSnapshot())
+ .field("repository", snapshot.getRepository())
+ .field("snapshot", snapshot.getSnapshotId().getName())
.field("version", version.toString())
.field("index", index)
.endObject();
@@ -97,26 +92,24 @@ public class RestoreSource implements Streamable, ToXContent {
@Override
public String toString() {
- return snapshotId.toString();
+ return snapshot.toString();
}
@Override
public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
- RestoreSource that = (RestoreSource) o;
-
- if (!index.equals(that.index)) return false;
- if (!snapshotId.equals(that.snapshotId)) return false;
-
- return true;
+ @SuppressWarnings("unchecked") RestoreSource that = (RestoreSource) o;
+ return snapshot.equals(that.snapshot) && index.equals(that.index);
}
@Override
public int hashCode() {
- int result = snapshotId.hashCode();
- result = 31 * result + index.hashCode();
- return result;
+ return Objects.hash(snapshot, index);
}
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
index adcf36f694f..44445f0b6dd 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/IndexShard.java
@@ -39,7 +39,6 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RestoreSource;
-import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.common.Booleans;
@@ -1442,7 +1441,7 @@ public class IndexShard extends AbstractIndexShardComponent {
markAsRecovering("from snapshot", recoveryState); // mark the shard as recovering on the cluster state thread
threadPool.generic().execute(() -> {
try {
- final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshotId().getRepository());
+ final IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(restoreSource.snapshot().getRepository());
if (restoreFromRepository(indexShardRepository)) {
recoveryListener.onRecoveryDone(recoveryState);
}
diff --git a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
index c7d6f35eff2..62173f936c5 100644
--- a/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
+++ b/core/src/main/java/org/elasticsearch/index/shard/StoreRecovery.java
@@ -395,7 +395,7 @@ final class StoreRecovery {
throw new IndexShardRestoreFailedException(shardId, "empty restore source");
}
if (logger.isTraceEnabled()) {
- logger.trace("[{}] restoring shard [{}]", restoreSource.snapshotId(), shardId);
+ logger.trace("[{}] restoring shard [{}]", restoreSource.snapshot(), shardId);
}
try {
translogState.totalOperations(0);
@@ -405,7 +405,7 @@ final class StoreRecovery {
if (!shardId.getIndexName().equals(restoreSource.index())) {
snapshotShardId = new ShardId(restoreSource.index(), IndexMetaData.INDEX_UUID_NA_VALUE, shardId.id());
}
- indexShardRepository.restore(restoreSource.snapshotId(), restoreSource.version(), shardId, snapshotShardId, indexShard.recoveryState());
+ indexShardRepository.restore(restoreSource.snapshot().getSnapshotId(), restoreSource.version(), shardId, snapshotShardId, indexShard.recoveryState());
indexShard.skipTranslogRecovery();
indexShard.finalizeRecovery();
indexShard.postRecovery("restore done");
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java
index ca481e1430d..5988d82def2 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/IndexShardRepository.java
@@ -21,7 +21,7 @@ package org.elasticsearch.index.snapshots;
import org.apache.lucene.index.IndexCommit;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryState;
diff --git a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
index c15d2cfcdbe..e0032fe503b 100644
--- a/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
+++ b/core/src/main/java/org/elasticsearch/index/snapshots/blobstore/BlobStoreIndexShardRepository.java
@@ -32,7 +32,7 @@ import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.BytesRefBuilder;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParseFieldMatcher;
@@ -204,7 +204,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
try {
snapshotContext.restore();
} catch (Throwable e) {
- throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId.getSnapshot() + "]", e);
+ throw new IndexShardRestoreFailedException(shardId, "failed to restore snapshot [" + snapshotId + "]", e);
}
}
@@ -318,7 +318,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
int fileListGeneration = tuple.v2();
try {
- indexShardSnapshotFormat(version).delete(blobContainer, snapshotId.getSnapshot());
+ indexShardSnapshotFormat(version).delete(blobContainer, snapshotId.getName());
} catch (IOException e) {
logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId);
}
@@ -326,7 +326,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
// Build a list of snapshots that should be preserved
List<SnapshotFiles> newSnapshotsList = new ArrayList<>();
for (SnapshotFiles point : snapshots) {
- if (!point.snapshot().equals(snapshotId.getSnapshot())) {
+ if (!point.snapshot().equals(snapshotId.getName())) {
newSnapshotsList.add(point);
}
}
@@ -339,7 +339,7 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
*/
public BlobStoreIndexShardSnapshot loadSnapshot() {
try {
- return indexShardSnapshotFormat(version).read(blobContainer, snapshotId.getSnapshot());
+ return indexShardSnapshotFormat(version).read(blobContainer, snapshotId.getName());
} catch (IOException ex) {
throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex);
}
@@ -605,14 +605,14 @@ public class BlobStoreIndexShardRepository extends AbstractComponent implements
// now create and write the commit point
snapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FINALIZE);
- BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getSnapshot(),
+ BlobStoreIndexShardSnapshot snapshot = new BlobStoreIndexShardSnapshot(snapshotId.getName(),
snapshotIndexCommit.getGeneration(), indexCommitPointFiles, snapshotStatus.startTime(),
// snapshotStatus.startTime() is assigned on the same machine, so it's safe to use with VLong
System.currentTimeMillis() - snapshotStatus.startTime(), indexNumberOfFiles, indexTotalFilesSize);
//TODO: The time stored in snapshot doesn't include cleanup time.
logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
try {
- indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getSnapshot());
+ indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getName());
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
}
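
The shard-level delete and load paths above now key blobs by snapshotId.getName() rather than the removed getSnapshot() accessor. A minimal standalone sketch of the pruning step that builds the list of shard snapshots to preserve, using plain strings in place of SnapshotFiles:

import java.util.Arrays;
import java.util.List;
import java.util.stream.Collectors;

// Sketch only: shard snapshot files are keyed by snapshot name, so deleting one
// snapshot keeps every entry whose name differs from the snapshot being removed.
class ShardSnapshotPruneSketch {
    public static void main(String[] args) {
        List<String> shardSnapshots = Arrays.asList("snap_1", "snap_2", "snap_3");
        String toDelete = "snap_2";
        List<String> preserved = shardSnapshots.stream()
                .filter(name -> name.equals(toDelete) == false)
                .collect(Collectors.toList());
        System.out.println(preserved); // [snap_1, snap_3]
    }
}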
diff --git a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
index dcf055275e5..719fb812c74 100644
--- a/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
+++ b/core/src/main/java/org/elasticsearch/indices/cluster/IndicesClusterStateService.java
@@ -645,7 +645,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent
diff --git a/core/src/main/java/org/elasticsearch/repositories/Repository.java b/core/src/main/java/org/elasticsearch/repositories/Repository.java
--- a/core/src/main/java/org/elasticsearch/repositories/Repository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/Repository.java
* Typical snapshot usage pattern:
*
- * - Master calls {@link #initializeSnapshot(org.elasticsearch.cluster.metadata.SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)}
+ * - Master calls {@link #initializeSnapshot(SnapshotId, List, org.elasticsearch.cluster.metadata.MetaData)}
* with list of indices that will be included into the snapshot
* - Data nodes call {@link org.elasticsearch.index.snapshots.IndexShardRepository#snapshot(SnapshotId, ShardId, IndexCommit, IndexShardSnapshotStatus)} for each shard
* - When all shard calls return master calls {@link #finalizeSnapshot}
@@ -51,7 +52,7 @@ public interface Repository extends LifecycleComponent {
/**
* Reads snapshot description from repository.
*
- * @param snapshotId snapshot ID
+ * @param snapshotId snapshot id
* @return information about snapshot
*/
SnapshotInfo readSnapshot(SnapshotId snapshotId);
@@ -65,10 +66,11 @@ public interface Repository extends LifecycleComponent {
* @param indices list of indices
* @return information about snapshot
*/
- MetaData readSnapshotMetaData(SnapshotId snapshotId, SnapshotInfo snapshot, List<String> indices) throws IOException;
+ MetaData readSnapshotMetaData(SnapshotInfo snapshot, List<String> indices) throws IOException;
/**
- * Returns the list of snapshots currently stored in the repository
+ * Returns the list of snapshots currently stored in the repository that match the given predicate on the snapshot name.
+ * To get all snapshots, the predicate filter should return true regardless of the input.
*
* @return snapshot list
*/
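
A minimal sketch of the name-predicate filtering the updated javadoc describes. The method shape below is an illustration under that description, not the exact Repository signature, and plain names stand in for SnapshotId instances:

import java.util.Arrays;
import java.util.List;
import java.util.function.Predicate;
import java.util.stream.Collectors;

// Sketch only: a predicate that always returns true yields every stored snapshot,
// while an equality predicate narrows the listing to a single name.
class SnapshotNameFilterSketch {
    static List<String> snapshots(List<String> storedNames, Predicate<String> nameFilter) {
        return storedNames.stream().filter(nameFilter).collect(Collectors.toList());
    }

    public static void main(String[] args) {
        List<String> stored = Arrays.asList("nightly-1", "nightly-2", "weekly-1");
        System.out.println(snapshots(stored, name -> true));       // all snapshots
        System.out.println(snapshots(stored, "weekly-1"::equals)); // one name
    }
}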
diff --git a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
index 121df3e5832..506e64cb9a5 100644
--- a/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/blobstore/BlobStoreRepository.java
@@ -24,7 +24,7 @@ import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.UUIDs;
@@ -34,6 +34,7 @@ import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.compress.NotXContentException;
import org.elasticsearch.common.io.Streams;
@@ -56,7 +57,6 @@ import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.RepositorySettings;
import org.elasticsearch.repositories.RepositoryVerificationException;
-import org.elasticsearch.snapshots.InvalidSnapshotNameException;
import org.elasticsearch.snapshots.SnapshotCreationException;
import org.elasticsearch.snapshots.SnapshotException;
import org.elasticsearch.snapshots.SnapshotInfo;
@@ -71,6 +71,7 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
+import java.util.stream.Collectors;
/**
* BlobStore - based implementation of Snapshot Repository
@@ -130,7 +131,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
+ if (snapshots().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
+ throw new SnapshotCreationException(repositoryName, snapshotId, "snapshot with the same name already exists");
+ }
+ if (snapshotFormat.exists(snapshotsBlobContainer, blobId(snapshotId)) ||
+ snapshotLegacyFormat.exists(snapshotsBlobContainer, snapshotName)) {
+ throw new SnapshotCreationException(repositoryName, snapshotId, "snapshot with such name already exists");
}
// Write Global MetaData
- globalMetaDataFormat.write(metaData, snapshotsBlobContainer, snapshotId.getSnapshot());
+ globalMetaDataFormat.write(metaData, snapshotsBlobContainer, snapshotName);
for (String index : indices) {
final IndexMetaData indexMetaData = metaData.index(index);
final BlobPath indexPath = basePath().add("indices").add(index);
final BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
- indexMetaDataFormat.write(indexMetaData, indexMetaDataBlobContainer, snapshotId.getSnapshot());
+ indexMetaDataFormat.write(indexMetaData, indexMetaDataBlobContainer, snapshotName);
}
} catch (IOException ex) {
- throw new SnapshotCreationException(snapshotId, ex);
+ throw new SnapshotCreationException(repositoryName, snapshotId, ex);
}
}
@@ -314,35 +320,27 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
- List<SnapshotId> snapshotIds = snapshots();
- if (snapshotIds.contains(snapshotId)) {
- List<SnapshotId> builder = new ArrayList<>();
- for (SnapshotId id : snapshotIds) {
- if (!snapshotId.equals(id)) {
- builder.add(id);
- }
- }
- snapshotIds = Collections.unmodifiableList(builder);
- }
+ List<SnapshotId> snapshotIds = snapshots().stream().filter(id -> snapshotId.equals(id) == false).collect(Collectors.toList());
writeSnapshotList(snapshotIds);
// Now delete all indices
for (String index : indices) {
BlobPath indexPath = basePath().add("indices").add(index);
BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
try {
- indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getSnapshot());
+ indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getName());
} catch (IOException ex) {
logger.warn("[{}] failed to delete metadata for index [{}]", ex, snapshotId, index);
}
@@ -368,10 +366,21 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
- public SnapshotInfo finalizeSnapshot(SnapshotId snapshotId, List<String> indices, long startTime, String failure, int totalShards, List<SnapshotShardFailure> shardFailures) {
+ public SnapshotInfo finalizeSnapshot(final SnapshotId snapshotId,
+ final List<String> indices,
+ final long startTime,
+ final String failure,
+ final int totalShards,
+ final List<SnapshotShardFailure> shardFailures) {
try {
- SnapshotInfo blobStoreSnapshot = new SnapshotInfo(snapshotId.getSnapshot(), indices, startTime, failure, System.currentTimeMillis(), totalShards, shardFailures);
- snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, snapshotId.getSnapshot());
+ SnapshotInfo blobStoreSnapshot = new SnapshotInfo(snapshotId,
+ indices,
+ startTime,
+ failure,
+ System.currentTimeMillis(),
+ totalShards,
+ shardFailures);
+ snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, blobId(snapshotId));
List<SnapshotId> snapshotIds = snapshots();
if (!snapshotIds.contains(snapshotId)) {
snapshotIds = new ArrayList<>(snapshotIds);
@@ -405,15 +414,22 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
 if (blobName.startsWith(SNAPSHOT_PREFIX) && blobName.length() > legacyPrefixLength) {
- name = blobName.substring(prefixLength, blobName.length() - suffixLength);
+ final String str = blobName.substring(prefixLength, blobName.length() - suffixLength);
+ // TODO: this will go away once we make the snapshot file writes atomic and
+ // use it as the source of truth for the snapshots list instead of listing blobs
+ Tuple<String, String> pair = parseNameUUIDFromBlobName(str);
+ name = pair.v1();
+ uuid = pair.v2();
} else if (blobName.startsWith(LEGACY_SNAPSHOT_PREFIX) && blobName.length() > suffixLength + prefixLength) {
name = blobName.substring(legacyPrefixLength);
+ uuid = SnapshotId.UNASSIGNED_UUID;
} else {
// not sure what it was - ignore
continue;
}
- snapshots.add(new SnapshotId(repositoryName, name));
+ snapshots.add(new SnapshotId(name, uuid));
}
return Collections.unmodifiableList(snapshots);
} catch (IOException ex) {
@@ -425,28 +441,25 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
- public MetaData readSnapshotMetaData(SnapshotId snapshotId, SnapshotInfo snapshot, List<String> indices) throws IOException {
- return readSnapshotMetaData(snapshotId, snapshot.version(), indices, false);
+ public MetaData readSnapshotMetaData(SnapshotInfo snapshot, List<String> indices) throws IOException {
+ return readSnapshotMetaData(snapshot.snapshotId(), snapshot.version(), indices, false);
}
- /**
- * {@inheritDoc}
- */
@Override
- public SnapshotInfo readSnapshot(SnapshotId snapshotId) {
+ public SnapshotInfo readSnapshot(final SnapshotId snapshotId) {
try {
- return snapshotFormat.read(snapshotsBlobContainer, snapshotId.getSnapshot());
+ return snapshotFormat.read(snapshotsBlobContainer, blobId(snapshotId));
} catch (FileNotFoundException | NoSuchFileException ex) {
// File is missing - let's try legacy format instead
try {
- return snapshotLegacyFormat.read(snapshotsBlobContainer, snapshotId.getSnapshot());
+ return snapshotLegacyFormat.read(snapshotsBlobContainer, snapshotId.getName());
} catch (FileNotFoundException | NoSuchFileException ex1) {
- throw new SnapshotMissingException(snapshotId, ex);
+ throw new SnapshotMissingException(repositoryName, snapshotId, ex);
} catch (IOException | NotXContentException ex1) {
- throw new SnapshotException(snapshotId, "failed to get snapshots", ex1);
+ throw new SnapshotException(repositoryName, snapshotId, "failed to get snapshots", ex1);
}
} catch (IOException | NotXContentException ex) {
- throw new SnapshotException(snapshotId, "failed to get snapshots", ex);
+ throw new SnapshotException(repositoryName, snapshotId, "failed to get snapshots", ex);
}
}
@@ -456,27 +469,27 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
@@ -561,9 +578,12 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent
+ private Tuple<String, String> parseNameUUIDFromBlobName(final String str) {
+ final String name;
+ final String uuid;
+ final int sizeOfUUID = 22; // uuid is 22 chars in length
+ // unreliable, but highly unlikely to have a snapshot name with a dash followed by 22 characters,
+ // and this will go away before a release (see #18156).
+ //norelease
+ if (str.length() > sizeOfUUID + 1 && str.charAt(str.length() - sizeOfUUID - 1) == '-') {
+ // new naming convention, snapshot blob id has name and uuid
+ final int idx = str.length() - sizeOfUUID - 1;
+ name = str.substring(0, idx);
+ uuid = str.substring(idx + 1);
+ } else {
+ // old naming convention, before snapshots had UUIDs
+ name = str;
+ uuid = SnapshotId.UNASSIGNED_UUID;
+ }
+ return Tuple.tuple(name, uuid);
+ }
+
+ // Package private for testing
+ static String blobId(final SnapshotId snapshotId) {
+ final String uuid = snapshotId.getUUID();
+ if (uuid.equals(SnapshotId.UNASSIGNED_UUID)) {
+ // the old snapshot blob naming
+ return snapshotId.getName();
+ }
+ return snapshotId.getName() + "-" + uuid;
+ }
}
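
To make the blob naming convention above concrete, here is a standalone round-trip sketch of blobId and parseNameUUIDFromBlobName. It assumes the same 22-character UUID length used in the patch and uses "_na_" as a stand-in for SnapshotId.UNASSIGNED_UUID:

import java.util.AbstractMap.SimpleEntry;
import java.util.Map;

// Sketch only: a snapshot with a UUID is stored under "<name>-<uuid>", a legacy
// snapshot under its bare name; parsing falls back to the legacy form when no
// dash-plus-22-character suffix is found (the same heuristic flagged //norelease above).
public class BlobIdSketch {
    static final String UNASSIGNED_UUID = "_na_"; // assumption: stand-in for SnapshotId.UNASSIGNED_UUID
    static final int UUID_LENGTH = 22;

    static String blobId(String name, String uuid) {
        return UNASSIGNED_UUID.equals(uuid) ? name : name + "-" + uuid;
    }

    static Map.Entry<String, String> parseNameUuid(String blobName) {
        int idx = blobName.length() - UUID_LENGTH - 1;
        if (idx > 0 && blobName.charAt(idx) == '-') {
            return new SimpleEntry<>(blobName.substring(0, idx), blobName.substring(idx + 1));
        }
        return new SimpleEntry<>(blobName, UNASSIGNED_UUID);
    }

    public static void main(String[] args) {
        String id = blobId("nightly-1", "dsdAvKukQ5uZSy2VL0gsww");
        Map.Entry<String, String> parsed = parseNameUuid(id);
        System.out.println(id + " -> name=" + parsed.getKey() + ", uuid=" + parsed.getValue());
        System.out.println(parseNameUuid("old-snapshot")); // legacy blob, uuid stays unassigned
    }
}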
diff --git a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
index 77d4f1cc816..baea7fe28f9 100644
--- a/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
+++ b/core/src/main/java/org/elasticsearch/repositories/uri/URLRepository.java
@@ -19,7 +19,7 @@
package org.elasticsearch.repositories.uri;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.url.URLBlobStore;
@@ -42,6 +42,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;
+import java.util.function.Predicate;
/**
* Read-only URL-based implementation of the BlobStoreRepository
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
index 4764462d958..07c2611f2ce 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestRecoveryAction.java
@@ -154,8 +154,8 @@ public class RestRecoveryAction extends AbstractCatAction {
t.addCell(state.getSourceNode() == null ? "n/a" : state.getSourceNode().getName());
t.addCell(state.getTargetNode().getHostName());
t.addCell(state.getTargetNode().getName());
- t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository());
- t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getSnapshot());
+ t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getRepository());
+ t.addCell(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getSnapshotId().getName());
t.addCell(state.getIndex().totalRecoverFiles());
t.addCell(state.getIndex().recoveredFileCount());
t.addCell(String.format(Locale.ROOT, "%1.1f%%", state.getIndex().recoveredFilesPercent()));
diff --git a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java
index 0d98dd20278..94d178e4db9 100644
--- a/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java
+++ b/core/src/main/java/org/elasticsearch/rest/action/cat/RestSnapshotAction.java
@@ -79,7 +79,7 @@ public class RestSnapshotAction extends AbstractCatAction {
protected Table getTableWithHeader(RestRequest request) {
return new Table()
.startHeaders()
- .addCell("id", "alias:id,snapshotId;desc:unique snapshot id")
+ .addCell("id", "alias:id,snapshot;desc:unique snapshot")
.addCell("status", "alias:s,status;text-align:right;desc:snapshot name")
.addCell("start_epoch", "alias:ste,startEpoch;desc:start time in seconds since 1970-01-01 00:00:00")
.addCell("start_time", "alias:sti,startTime;desc:start time in HH:MM:SS")
@@ -101,7 +101,7 @@ public class RestSnapshotAction extends AbstractCatAction {
for (SnapshotInfo snapshotStatus : getSnapshotsResponse.getSnapshots()) {
table.startRow();
- table.addCell(snapshotStatus.name());
+ table.addCell(snapshotStatus.snapshotId().getName());
table.addCell(snapshotStatus.state());
table.addCell(TimeUnit.SECONDS.convert(snapshotStatus.startTime(), TimeUnit.MILLISECONDS));
table.addCell(dateFormat.print(snapshotStatus.startTime()));
diff --git a/core/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java b/core/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java
index 4a844fb4907..73762e2d3a7 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/ConcurrentSnapshotExecutionException.java
@@ -19,7 +19,6 @@
package org.elasticsearch.snapshots;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.rest.RestStatus;
@@ -29,7 +28,12 @@ import java.io.IOException;
* Thrown when a user tries to start multiple snapshots at the same time
*/
public class ConcurrentSnapshotExecutionException extends SnapshotException {
- public ConcurrentSnapshotExecutionException(SnapshotId snapshot, String msg) {
+
+ public ConcurrentSnapshotExecutionException(final String repositoryName, final String snapshotName, final String msg) {
+ super(repositoryName, snapshotName, msg);
+ }
+
+ public ConcurrentSnapshotExecutionException(final Snapshot snapshot, final String msg) {
super(snapshot, msg);
}
diff --git a/core/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java b/core/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java
index 48949c11d4c..2acc82add36 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/InvalidSnapshotNameException.java
@@ -19,7 +19,6 @@
package org.elasticsearch.snapshots;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.rest.RestStatus;
@@ -30,8 +29,8 @@ import java.io.IOException;
*/
public class InvalidSnapshotNameException extends SnapshotException {
- public InvalidSnapshotNameException(SnapshotId snapshot, String desc) {
- super(snapshot, "Invalid snapshot name [" + snapshot.getSnapshot() + "], " + desc);
+ public InvalidSnapshotNameException(final String repositoryName, final String snapshotName, String desc) {
+ super(repositoryName, snapshotName, "Invalid snapshot name [" + snapshotName + "], " + desc);
}
public InvalidSnapshotNameException(StreamInput in) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
index f6e6c4aaed5..44ca6fb972e 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/RestoreService.java
@@ -39,7 +39,6 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
import org.elasticsearch.cluster.metadata.MetaDataIndexUpgradeService;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
@@ -85,6 +84,8 @@ import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
+import java.util.Objects;
+import java.util.Optional;
import java.util.Set;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CopyOnWriteArrayList;
@@ -116,7 +117,7 @@ import static org.elasticsearch.common.util.set.Sets.newHashSet;
* method, which detects that shard should be restored from snapshot rather than recovered from gateway by looking
* at the {@link org.elasticsearch.cluster.routing.ShardRouting#restoreSource()} property.
*
- * At the end of the successful restore process {@code IndexShardSnapshotAndRestoreService} calls {@link #indexShardRestoreCompleted(SnapshotId, ShardId)},
+ * At the end of the successful restore process {@code IndexShardSnapshotAndRestoreService} calls {@link #indexShardRestoreCompleted(Snapshot, ShardId)},
* which updates {@link RestoreInProgress} in cluster state or removes it when all shards are completed. In case of
* restore failure a normal recovery fail-over process kicks in.
*/
@@ -153,8 +154,6 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
private final MetaDataCreateIndexService createIndexService;
- private final ClusterSettings dynamicSettings;
-
private final MetaDataIndexUpgradeService metaDataIndexUpgradeService;
private final CopyOnWriteArrayList<ActionListener<RestoreCompletionResponse>> listeners = new CopyOnWriteArrayList<>();
@@ -164,7 +163,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Inject
public RestoreService(Settings settings, ClusterService clusterService, RepositoriesService repositoriesService, TransportService transportService,
- AllocationService allocationService, MetaDataCreateIndexService createIndexService, ClusterSettings dynamicSettings,
+ AllocationService allocationService, MetaDataCreateIndexService createIndexService,
MetaDataIndexUpgradeService metaDataIndexUpgradeService, ClusterSettings clusterSettings) {
super(settings);
this.clusterService = clusterService;
@@ -172,7 +171,6 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
this.transportService = transportService;
this.allocationService = allocationService;
this.createIndexService = createIndexService;
- this.dynamicSettings = dynamicSettings;
this.metaDataIndexUpgradeService = metaDataIndexUpgradeService;
transportService.registerRequestHandler(UPDATE_RESTORE_ACTION_NAME, UpdateIndexShardRestoreStatusRequest::new, ThreadPool.Names.SAME, new UpdateRestoreStateRequestHandler());
clusterService.add(this);
@@ -188,14 +186,20 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
public void restoreSnapshot(final RestoreRequest request, final ActionListener listener) {
try {
// Read snapshot info and metadata from the repository
- Repository repository = repositoriesService.repository(request.repository());
- final SnapshotId snapshotId = new SnapshotId(request.repository(), request.name());
- final SnapshotInfo snapshot = repository.readSnapshot(snapshotId);
- List<String> filteredIndices = SnapshotUtils.filterIndices(snapshot.indices(), request.indices(), request.indicesOptions());
- MetaData metaDataIn = repository.readSnapshotMetaData(snapshotId, snapshot, filteredIndices);
+ Repository repository = repositoriesService.repository(request.repositoryName);
+ final Optional<SnapshotId> matchingSnapshotId = repository.snapshots().stream()
+ .filter(s -> request.snapshotName.equals(s.getName())).findFirst();
+ if (matchingSnapshotId.isPresent() == false) {
+ throw new SnapshotRestoreException(request.repositoryName, request.snapshotName, "snapshot does not exist");
+ }
+ final SnapshotId snapshotId = matchingSnapshotId.get();
+ final SnapshotInfo snapshotInfo = repository.readSnapshot(snapshotId);
+ final Snapshot snapshot = new Snapshot(request.repositoryName, snapshotId);
+ List<String> filteredIndices = SnapshotUtils.filterIndices(snapshotInfo.indices(), request.indices(), request.indicesOptions());
+ MetaData metaDataIn = repository.readSnapshotMetaData(snapshotInfo, filteredIndices);
final MetaData metaData;
- if (snapshot.version().before(Version.V_2_0_0_beta1)) {
+ if (snapshotInfo.version().before(Version.V_2_0_0_beta1)) {
// ES 2.0 now requires units for all time and byte-sized settings, so we add the default unit if it's missing in this snapshot:
metaData = MetaData.addDefaultUnitsIfNeeded(logger, metaDataIn);
} else {
@@ -204,7 +208,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
// Make sure that we can restore from this snapshot
- validateSnapshotRestorable(snapshotId, snapshot);
+ validateSnapshotRestorable(request.repositoryName, snapshotInfo);
// Find list of indices that we need to restore
final Map<String, String> renamedIndices = renamedIndices(request, filteredIndices);
@@ -220,7 +224,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
// same time
RestoreInProgress restoreInProgress = currentState.custom(RestoreInProgress.TYPE);
if (restoreInProgress != null && !restoreInProgress.entries().isEmpty()) {
- throw new ConcurrentSnapshotExecutionException(snapshotId, "Restore process is already running in this cluster");
+ throw new ConcurrentSnapshotExecutionException(snapshot, "Restore process is already running in this cluster");
}
// Updating cluster state
@@ -236,14 +240,14 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
for (Map.Entry<String, String> indexEntry : renamedIndices.entrySet()) {
String index = indexEntry.getValue();
boolean partial = checkPartial(index);
- RestoreSource restoreSource = new RestoreSource(snapshotId, snapshot.version(), index);
+ RestoreSource restoreSource = new RestoreSource(snapshot, snapshotInfo.version(), index);
String renamedIndexName = indexEntry.getKey();
IndexMetaData snapshotIndexMetaData = metaData.index(index);
snapshotIndexMetaData = updateIndexSettings(snapshotIndexMetaData, request.indexSettings, request.ignoreIndexSettings);
try {
snapshotIndexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(snapshotIndexMetaData);
} catch (Exception ex) {
- throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
+ throw new SnapshotRestoreException(snapshot, "cannot restore index [" + index + "] because it cannot be upgraded", ex);
}
// Check that the index is closed or doesn't exist
IndexMetaData currentIndexMetaData = currentState.metaData().index(renamedIndexName);
@@ -309,7 +313,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
shards = shardsBuilder.build();
- RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshotId, RestoreInProgress.State.INIT, Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards);
+ RestoreInProgress.Entry restoreEntry = new RestoreInProgress.Entry(snapshot, RestoreInProgress.State.INIT, Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())), shards);
builder.putCustom(RestoreInProgress.TYPE, new RestoreInProgress(restoreEntry));
} else {
shards = ImmutableOpenMap.of();
@@ -322,28 +326,30 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
if (completed(shards)) {
// We don't have any indices to restore - we are done
- restoreInfo = new RestoreInfo(request.name(), Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())),
- shards.size(), shards.size() - failedShards(shards));
+ restoreInfo = new RestoreInfo(snapshotId.getName(),
+ Collections.unmodifiableList(new ArrayList<>(renamedIndices.keySet())),
+ shards.size(),
+ shards.size() - failedShards(shards));
}
RoutingTable rt = rtBuilder.build();
ClusterState updatedState = builder.metaData(mdBuilder).blocks(blocks).routingTable(rt).build();
RoutingAllocation.Result routingResult = allocationService.reroute(
ClusterState.builder(updatedState).routingTable(rt).build(),
- "restored snapshot [" + snapshotId + "]");
+ "restored snapshot [" + snapshot + "]");
return ClusterState.builder(updatedState).routingResult(routingResult).build();
}
private void checkAliasNameConflicts(Map<String, String> renamedIndices, Set<String> aliases) {
for (Map.Entry<String, String> renamedIndex : renamedIndices.entrySet()) {
if (aliases.contains(renamedIndex.getKey())) {
- throw new SnapshotRestoreException(snapshotId, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
+ throw new SnapshotRestoreException(snapshot, "cannot rename index [" + renamedIndex.getValue() + "] into [" + renamedIndex.getKey() + "] because of conflict with an alias with the same name");
}
}
}
private void populateIgnoredShards(String index, IntSet ignoreShards) {
- for (SnapshotShardFailure failure : snapshot.shardFailures()) {
+ for (SnapshotShardFailure failure : snapshotInfo.shardFailures()) {
if (index.equals(failure.index())) {
ignoreShards.add(failure.shardId());
}
@@ -352,11 +358,11 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
private boolean checkPartial(String index) {
// Make sure that index was fully snapshotted
- if (failed(snapshot, index)) {
+ if (failed(snapshotInfo, index)) {
if (request.partial()) {
return true;
} else {
- throw new SnapshotRestoreException(snapshotId, "index [" + index + "] wasn't fully snapshotted - cannot restore");
+ throw new SnapshotRestoreException(snapshot, "index [" + index + "] wasn't fully snapshotted - cannot restore");
}
} else {
return false;
@@ -367,15 +373,15 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
// Index exist - checking that it's closed
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
// TODO: Enable restore for open indices
- throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] because it's open");
+ throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] because it's open");
}
// Index exist - checking if it's partial restore
if (partial) {
- throw new SnapshotRestoreException(snapshotId, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
+ throw new SnapshotRestoreException(snapshot, "cannot restore partial index [" + renamedIndex + "] because such index already exists");
}
// Make sure that the number of shards is the same. That's the only thing that we cannot change
if (currentIndexMetaData.getNumberOfShards() != snapshotIndexMetaData.getNumberOfShards()) {
- throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
+ throw new SnapshotRestoreException(snapshot, "cannot restore index [" + renamedIndex + "] with [" + currentIndexMetaData.getNumberOfShards() +
"] shard from snapshot with [" + snapshotIndexMetaData.getNumberOfShards() + "] shards");
}
}
@@ -395,7 +401,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
for (String ignoredSetting : ignoreSettings) {
if (!Regex.isSimpleMatchPattern(ignoredSetting)) {
if (UNREMOVABLE_SETTINGS.contains(ignoredSetting)) {
- throw new SnapshotRestoreException(snapshotId, "cannot remove setting [" + ignoredSetting + "] on restore");
+ throw new SnapshotRestoreException(snapshot, "cannot remove setting [" + ignoredSetting + "] on restore");
} else {
settingsMap.remove(ignoredSetting);
}
@@ -417,7 +423,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
for (Map.Entry<String, String> entry : normalizedChangeSettings.getAsMap().entrySet()) {
if (UNMODIFIABLE_SETTINGS.contains(entry.getKey())) {
- throw new SnapshotRestoreException(snapshotId, "cannot modify setting [" + entry.getKey() + "] on restore");
+ throw new SnapshotRestoreException(snapshot, "cannot modify setting [" + entry.getKey() + "] on restore");
} else {
settingsMap.put(entry.getKey(), entry.getValue());
}
@@ -471,7 +477,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
} catch (Throwable e) {
- logger.warn("[{}][{}] failed to restore snapshot", e, request.repository(), request.name());
+ logger.warn("[{}] failed to restore snapshot", e, request.repositoryName + ":" + request.snapshotName);
listener.onFailure(e);
}
}
@@ -480,28 +486,28 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
* This method is used by {@link IndexShard} to notify
* {@code RestoreService} about shard restore completion.
*
- * @param snapshotId snapshot id
+ * @param snapshot snapshot
* @param shardId shard id
*/
- public void indexShardRestoreCompleted(SnapshotId snapshotId, ShardId shardId) {
- logger.trace("[{}] successfully restored shard [{}]", snapshotId, shardId);
- UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshotId, shardId,
+ public void indexShardRestoreCompleted(Snapshot snapshot, ShardId shardId) {
+ logger.trace("[{}] successfully restored shard [{}]", snapshot, shardId);
+ UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshot, shardId,
new ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.SUCCESS));
- transportService.sendRequest(clusterService.state().nodes().getMasterNode(),
- UPDATE_RESTORE_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
+ transportService.sendRequest(clusterService.state().nodes().getMasterNode(),
+ UPDATE_RESTORE_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
}
public final static class RestoreCompletionResponse {
- private final SnapshotId snapshotId;
+ private final Snapshot snapshot;
private final RestoreInfo restoreInfo;
- private RestoreCompletionResponse(SnapshotId snapshotId, RestoreInfo restoreInfo) {
- this.snapshotId = snapshotId;
+ private RestoreCompletionResponse(final Snapshot snapshot, final RestoreInfo restoreInfo) {
+ this.snapshot = snapshot;
this.restoreInfo = restoreInfo;
}
- public SnapshotId getSnapshotId() {
- return snapshotId;
+ public Snapshot getSnapshot() {
+ return snapshot;
}
public RestoreInfo getRestoreInfo() {
@@ -520,7 +526,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
clusterService.submitStateUpdateTask("update snapshot state", new ClusterStateUpdateTask() {
private final List drainedRequests = new ArrayList<>();
- private Map<SnapshotId, Tuple<RestoreInfo, ImmutableOpenMap<ShardId, ShardRestoreStatus>>> batchedRestoreInfo = null;
+ private Map<Snapshot, Tuple<RestoreInfo, ImmutableOpenMap<ShardId, ShardRestoreStatus>>> batchedRestoreInfo = null;
@Override
public ClusterState execute(ClusterState currentState) {
@@ -549,8 +555,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
final UpdateIndexShardRestoreStatusRequest updateSnapshotState = drainedRequests.get(i);
updateSnapshotState.processed = true;
- if (entry.snapshotId().equals(updateSnapshotState.snapshotId())) {
- logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshotId(), updateSnapshotState.shardId(), updateSnapshotState.status().state());
+ if (entry.snapshot().equals(updateSnapshotState.snapshot())) {
+ logger.trace("[{}] Updating shard [{}] with status [{}]", updateSnapshotState.snapshot(), updateSnapshotState.shardId(), updateSnapshotState.status().state());
if (shardsBuilder == null) {
shardsBuilder = ImmutableOpenMap.builder(entry.shards());
}
@@ -562,16 +568,19 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
if (shardsBuilder != null) {
ImmutableOpenMap<ShardId, ShardRestoreStatus> shards = shardsBuilder.build();
if (!completed(shards)) {
- entries.add(new RestoreInProgress.Entry(entry.snapshotId(), RestoreInProgress.State.STARTED, entry.indices(), shards));
+ entries.add(new RestoreInProgress.Entry(entry.snapshot(), RestoreInProgress.State.STARTED, entry.indices(), shards));
} else {
- logger.info("restore [{}] is done", entry.snapshotId());
+ logger.info("restore [{}] is done", entry.snapshot());
if (batchedRestoreInfo == null) {
batchedRestoreInfo = new HashMap<>();
}
- assert !batchedRestoreInfo.containsKey(entry.snapshotId());
- batchedRestoreInfo.put(entry.snapshotId(),
+ assert !batchedRestoreInfo.containsKey(entry.snapshot());
+ batchedRestoreInfo.put(entry.snapshot(),
new Tuple<>(
- new RestoreInfo(entry.snapshotId().getSnapshot(), entry.indices(), shards.size(), shards.size() - failedShards(shards)),
+ new RestoreInfo(entry.snapshot().getSnapshotId().getName(),
+ entry.indices(),
+ shards.size(),
+ shards.size() - failedShards(shards)),
shards));
}
} else {
@@ -592,15 +601,15 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public void onFailure(String source, @Nullable Throwable t) {
for (UpdateIndexShardRestoreStatusRequest request : drainedRequests) {
- logger.warn("[{}][{}] failed to update snapshot status to [{}]", t, request.snapshotId(), request.shardId(), request.status());
+ logger.warn("[{}][{}] failed to update snapshot status to [{}]", t, request.snapshot(), request.shardId(), request.status());
}
}
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
if (batchedRestoreInfo != null) {
- for (final Entry<SnapshotId, Tuple<RestoreInfo, ImmutableOpenMap<ShardId, ShardRestoreStatus>>> entry : batchedRestoreInfo.entrySet()) {
- final SnapshotId snapshotId = entry.getKey();
+ for (final Entry<Snapshot, Tuple<RestoreInfo, ImmutableOpenMap<ShardId, ShardRestoreStatus>>> entry : batchedRestoreInfo.entrySet()) {
+ final Snapshot snapshot = entry.getKey();
final RestoreInfo restoreInfo = entry.getValue().v1();
final ImmutableOpenMap<ShardId, ShardRestoreStatus> shards = entry.getValue().v2();
RoutingTable routingTable = newState.getRoutingTable();
@@ -610,13 +619,13 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
ShardId shardId = shard.key;
ShardRouting shardRouting = findPrimaryShard(routingTable, shardId);
if (shardRouting != null && !shardRouting.active()) {
- logger.trace("[{}][{}] waiting for the shard to start", snapshotId, shardId);
+ logger.trace("[{}][{}] waiting for the shard to start", snapshot, shardId);
waitForStarted.add(shardId);
}
}
}
if (waitForStarted.isEmpty()) {
- notifyListeners(snapshotId, restoreInfo);
+ notifyListeners(snapshot, restoreInfo);
} else {
clusterService.addLast(new ClusterStateListener() {
@Override
@@ -629,12 +638,12 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
// Shard disappeared (index deleted) or became active
if (shardRouting == null || shardRouting.active()) {
iterator.remove();
- logger.trace("[{}][{}] shard disappeared or started - removing", snapshotId, shardId);
+ logger.trace("[{}][{}] shard disappeared or started - removing", snapshot, shardId);
}
}
}
if (waitForStarted.isEmpty()) {
- notifyListeners(snapshotId, restoreInfo);
+ notifyListeners(snapshot, restoreInfo);
clusterService.remove(this);
}
}
@@ -655,10 +664,10 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
return null;
}
- private void notifyListeners(SnapshotId snapshotId, RestoreInfo restoreInfo) {
+ private void notifyListeners(Snapshot snapshot, RestoreInfo restoreInfo) {
for (ActionListener<RestoreCompletionResponse> listener : listeners) {
try {
- listener.onResponse(new RestoreCompletionResponse(snapshotId, restoreInfo));
+ listener.onResponse(new RestoreCompletionResponse(snapshot, restoreInfo));
} catch (Throwable e) {
logger.warn("failed to update snapshot status for [{}]", e, listener);
}
@@ -695,7 +704,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
String previousIndex = renamedIndices.put(renamedIndex, index);
if (previousIndex != null) {
- throw new SnapshotRestoreException(new SnapshotId(request.repository(), request.name()),
+ throw new SnapshotRestoreException(request.repositoryName, request.snapshotName,
"indices [" + index + "] and [" + previousIndex + "] are renamed into the same index [" + renamedIndex + "]");
}
}
@@ -705,16 +714,18 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
/**
* Checks that snapshots can be restored and have compatible version
*
- * @param snapshotId snapshot id
- * @param snapshot snapshot metadata
+ * @param repository repository name
+ * @param snapshotInfo snapshot metadata
*/
- private void validateSnapshotRestorable(SnapshotId snapshotId, SnapshotInfo snapshot) {
- if (!snapshot.state().restorable()) {
- throw new SnapshotRestoreException(snapshotId, "unsupported snapshot state [" + snapshot.state() + "]");
+ private void validateSnapshotRestorable(final String repository, final SnapshotInfo snapshotInfo) {
+ if (!snapshotInfo.state().restorable()) {
+ throw new SnapshotRestoreException(new Snapshot(repository, snapshotInfo.snapshotId()),
+ "unsupported snapshot state [" + snapshotInfo.state() + "]");
}
- if (Version.CURRENT.before(snapshot.version())) {
- throw new SnapshotRestoreException(snapshotId, "the snapshot was created with Elasticsearch version [" +
- snapshot.version() + "] which is higher than the version of this node [" + Version.CURRENT + "]");
+ if (Version.CURRENT.before(snapshotInfo.version())) {
+ throw new SnapshotRestoreException(new Snapshot(repository, snapshotInfo.snapshotId()),
+ "the snapshot was created with Elasticsearch version [" + snapshotInfo.version() +
+ "] which is higher than the version of this node [" + Version.CURRENT + "]");
}
}
@@ -746,8 +757,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
if (shardsToFail != null) {
for (ShardId shardId : shardsToFail) {
- logger.trace("[{}] failing running shard restore [{}]", entry.snapshotId(), shardId);
- updateRestoreStateOnMaster(new UpdateIndexShardRestoreStatusRequest(entry.snapshotId(), shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted")));
+ logger.trace("[{}] failing running shard restore [{}]", entry.snapshot(), shardId);
+ updateRestoreStateOnMaster(new UpdateIndexShardRestoreStatusRequest(entry.snapshot(), shardId, new ShardRestoreStatus(null, RestoreInProgress.State.FAILURE, "index was deleted")));
}
}
}
@@ -757,12 +768,12 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
/**
* Fails the given snapshot restore operation for the given shard
*/
- public void failRestore(SnapshotId snapshotId, ShardId shardId) {
- logger.debug("[{}] failed to restore shard [{}]", snapshotId, shardId);
- UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshotId, shardId,
+ public void failRestore(Snapshot snapshot, ShardId shardId) {
+ logger.debug("[{}] failed to restore shard [{}]", snapshot, shardId);
+ UpdateIndexShardRestoreStatusRequest request = new UpdateIndexShardRestoreStatusRequest(snapshot, shardId,
new ShardRestoreStatus(clusterService.state().nodes().getLocalNodeId(), RestoreInProgress.State.FAILURE));
- transportService.sendRequest(clusterService.state().nodes().getMasterNode(),
- UPDATE_RESTORE_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
+ transportService.sendRequest(clusterService.state().nodes().getMasterNode(),
+ UPDATE_RESTORE_ACTION_NAME, request, EmptyTransportResponseHandler.INSTANCE_SAME);
}
private boolean failed(SnapshotInfo snapshot, String index) {
@@ -846,7 +857,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
RestoreInProgress snapshots = clusterState.custom(RestoreInProgress.TYPE);
if (snapshots != null) {
for (RestoreInProgress.Entry snapshot : snapshots.entries()) {
- if (repository.equals(snapshot.snapshotId().getRepository())) {
+ if (repository.equals(snapshot.snapshot().getRepository())) {
return true;
}
}
@@ -861,9 +872,9 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
final private String cause;
- final private String name;
+ final private String repositoryName;
- final private String repository;
+ final private String snapshotName;
final private String[] indices;
@@ -890,9 +901,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
/**
* Constructs new restore request
*
- * @param cause cause for restoring the snapshot
- * @param repository repository name
- * @param name snapshot name
+ * @param repositoryName repository name
+ * @param snapshotName snapshot name
* @param indices list of indices to restore
* @param indicesOptions indices options
* @param renamePattern pattern to rename indices
@@ -903,14 +913,14 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
* @param partial allow partial restore
* @param indexSettings index settings that should be changed on restore
* @param ignoreIndexSettings index settings that shouldn't be restored
+ * @param cause cause for restoring the snapshot
*/
- public RestoreRequest(String cause, String repository, String name, String[] indices, IndicesOptions indicesOptions,
+ public RestoreRequest(String repositoryName, String snapshotName, String[] indices, IndicesOptions indicesOptions,
String renamePattern, String renameReplacement, Settings settings,
TimeValue masterNodeTimeout, boolean includeGlobalState, boolean partial, boolean includeAliases,
- Settings indexSettings, String[] ignoreIndexSettings ) {
- this.cause = cause;
- this.name = name;
- this.repository = repository;
+ Settings indexSettings, String[] ignoreIndexSettings, String cause) {
+ this.repositoryName = Objects.requireNonNull(repositoryName);
+ this.snapshotName = Objects.requireNonNull(snapshotName);
this.indices = indices;
this.renamePattern = renamePattern;
this.renameReplacement = renameReplacement;
@@ -922,7 +932,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
this.includeAliases = includeAliases;
this.indexSettings = indexSettings;
this.ignoreIndexSettings = ignoreIndexSettings;
-
+ this.cause = cause;
}
/**
@@ -934,22 +944,22 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
return cause;
}
- /**
- * Returns snapshot name
- *
- * @return snapshot name
- */
- public String name() {
- return name;
- }
-
/**
* Returns repository name
*
* @return repository name
*/
- public String repository() {
- return repository;
+ public String repositoryName() {
+ return repositoryName;
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String snapshotName() {
+ return snapshotName;
}
/**
@@ -1058,7 +1068,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
* Internal class that is used to send notifications about finished shard restore operations to master node
*/
public static class UpdateIndexShardRestoreStatusRequest extends TransportRequest {
- private SnapshotId snapshotId;
+ private Snapshot snapshot;
private ShardId shardId;
private ShardRestoreStatus status;
@@ -1068,8 +1078,8 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
}
- private UpdateIndexShardRestoreStatusRequest(SnapshotId snapshotId, ShardId shardId, ShardRestoreStatus status) {
- this.snapshotId = snapshotId;
+ private UpdateIndexShardRestoreStatusRequest(Snapshot snapshot, ShardId shardId, ShardRestoreStatus status) {
+ this.snapshot = snapshot;
this.shardId = shardId;
this.status = status;
}
@@ -1077,7 +1087,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
- snapshotId = SnapshotId.readSnapshotId(in);
+ snapshot = new Snapshot(in);
shardId = ShardId.readShardId(in);
status = ShardRestoreStatus.readShardRestoreStatus(in);
}
@@ -1085,13 +1095,13 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
- snapshotId.writeTo(out);
+ snapshot.writeTo(out);
shardId.writeTo(out);
status.writeTo(out);
}
- public SnapshotId snapshotId() {
- return snapshotId;
+ public Snapshot snapshot() {
+ return snapshot;
}
public ShardId shardId() {
@@ -1104,7 +1114,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
@Override
public String toString() {
- return "" + snapshotId + ", shardId [" + shardId + "], status [" + status.state() + "]";
+ return "" + snapshot + ", shardId [" + shardId + "], status [" + status.state() + "]";
}
}
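
The restoreSnapshot changes above resolve the requested snapshot name against the repository's snapshot list before building a Snapshot. A compact, self-contained sketch of that lookup pattern (the stub type and exception are illustrative, not the Elasticsearch classes):

import java.util.Arrays;
import java.util.List;
import java.util.Optional;

// Sketch only: list the repository's snapshot ids, pick the one whose name matches,
// and fail clearly when it is absent, mirroring the Optional-based lookup above.
class SnapshotLookupSketch {
    static final class SnapshotIdStub {
        final String name;
        final String uuid;
        SnapshotIdStub(String name, String uuid) { this.name = name; this.uuid = uuid; }
        @Override public String toString() { return name + "/" + uuid; }
    }

    static SnapshotIdStub resolve(List<SnapshotIdStub> inRepository, String requestedName) {
        Optional<SnapshotIdStub> match = inRepository.stream()
                .filter(s -> requestedName.equals(s.name))
                .findFirst();
        if (match.isPresent() == false) {
            throw new IllegalArgumentException("[" + requestedName + "] snapshot does not exist");
        }
        return match.get();
    }

    public static void main(String[] args) {
        List<SnapshotIdStub> repo = Arrays.asList(new SnapshotIdStub("snap_1", "u1"));
        System.out.println(resolve(repo, "snap_1")); // snap_1/u1
    }
}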
diff --git a/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java
new file mode 100644
index 00000000000..314cd4053dd
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/snapshots/Snapshot.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * Basic information about a snapshot - a SnapshotId and the repository that the snapshot belongs to.
+ */
+public final class Snapshot implements Writeable {
+
+ private final String repository;
+ private final SnapshotId snapshotId;
+ private final int hashCode;
+
+ /**
+ * Constructs a snapshot.
+ */
+ public Snapshot(final String repository, final SnapshotId snapshotId) {
+ this.repository = Objects.requireNonNull(repository);
+ this.snapshotId = Objects.requireNonNull(snapshotId);
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Constructs a snapshot from the stream input.
+ */
+ public Snapshot(final StreamInput in) throws IOException {
+ repository = in.readString();
+ snapshotId = new SnapshotId(in);
+ hashCode = computeHashCode();
+ }
+
+ /**
+ * Gets the repository name for the snapshot.
+ */
+ public String getRepository() {
+ return repository;
+ }
+
+ /**
+ * Gets the snapshot id for the snapshot.
+ */
+ public SnapshotId getSnapshotId() {
+ return snapshotId;
+ }
+
+ @Override
+ public String toString() {
+ return repository + ":" + snapshotId.toString();
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ @SuppressWarnings("unchecked") Snapshot that = (Snapshot) o;
+ return repository.equals(that.repository) && snapshotId.equals(that.snapshotId);
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ private int computeHashCode() {
+ return Objects.hash(repository, snapshotId);
+ }
+
+ @Override
+ public void writeTo(final StreamOutput out) throws IOException {
+ out.writeString(repository);
+ snapshotId.writeTo(out);
+ }
+
+}
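
One design choice worth noting in the new Snapshot class is the cached hash code: because both fields are final and immutable, the hash can be computed once in the constructor. A standalone illustration of the same pattern:

import java.util.Objects;

// Illustration of the precomputed-hash pattern used by Snapshot above; safe only
// because every field is final and itself immutable.
final class CachedHashPair {
    private final String repository;
    private final String snapshotName;
    private final int hashCode;

    CachedHashPair(String repository, String snapshotName) {
        this.repository = Objects.requireNonNull(repository);
        this.snapshotName = Objects.requireNonNull(snapshotName);
        this.hashCode = Objects.hash(repository, snapshotName);
    }

    @Override public boolean equals(Object o) {
        if (this == o) {
            return true;
        }
        if (o == null || getClass() != o.getClass()) {
            return false;
        }
        CachedHashPair that = (CachedHashPair) o;
        return repository.equals(that.repository) && snapshotName.equals(that.snapshotName);
    }

    @Override public int hashCode() { return hashCode; }

    @Override public String toString() { return repository + ":" + snapshotName; }
}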
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java
index 58faecda4a4..32d3992f243 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotCreationException.java
@@ -19,7 +19,6 @@
package org.elasticsearch.snapshots;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
@@ -29,11 +28,19 @@ import java.io.IOException;
*/
public class SnapshotCreationException extends SnapshotException {
+ public SnapshotCreationException(final String repositoryName, final String snapshotName, final Throwable cause) {
+ super(repositoryName, snapshotName, "failed to create snapshot", cause);
+ }
+
+ public SnapshotCreationException(final String repositoryName, final SnapshotId snapshotId, final Throwable cause) {
+ super(repositoryName, snapshotId, "failed to create snapshot", cause);
+ }
+
+ public SnapshotCreationException(final String repositoryName, final SnapshotId snapshotId, final String msg) {
+ super(repositoryName, snapshotId, msg);
+ }
+
public SnapshotCreationException(StreamInput in) throws IOException {
super(in);
}
-
- public SnapshotCreationException(SnapshotId snapshot, Throwable cause) {
- super(snapshot, "failed to create snapshot", cause);
- }
}
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotException.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotException.java
index b109c46f46b..0acd73d62ea 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotException.java
@@ -20,7 +20,7 @@
package org.elasticsearch.snapshots;
import org.elasticsearch.ElasticsearchException;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@@ -30,33 +30,68 @@ import java.io.IOException;
* Generic snapshot exception
*/
public class SnapshotException extends ElasticsearchException {
- private final SnapshotId snapshot;
- public SnapshotException(SnapshotId snapshot, String msg) {
+ @Nullable
+ private final String repositoryName;
+ @Nullable
+ private final String snapshotName;
+
+ public SnapshotException(final Snapshot snapshot, final String msg) {
this(snapshot, msg, null);
}
- public SnapshotException(SnapshotId snapshot, String msg, Throwable cause) {
+ public SnapshotException(final Snapshot snapshot, final String msg, final Throwable cause) {
super("[" + (snapshot == null ? "_na" : snapshot) + "] " + msg, cause);
- this.snapshot = snapshot;
+ if (snapshot != null) {
+ this.repositoryName = snapshot.getRepository();
+ this.snapshotName = snapshot.getSnapshotId().getName();
+ } else {
+ this.repositoryName = null;
+ this.snapshotName = null;
+ }
}
- public SnapshotException(StreamInput in) throws IOException {
+ public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg) {
+ this(repositoryName, snapshotId, msg, null);
+ }
+
+ public SnapshotException(final String repositoryName, final SnapshotId snapshotId, final String msg, final Throwable cause) {
+ super("[" + repositoryName + ":" + snapshotId + "] " + msg, cause);
+ this.repositoryName = repositoryName;
+ this.snapshotName = snapshotId.getName();
+ }
+
+ public SnapshotException(final String repositoryName, final String snapshotName, final String msg) {
+ this(repositoryName, snapshotName, msg, null);
+ }
+
+ public SnapshotException(final String repositoryName, final String snapshotName, final String msg, final Throwable cause) {
+ super("[" + repositoryName + ":" + snapshotName + "] " + msg, cause);
+ this.repositoryName = repositoryName;
+ this.snapshotName = snapshotName;
+ }
+
+ public SnapshotException(final StreamInput in) throws IOException {
super(in);
- if (in.readBoolean()) {
- snapshot = SnapshotId.readSnapshotId(in);
- } else {
- snapshot = null;
- }
+ repositoryName = in.readOptionalString();
+ snapshotName = in.readOptionalString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
- out.writeOptionalStreamable(snapshot);
+ out.writeOptionalString(repositoryName);
+ out.writeOptionalString(snapshotName);
}
- public SnapshotId snapshot() {
- return snapshot;
+ @Nullable
+ public String getRepositoryName() {
+ return repositoryName;
}
+
+ @Nullable
+ public String getSnapshotName() {
+ return snapshotName;
+ }
+
}
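A minimal sketch, with made-up repository and snapshot names, of how the three constructor flavors above are meant to be chosen depending on how much is known about the snapshot at the throw site:

    SnapshotId snapshotId = new SnapshotId("snap-1", UUIDs.randomBase64UUID());
    // repository name + snapshot name, when no SnapshotId has been resolved yet
    SnapshotException byName = new SnapshotException("my-repo", "snap-1", "snapshot could not be read");
    // repository name + SnapshotId, once the name has been resolved to a uuid
    SnapshotException byId = new SnapshotException("my-repo", snapshotId, "failed to create snapshot");
    // full Snapshot (repository + SnapshotId)
    SnapshotException bySnapshot = new SnapshotException(new Snapshot("my-repo", snapshotId), "failed to finalize snapshot");
    // all three variants expose the same nullable accessors
    assert byId.getRepositoryName().equals("my-repo");
    assert byId.getSnapshotName().equals("snap-1");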
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotId.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotId.java
new file mode 100644
index 00000000000..16f371b28f7
--- /dev/null
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotId.java
@@ -0,0 +1,118 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.io.stream.Writeable;
+
+import java.io.IOException;
+import java.util.Objects;
+
+/**
+ * SnapshotId - snapshot name + snapshot UUID
+ */
+public final class SnapshotId implements Writeable {
+
+ /**
+ * This value is for older snapshots that don't have a UUID.
+ */
+ public static final String UNASSIGNED_UUID = "_na_";
+
+ private final String name;
+ private final String uuid;
+
+ // Caching hash code
+ private final int hashCode;
+
+ /**
+ * Constructs a new snapshot id
+ *
+ * @param name snapshot name
+ * @param uuid snapshot uuid
+ */
+ public SnapshotId(final String name, final String uuid) {
+ this.name = Objects.requireNonNull(name);
+ this.uuid = Objects.requireNonNull(uuid);
+ this.hashCode = computeHashCode();
+ }
+
+ /**
+ * Constructs a new snapshot id from the given input stream
+ *
+ * @param in input stream
+ */
+ public SnapshotId(final StreamInput in) throws IOException {
+ name = in.readString();
+ uuid = in.readString();
+ hashCode = computeHashCode();
+ }
+
+ /**
+ * Returns snapshot name
+ *
+ * @return snapshot name
+ */
+ public String getName() {
+ return name;
+ }
+
+ /**
+ * Returns the snapshot UUID
+ *
+ * @return snapshot uuid
+ */
+ public String getUUID() {
+ return uuid;
+ }
+
+ @Override
+ public String toString() {
+ return name + "/" + uuid;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) {
+ return true;
+ }
+ if (o == null || getClass() != o.getClass()) {
+ return false;
+ }
+ final SnapshotId that = (SnapshotId) o;
+ return name.equals(that.name) && uuid.equals(that.uuid);
+ }
+
+ @Override
+ public int hashCode() {
+ return hashCode;
+ }
+
+ private int computeHashCode() {
+ return Objects.hash(name, uuid);
+ }
+
+ @Override
+ public void writeTo(StreamOutput out) throws IOException {
+ out.writeString(name);
+ out.writeString(uuid);
+ }
+
+}
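A short sketch, using placeholder snapshot names, of how the UNASSIGNED_UUID sentinel interacts with equality for snapshots created before UUIDs existed:

    // snapshots written by repositories that predate this change carry no UUID
    SnapshotId legacy = new SnapshotId("my-snap", SnapshotId.UNASSIGNED_UUID);
    SnapshotId current = new SnapshotId("my-snap", UUIDs.randomBase64UUID());
    // the name alone no longer identifies a snapshot: same name, different uuid
    assert legacy.equals(current) == false;
    assert legacy.toString().equals("my-snap/_na_");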
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
index 2b8ea8ace31..2159fda2237 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotInfo.java
@@ -37,17 +37,19 @@ import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
+import java.util.Objects;
/**
* Information about a snapshot
*/
public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent, FromXContentBuilder<SnapshotInfo>, Writeable {
- public static final SnapshotInfo PROTO = new SnapshotInfo("", Collections.emptyList(), 0);
+ public static final SnapshotInfo PROTO = new SnapshotInfo(new SnapshotId("", ""), Collections.emptyList(), 0);
public static final String CONTEXT_MODE_PARAM = "context_mode";
public static final String CONTEXT_MODE_SNAPSHOT = "SNAPSHOT";
private static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("strictDateOptionalTime");
private static final String SNAPSHOT = "snapshot";
+ private static final String UUID = "uuid";
private static final String INDICES = "indices";
private static final String STATE = "state";
private static final String REASON = "reason";
@@ -68,7 +70,7 @@ public final class SnapshotInfo implements Comparable, ToXContent,
private static final String TOTAL_SHARDS = "total_shards";
private static final String SUCCESSFUL_SHARDS = "successful_shards";
- private final String name;
+ private final SnapshotId snapshotId;
private final SnapshotState state;
@@ -88,39 +90,35 @@ public final class SnapshotInfo implements Comparable, ToXContent,
private final List<SnapshotShardFailure> shardFailures;
- public SnapshotInfo(String name, List<String> indices, long startTime) {
- this(name, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, 0, 0, Collections.emptyList());
+ public SnapshotInfo(SnapshotId snapshotId, List<String> indices, long startTime) {
+ this(snapshotId, indices, SnapshotState.IN_PROGRESS, null, Version.CURRENT, startTime, 0L, 0, 0, Collections.emptyList());
}
- public SnapshotInfo(String name, List<String> indices, long startTime, String reason, long endTime,
+ public SnapshotInfo(SnapshotId snapshotId, List<String> indices, long startTime, String reason, long endTime,
int totalShards, List<SnapshotShardFailure> shardFailures) {
- this(name, indices, snapshotState(reason, shardFailures), reason, Version.CURRENT,
+ this(snapshotId, indices, snapshotState(reason, shardFailures), reason, Version.CURRENT,
startTime, endTime, totalShards, totalShards - shardFailures.size(), shardFailures);
}
- private SnapshotInfo(String name, List<String> indices, SnapshotState state, String reason, Version version, long startTime,
- long endTime, int totalShards, int successfulShards, List<SnapshotShardFailure> shardFailures) {
- assert name != null;
- assert indices != null;
- assert state != null;
- assert shardFailures != null;
- this.name = name;
- this.indices = indices;
- this.state = state;
+ private SnapshotInfo(SnapshotId snapshotId, List<String> indices, SnapshotState state, String reason, Version version,
+ long startTime, long endTime, int totalShards, int successfulShards, List<SnapshotShardFailure> shardFailures) {
+ this.snapshotId = Objects.requireNonNull(snapshotId);
+ this.indices = Objects.requireNonNull(indices);
+ this.state = Objects.requireNonNull(state);
this.reason = reason;
this.version = version;
this.startTime = startTime;
this.endTime = endTime;
this.totalShards = totalShards;
this.successfulShards = successfulShards;
- this.shardFailures = shardFailures;
+ this.shardFailures = Objects.requireNonNull(shardFailures);
}
/**
* Constructs snapshot information from stream input
*/
public SnapshotInfo(final StreamInput in) throws IOException {
- name = in.readString();
+ snapshotId = new SnapshotId(in);
int size = in.readVInt();
List<String> indicesListBuilder = new ArrayList<>();
for (int i = 0; i < size; i++) {
@@ -147,12 +145,12 @@ public final class SnapshotInfo implements Comparable, ToXContent,
}
/**
- * Returns snapshot name
+ * Returns snapshot id
*
- * @return snapshot name
+ * @return snapshot id
*/
- public String name() {
- return name;
+ public SnapshotId snapshotId() {
+ return snapshotId;
}
/**
@@ -270,16 +268,21 @@ public final class SnapshotInfo implements Comparable, ToXContent,
}
final SnapshotInfo that = (SnapshotInfo) o;
- return startTime == that.startTime && name.equals(that.name);
+ return startTime == that.startTime && snapshotId.equals(that.snapshotId);
}
@Override
public int hashCode() {
- int result = name.hashCode();
+ int result = snapshotId.hashCode();
result = 31 * result + Long.hashCode(startTime);
return result;
}
+ @Override
+ public String toString() {
+ return "SnapshotInfo[snapshotId=" + snapshotId + ", state=" + state + ", indices=" + indices + "]";
+ }
+
/**
* Returns snapshot REST status
*/
@@ -303,7 +306,8 @@ public final class SnapshotInfo implements Comparable, ToXContent,
// write snapshot info for the API and any other situations
builder.startObject();
- builder.field(SNAPSHOT, name);
+ builder.field(SNAPSHOT, snapshotId.getName());
+ builder.field(UUID, snapshotId.getUUID());
builder.field(VERSION_ID, version.id);
builder.field(VERSION, version.toString());
builder.startArray(INDICES);
@@ -342,7 +346,8 @@ public final class SnapshotInfo implements Comparable, ToXContent,
private XContentBuilder toXContentSnapshot(final XContentBuilder builder, final ToXContent.Params params) throws IOException {
builder.startObject(SNAPSHOT);
- builder.field(NAME, name);
+ builder.field(NAME, snapshotId.getName());
+ builder.field(UUID, snapshotId.getUUID());
builder.field(VERSION_ID, version.id);
builder.startArray(INDICES);
for (String index : indices) {
@@ -380,13 +385,14 @@ public final class SnapshotInfo implements Comparable, ToXContent,
*/
public static SnapshotInfo fromXContent(final XContentParser parser) throws IOException {
String name = null;
+ String uuid = null;
Version version = Version.CURRENT;
SnapshotState state = SnapshotState.IN_PROGRESS;
String reason = null;
List<String> indices = Collections.emptyList();
long startTime = 0;
long endTime = 0;
- int totalShard = 0;
+ int totalShards = 0;
int successfulShards = 0;
List<SnapshotShardFailure> shardFailures = Collections.emptyList();
if (parser.currentToken() == null) { // fresh parser? move to the first token
@@ -406,6 +412,8 @@ public final class SnapshotInfo implements Comparable, ToXContent,
if (token.isValue()) {
if (NAME.equals(currentFieldName)) {
name = parser.text();
+ } else if (UUID.equals(currentFieldName)) {
+ uuid = parser.text();
} else if (STATE.equals(currentFieldName)) {
state = SnapshotState.valueOf(parser.text());
} else if (REASON.equals(currentFieldName)) {
@@ -415,7 +423,7 @@ public final class SnapshotInfo implements Comparable, ToXContent,
} else if (END_TIME.equals(currentFieldName)) {
endTime = parser.longValue();
} else if (TOTAL_SHARDS.equals(currentFieldName)) {
- totalShard = parser.intValue();
+ totalShards = parser.intValue();
} else if (SUCCESSFUL_SHARDS.equals(currentFieldName)) {
successfulShards = parser.intValue();
} else if (VERSION_ID.equals(currentFieldName)) {
@@ -448,12 +456,25 @@ public final class SnapshotInfo implements Comparable, ToXContent,
} else {
throw new ElasticsearchParseException("unexpected token [" + token + "]");
}
- return new SnapshotInfo(name, indices, state, reason, version, startTime, endTime, totalShard, successfulShards, shardFailures);
+ if (uuid == null) {
+ // the old format where there wasn't a UUID
+ uuid = SnapshotId.UNASSIGNED_UUID;
+ }
+ return new SnapshotInfo(new SnapshotId(name, uuid),
+ indices,
+ state,
+ reason,
+ version,
+ startTime,
+ endTime,
+ totalShards,
+ successfulShards,
+ shardFailures);
}
@Override
public void writeTo(final StreamOutput out) throws IOException {
- out.writeString(name);
+ snapshotId.writeTo(out);
out.writeVInt(indices.size());
for (String index : indices) {
out.writeString(index);
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
index 27fe3de51e0..5f0979e38d8 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotMissingException.java
@@ -19,7 +19,6 @@
package org.elasticsearch.snapshots;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.rest.RestStatus;
@@ -30,12 +29,16 @@ import java.io.IOException;
*/
public class SnapshotMissingException extends SnapshotException {
- public SnapshotMissingException(SnapshotId snapshot, Throwable cause) {
- super(snapshot, "is missing", cause);
+ public SnapshotMissingException(final String repositoryName, final SnapshotId snapshotId, final Throwable cause) {
+ super(repositoryName, snapshotId, "is missing", cause);
}
- public SnapshotMissingException(SnapshotId snapshot) {
- super(snapshot, "is missing");
+ public SnapshotMissingException(final String repositoryName, final SnapshotId snapshotId) {
+ super(repositoryName, snapshotId, "is missing");
+ }
+
+ public SnapshotMissingException(final String repositoryName, final String snapshotName) {
+ super(repositoryName, snapshotName, "is missing");
}
public SnapshotMissingException(StreamInput in) throws IOException {
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java
index 940f8162e6b..9003a08a54a 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotRestoreException.java
@@ -19,7 +19,6 @@
package org.elasticsearch.snapshots;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.common.io.stream.StreamInput;
import java.io.IOException;
@@ -28,11 +27,19 @@ import java.io.IOException;
* Snapshot restore exception
*/
public class SnapshotRestoreException extends SnapshotException {
- public SnapshotRestoreException(SnapshotId snapshot, String message) {
+ public SnapshotRestoreException(final String repositoryName, final String snapshotName, final String message) {
+ super(repositoryName, snapshotName, message);
+ }
+
+ public SnapshotRestoreException(final String repositoryName, final String snapshotName, final String message, final Throwable cause) {
+ super(repositoryName, snapshotName, message, cause);
+ }
+
+ public SnapshotRestoreException(final Snapshot snapshot, final String message) {
super(snapshot, message);
}
- public SnapshotRestoreException(SnapshotId snapshot, String message, Throwable cause) {
+ public SnapshotRestoreException(final Snapshot snapshot, final String message, final Throwable cause) {
super(snapshot, message, cause);
}
diff --git a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
index c2a33680efa..b9d8c729223 100644
--- a/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
+++ b/core/src/main/java/org/elasticsearch/snapshots/SnapshotShardsService.java
@@ -27,7 +27,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.SnapshotsInProgress;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -94,7 +93,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent shardSnapshots = emptyMap();
+ private volatile Map<Snapshot, SnapshotShards> shardSnapshots = emptyMap();
private final BlockingQueue updatedSnapshotStateQueue = ConcurrentCollections.newBlockingQueue();
@@ -176,11 +175,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent
*
- * @param snapshotId snapshot id
+ * @param snapshot snapshot
* @return map of shard id to snapshot status
*/
- public Map<ShardId, IndexShardSnapshotStatus> currentSnapshotShards(SnapshotId snapshotId) {
- SnapshotShards snapshotShards = shardSnapshots.get(snapshotId);
+ public Map<ShardId, IndexShardSnapshotStatus> currentSnapshotShards(Snapshot snapshot) {
+ SnapshotShards snapshotShards = shardSnapshots.get(snapshot);
if (snapshotShards == null) {
return null;
} else {
@@ -195,24 +194,25 @@ public class SnapshotShardsService extends AbstractLifecycleComponent survivors = new HashMap<>();
+ Map<Snapshot, SnapshotShards> survivors = new HashMap<>();
// First, remove snapshots that are no longer there
- for (Map.Entry<SnapshotId, SnapshotShards> entry : shardSnapshots.entrySet()) {
- if (snapshotsInProgress != null && snapshotsInProgress.snapshot(entry.getKey()) != null) {
+ for (Map.Entry<Snapshot, SnapshotShards> entry : shardSnapshots.entrySet()) {
+ final Snapshot snapshot = entry.getKey();
+ if (snapshotsInProgress != null && snapshotsInProgress.snapshot(snapshot) != null) {
survivors.put(entry.getKey(), entry.getValue());
}
}
// For now we will be mostly dealing with a single snapshot at a time but might have multiple simultaneously running
// snapshots in the future
- Map<SnapshotId, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = new HashMap<>();
+ Map<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> newSnapshots = new HashMap<>();
// Now go through all snapshots and update existing or create missing
final String localNodeId = clusterService.localNode().getId();
if (snapshotsInProgress != null) {
for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
if (entry.state() == SnapshotsInProgress.State.STARTED) {
Map<ShardId, IndexShardSnapshotStatus> startedShards = new HashMap<>();
- SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshotId());
+ SnapshotShards snapshotShards = shardSnapshots.get(entry.snapshot());
for (ObjectObjectCursor shard : entry.shards()) {
// Add all new shards to start processing on
if (localNodeId.equals(shard.value.nodeId())) {
@@ -223,7 +223,7 @@ public class SnapshotShardsService extends AbstractLifecycleComponent shards = new HashMap<>();
@@ -231,15 +231,15 @@ public class SnapshotShardsService extends AbstractLifecycleComponent shard : entry.shards()) {
IndexShardSnapshotStatus snapshotStatus = snapshotShards.shards.get(shard.key);
@@ -250,16 +250,16 @@ public class SnapshotShardsService extends AbstractLifecycleComponent> entry : newSnapshots.entrySet()) {
+ for (final Map.Entry<Snapshot, Map<ShardId, IndexShardSnapshotStatus>> entry : newSnapshots.entrySet()) {
for (final Map.Entry<ShardId, IndexShardSnapshotStatus> shardEntry : entry.getValue().entrySet()) {
final ShardId shardId = shardEntry.getKey();
try {
@@ -318,11 +318,11 @@ public class SnapshotShardsService extends AbstractLifecycleComponent localShards = currentSnapshotShards(snapshot.snapshotId());
+ Map<ShardId, IndexShardSnapshotStatus> localShards = currentSnapshotShards(snapshot.snapshot());
if (localShards != null) {
ImmutableOpenMap masterShards = snapshot.shards();
for(Map.Entry localShard : localShards.entrySet()) {
@@ -380,13 +380,13 @@ public class SnapshotShardsService extends AbstractLifecycleComponentOnce shard snapshot is created data node updates state of the shard in the cluster state using the {@link SnapshotShardsService#updateIndexShardSnapshotStatus} method
* - When last shard is completed master node in {@link SnapshotShardsService#innerUpdateSnapshotState} method marks the snapshot as completed
* - After cluster state is updated, the {@link #endSnapshot(SnapshotsInProgress.Entry)} finalizes snapshot in the repository,
- * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(SnapshotId, SnapshotInfo, Throwable)} to remove snapshot from cluster state
+ * notifies all {@link #snapshotCompletionListeners} that snapshot is completed, and finally calls {@link #removeSnapshotFromClusterState(Snapshot, SnapshotInfo, Throwable)} to remove snapshot from cluster state
*
*/
public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService> implements ClusterStateListener {
@@ -121,49 +124,67 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService> implements ClusterStateListener {
+ public List<SnapshotId> snapshotIds(final String repositoryName) {
+ Repository repository = repositoriesService.repository(repositoryName);
+ assert repository != null; // should only be called once we've validated the repository exists
+ return repository.snapshots();
+ }
+
/**
* Retrieves snapshot from repository
*
- * @param snapshotId snapshot id
+ * @param repositoryName repository name
+ * @param snapshotId snapshot id
* @return snapshot
* @throws SnapshotMissingException if snapshot is not found
*/
- public SnapshotInfo snapshot(SnapshotId snapshotId) {
- validate(snapshotId);
- List<SnapshotsInProgress.Entry> entries = currentSnapshots(snapshotId.getRepository(), new String[]{snapshotId.getSnapshot()});
+ public SnapshotInfo snapshot(final String repositoryName, final SnapshotId snapshotId) {
+ List<SnapshotsInProgress.Entry> entries = currentSnapshots(repositoryName, Arrays.asList(snapshotId.getName()));
if (!entries.isEmpty()) {
return inProgressSnapshot(entries.iterator().next());
}
- return repositoriesService.repository(snapshotId.getRepository()).readSnapshot(snapshotId);
+ return repositoriesService.repository(repositoryName).readSnapshot(snapshotId);
}
/**
* Returns a list of snapshots from repository sorted by snapshot creation date
*
* @param repositoryName repository name
+ * @param snapshotIds snapshots for which to fetch snapshot information
+ * @param ignoreUnavailable if true, snapshots that could not be read will only be logged with a warning,
+ * if false, they will throw an error
* @return list of snapshots
*/
- public List<SnapshotInfo> snapshots(String repositoryName, boolean ignoreUnavailable) {
- Set<SnapshotInfo> snapshotSet = new HashSet<>();
- List<SnapshotsInProgress.Entry> entries = currentSnapshots(repositoryName, null);
+ public List<SnapshotInfo> snapshots(final String repositoryName, List<SnapshotId> snapshotIds, final boolean ignoreUnavailable) {
+ final Set<SnapshotInfo> snapshotSet = new HashSet<>();
+ final Set<SnapshotId> snapshotIdsToIterate = new HashSet<>(snapshotIds);
+ // first, look at the snapshots in progress
+ final List<SnapshotsInProgress.Entry> entries =
+ currentSnapshots(repositoryName, snapshotIdsToIterate.stream().map(SnapshotId::getName).collect(Collectors.toList()));
for (SnapshotsInProgress.Entry entry : entries) {
snapshotSet.add(inProgressSnapshot(entry));
+ snapshotIdsToIterate.remove(entry.snapshot().getSnapshotId());
}
- Repository repository = repositoriesService.repository(repositoryName);
- List<SnapshotId> snapshotIds = repository.snapshots();
- for (SnapshotId snapshotId : snapshotIds) {
+ // then, look in the repository
+ final Repository repository = repositoriesService.repository(repositoryName);
+ for (SnapshotId snapshotId : snapshotIdsToIterate) {
try {
snapshotSet.add(repository.readSnapshot(snapshotId));
} catch (Exception ex) {
if (ignoreUnavailable) {
logger.warn("failed to get snapshot [{}]", ex, snapshotId);
} else {
- throw new SnapshotException(snapshotId, "Snapshot could not be read", ex);
+ throw new SnapshotException(repositoryName, snapshotId, "Snapshot could not be read", ex);
}
}
}
-
- ArrayList<SnapshotInfo> snapshotList = new ArrayList<>(snapshotSet);
+ final ArrayList<SnapshotInfo> snapshotList = new ArrayList<>(snapshotSet);
CollectionUtil.timSort(snapshotList);
return Collections.unmodifiableList(snapshotList);
}
@@ -174,9 +195,9 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsService> implements ClusterStateListener {
- public List<SnapshotInfo> currentSnapshots(String repositoryName) {
+ public List<SnapshotInfo> currentSnapshots(final String repositoryName) {
List<SnapshotInfo> snapshotList = new ArrayList<>();
- List<SnapshotsInProgress.Entry> entries = currentSnapshots(repositoryName, null);
+ List<SnapshotsInProgress.Entry> entries = currentSnapshots(repositoryName, Collections.emptyList());
for (SnapshotsInProgress.Entry entry : entries) {
snapshotList.add(inProgressSnapshot(entry));
}
@@ -194,8 +215,11 @@ public class SnapshotsService extends AbstractLifecycleComponent indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices()));
- logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices);
- newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), request.partial(), State.INIT, indices, System.currentTimeMillis(), null);
+ logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices);
+ newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId),
+ request.includeGlobalState(),
+ request.partial(),
+ State.INIT,
+ indices,
+ System.currentTimeMillis(),
+ null);
snapshots = new SnapshotsInProgress(newSnapshot);
} else {
// TODO: What should we do if a snapshot is already running?
- throw new ConcurrentSnapshotExecutionException(snapshotId, "a snapshot is already running");
+ throw new ConcurrentSnapshotExecutionException(repositoryName, snapshotName, "a snapshot is already running");
}
return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, snapshots).build();
}
@Override
public void onFailure(String source, Throwable t) {
- logger.warn("[{}][{}] failed to create snapshot", t, request.repository(), request.name());
+ logger.warn("[{}][{}] failed to create snapshot", t, repositoryName, snapshotName);
newSnapshot = null;
listener.onFailure(t);
}
@@ -228,12 +258,9 @@ public class SnapshotsService extends AbstractLifecycleComponent
+ beginSnapshot(newState, newSnapshot, request.partial(), listener)
+ );
}
}
@@ -253,34 +280,36 @@ public class SnapshotsService extends AbstractLifecycleComponent entries = new ArrayList<>();
for (SnapshotsInProgress.Entry entry : snapshots.entries()) {
- if (entry.snapshotId().equals(snapshot.snapshotId())) {
+ if (entry.snapshot().equals(snapshot.snapshot())) {
// Replace the snapshot that was just created
ImmutableOpenMap shards = shards(currentState, entry.indices());
if (!partial) {
@@ -362,13 +394,15 @@ public class SnapshotsService extends AbstractLifecycleComponentemptyList());
+ repositoriesService.repository(snapshot.snapshot().getRepository())
+ .finalizeSnapshot(snapshot.snapshot().getSnapshotId(),
+ snapshot.indices(),
+ snapshot.startTime(),
+ ExceptionsHelper.detailedMessage(t),
+ 0,
+ Collections.emptyList());
} catch (Throwable t2) {
- logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshotId());
+ logger.warn("[{}] failed to close snapshot in repository", snapshot.snapshot());
}
}
userCreateSnapshotListener.onFailure(t);
@@ -433,7 +472,7 @@ public class SnapshotsService extends AbstractLifecycleComponent
*
* @param repository repository id
- * @param snapshots optional list of snapshots that will be used as a filter
+ * @param snapshots list of snapshot names used as a filter; an empty list means no snapshots are filtered
* @return list of metadata for currently running snapshots
*/
- public List<SnapshotsInProgress.Entry> currentSnapshots(String repository, String[] snapshots) {
+ public List<SnapshotsInProgress.Entry> currentSnapshots(final String repository, final List<String> snapshots) {
SnapshotsInProgress snapshotsInProgress = clusterService.state().custom(SnapshotsInProgress.TYPE);
if (snapshotsInProgress == null || snapshotsInProgress.entries().isEmpty()) {
return Collections.emptyList();
@@ -458,12 +497,12 @@ public class SnapshotsService extends AbstractLifecycleComponent 0) {
+ if (snapshots.isEmpty() == false) {
for (String snapshot : snapshots) {
- if (entry.snapshotId().getSnapshot().equals(snapshot)) {
+ if (entry.snapshot().getSnapshotId().getName().equals(snapshot)) {
return snapshotsInProgress.entries();
}
}
@@ -474,12 +513,12 @@ public class SnapshotsService extends AbstractLifecycleComponent builder = new ArrayList<>();
for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
- if (!entry.snapshotId().getRepository().equals(repository)) {
+ if (entry.snapshot().getRepository().equals(repository) == false) {
continue;
}
- if (snapshots != null && snapshots.length > 0) {
+ if (snapshots.isEmpty() == false) {
for (String snapshot : snapshots) {
- if (entry.snapshotId().getSnapshot().equals(snapshot)) {
+ if (entry.snapshot().getSnapshotId().getName().equals(snapshot)) {
builder.add(entry);
break;
}
@@ -494,34 +533,35 @@ public class SnapshotsService extends AbstractLifecycleComponent
- * This method is executed on master node and it's complimentary to the {@link SnapshotShardsService#currentSnapshotShards(SnapshotId)} because it
+ * This method is executed on the master node and is complementary to the {@link SnapshotShardsService#currentSnapshotShards(Snapshot)} because it
* returns similar information but for already finished snapshots.
*
*
- * @param snapshotId snapshot id
+ * @param repositoryName repository name
+ * @param snapshotInfo snapshot info
* @return map of shard id to snapshot status
*/
- public Map<ShardId, IndexShardSnapshotStatus> snapshotShards(SnapshotId snapshotId) throws IOException {
- validate(snapshotId);
+ public Map<ShardId, IndexShardSnapshotStatus> snapshotShards(final String repositoryName,
+ final SnapshotInfo snapshotInfo) throws IOException {
Map<ShardId, IndexShardSnapshotStatus> shardStatus = new HashMap<>();
- Repository repository = repositoriesService.repository(snapshotId.getRepository());
- IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(snapshotId.getRepository());
- SnapshotInfo snapshot = repository.readSnapshot(snapshotId);
- MetaData metaData = repository.readSnapshotMetaData(snapshotId, snapshot, snapshot.indices());
- for (String index : snapshot.indices()) {
+ Repository repository = repositoriesService.repository(repositoryName);
+ IndexShardRepository indexShardRepository = repositoriesService.indexShardRepository(repositoryName);
+ MetaData metaData = repository.readSnapshotMetaData(snapshotInfo, snapshotInfo.indices());
+ for (String index : snapshotInfo.indices()) {
IndexMetaData indexMetaData = metaData.indices().get(index);
if (indexMetaData != null) {
int numberOfShards = indexMetaData.getNumberOfShards();
for (int i = 0; i < numberOfShards; i++) {
ShardId shardId = new ShardId(indexMetaData.getIndex(), i);
- SnapshotShardFailure shardFailure = findShardFailure(snapshot.shardFailures(), shardId);
+ SnapshotShardFailure shardFailure = findShardFailure(snapshotInfo.shardFailures(), shardId);
if (shardFailure != null) {
IndexShardSnapshotStatus shardSnapshotStatus = new IndexShardSnapshotStatus();
shardSnapshotStatus.updateStage(IndexShardSnapshotStatus.Stage.FAILURE);
shardSnapshotStatus.failure(shardFailure.reason());
shardStatus.put(shardId, shardSnapshotStatus);
} else {
- IndexShardSnapshotStatus shardSnapshotStatus = indexShardRepository.snapshotStatus(snapshotId, snapshot.version(), shardId);
+ IndexShardSnapshotStatus shardSnapshotStatus =
+ indexShardRepository.snapshotStatus(snapshotInfo.snapshotId(), snapshotInfo.version(), shardId);
shardStatus.put(shardId, shardSnapshotStatus);
}
}
@@ -606,15 +646,15 @@ public class SnapshotsService extends AbstractLifecycleComponent failures = new ArrayList<>();
ArrayList<SnapshotShardFailure> shardFailures = new ArrayList<>();
for (ObjectObjectCursor shardStatus : entry.shards()) {
@@ -824,11 +864,11 @@ public class SnapshotsService extends AbstractLifecycleComponent listener) {
clusterService.submitStateUpdateTask("remove snapshot metadata", new ClusterStateUpdateTask() {
@Override
@@ -863,7 +903,7 @@ public class SnapshotsService extends AbstractLifecycleComponent entries = new ArrayList<>();
for (SnapshotsInProgress.Entry entry : snapshots.entries()) {
- if (entry.snapshotId().equals(snapshotId)) {
+ if (entry.snapshot().equals(snapshot)) {
changed = true;
} else {
entries.add(entry);
@@ -879,7 +919,7 @@ public class SnapshotsService extends AbstractLifecycleComponent matchedEntry = repository.snapshots().stream().filter(s -> s.getName().equals(snapshotName)).findFirst();
+ // if nothing found by the same name, then look in the cluster state for current in progress snapshots
+ if (matchedEntry.isPresent() == false) {
+ matchedEntry = currentSnapshots(repositoryName, Collections.emptyList()).stream()
+ .map(e -> e.snapshot().getSnapshotId()).filter(s -> s.getName().equals(snapshotName)).findFirst();
+ }
+ if (matchedEntry.isPresent() == false) {
+ throw new SnapshotMissingException(repositoryName, snapshotName);
+ }
+ deleteSnapshot(new Snapshot(repositoryName, matchedEntry.get()), listener);
+ }
+
/**
* Deletes snapshot from repository.
*
* If the snapshot is still running cancels the snapshot first and then deletes it from the repository.
*
- * @param snapshotId snapshot id
- * @param listener listener
+ * @param snapshot snapshot
+ * @param listener listener
*/
- public void deleteSnapshot(final SnapshotId snapshotId, final DeleteSnapshotListener listener) {
- validate(snapshotId);
+ public void deleteSnapshot(final Snapshot snapshot, final DeleteSnapshotListener listener) {
clusterService.submitStateUpdateTask("delete snapshot", new ClusterStateUpdateTask() {
boolean waitForSnapshot = false;
@@ -926,22 +988,22 @@ public class SnapshotsService extends AbstractLifecycleComponent shards;
- if (snapshot.state() == State.STARTED && snapshot.shards() != null) {
+ if (snapshotEntry.state() == State.STARTED && snapshotEntry.shards() != null) {
// snapshot is currently running - stop started shards
ImmutableOpenMap.Builder shardsBuilder = ImmutableOpenMap.builder();
- for (ObjectObjectCursor shardEntry : snapshot.shards()) {
+ for (ObjectObjectCursor shardEntry : snapshotEntry.shards()) {
ShardSnapshotStatus status = shardEntry.value;
if (!status.state().completed()) {
shardsBuilder.put(shardEntry.key, new ShardSnapshotStatus(status.nodeId(), State.ABORTED));
@@ -950,14 +1012,14 @@ public class SnapshotsService extends AbstractLifecycleComponent shardStatus : snapshot.shards().values()) {
+ for (ObjectCursor shardStatus : snapshotEntry.shards().values()) {
// Check if we still have shard running on existing nodes
if (shardStatus.value.state().completed() == false && shardStatus.value.nodeId() != null
&& currentState.nodes().get(shardStatus.value.nodeId()) != null) {
@@ -972,11 +1034,11 @@ public class SnapshotsService extends AbstractLifecycleComponent {
+ try {
+ Repository repository = repositoriesService.repository(snapshot.getRepository());
+ repository.deleteSnapshot(snapshot.getSnapshotId());
+ listener.onResponse();
+ } catch (Throwable t) {
+ listener.onFailure(t);
}
});
}
@@ -1203,7 +1262,7 @@ public class SnapshotsService extends AbstractLifecycleComponent repoVersions = listRepoVersions();
+ // run the test for each supported version
+ for (final String version : repoVersions) {
+ final String repoName = "test-repo-" + version;
+ logger.info("--> creating repository [{}] for version [{}]", repoName, version);
+ createRepository(version, repoName);
+
+ logger.info("--> get the snapshots");
+ final String originalIndex = "index-" + version;
+ final Set<String> indices = Sets.newHashSet(originalIndex);
+ final Set<SnapshotInfo> snapshotInfos = Sets.newHashSet(getSnapshots(repoName));
+ assertThat(snapshotInfos.size(), equalTo(1));
+ SnapshotInfo originalSnapshot = snapshotInfos.iterator().next();
+ assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", SnapshotId.UNASSIGNED_UUID)));
+ assertThat(Sets.newHashSet(originalSnapshot.indices()), equalTo(indices));
+
+ logger.info("--> restore the original snapshot");
+ final Set<String> restoredIndices = Sets.newHashSet(
+ restoreSnapshot(repoName, originalSnapshot.snapshotId().getName())
+ );
+ assertThat(restoredIndices, equalTo(indices));
+ // make sure it has documents
+ for (final String searchIdx : restoredIndices) {
+ assertThat(client().prepareSearch(searchIdx).setSize(0).get().getHits().totalHits(), greaterThan(0L));
+ }
+ deleteIndices(restoredIndices); // delete so we can restore again later
+
+ final String snapshotName2 = "test_2";
+ logger.info("--> take a new snapshot of the old index");
+ final int addedDocSize = 10;
+ for (int i = 0; i < addedDocSize; i++) {
+ index(originalIndex, "doc", Integer.toString(i), "foo", "new-bar-" + i);
+ }
+ refresh();
+ snapshotInfos.add(createSnapshot(repoName, snapshotName2));
+
+ logger.info("--> get the snapshots with the newly created snapshot [{}]", snapshotName2);
+ Set<SnapshotInfo> snapshotInfosFromRepo = Sets.newHashSet(getSnapshots(repoName));
+ assertThat(snapshotInfosFromRepo, equalTo(snapshotInfos));
+ snapshotInfosFromRepo.forEach(snapshotInfo -> {
+ assertThat(Sets.newHashSet(snapshotInfo.indices()), equalTo(indices));
+ });
+
+ final String snapshotName3 = "test_3";
+ final String indexName2 = "index2";
+ logger.info("--> take a new snapshot with a new index");
+ createIndex(indexName2);
+ indices.add(indexName2);
+ for (int i = 0; i < addedDocSize; i++) {
+ index(indexName2, "doc", Integer.toString(i), "foo", "new-bar-" + i);
+ }
+ refresh();
+ snapshotInfos.add(createSnapshot(repoName, snapshotName3));
+
+ logger.info("--> get the snapshots with the newly created snapshot [{}]", snapshotName3);
+ snapshotInfosFromRepo = Sets.newHashSet(getSnapshots(repoName));
+ assertThat(snapshotInfosFromRepo, equalTo(snapshotInfos));
+ snapshotInfosFromRepo.forEach(snapshotInfo -> {
+ if (snapshotInfo.snapshotId().getName().equals(snapshotName3)) {
+ // only the last snapshot has all the indices
+ assertThat(Sets.newHashSet(snapshotInfo.indices()), equalTo(indices));
+ } else {
+ assertThat(Sets.newHashSet(snapshotInfo.indices()), equalTo(Sets.newHashSet(originalIndex)));
+ }
+ });
+ deleteIndices(indices); // clean up indices
+
+ logger.info("--> restore the old snapshot again");
+ Set<String> oldRestoredIndices = Sets.newHashSet(restoreSnapshot(repoName, originalSnapshot.snapshotId().getName()));
+ assertThat(oldRestoredIndices, equalTo(Sets.newHashSet(originalIndex)));
+ for (final String searchIdx : oldRestoredIndices) {
+ assertThat(client().prepareSearch(searchIdx).setSize(0).get().getHits().totalHits(),
+ greaterThanOrEqualTo((long)addedDocSize));
+ }
+ deleteIndices(oldRestoredIndices);
+
+ logger.info("--> restore the new snapshot");
+ Set<String> newSnapshotIndices = Sets.newHashSet(restoreSnapshot(repoName, snapshotName3));
+ assertThat(newSnapshotIndices, equalTo(Sets.newHashSet(originalIndex, indexName2)));
+ for (final String searchIdx : newSnapshotIndices) {
+ assertThat(client().prepareSearch(searchIdx).setSize(0).get().getHits().totalHits(),
+ greaterThanOrEqualTo((long)addedDocSize));
+ }
+ deleteIndices(newSnapshotIndices); // clean up indices before starting again
+ }
+ }
+
+ private List<String> listRepoVersions() throws Exception {
+ final String prefix = "repo";
+ final List<String> repoVersions = new ArrayList<>();
+ final Path repoFiles = getBwcIndicesPath();
+ try (final DirectoryStream<Path> dirStream = Files.newDirectoryStream(repoFiles, prefix + "-*.zip")) {
+ for (final Path entry : dirStream) {
+ final String fileName = entry.getFileName().toString();
+ String version = fileName.substring(prefix.length() + 1);
+ version = version.substring(0, version.length() - ".zip".length());
+ repoVersions.add(version);
+ }
+ }
+ return Collections.unmodifiableList(repoVersions);
+ }
+
+ private void createRepository(final String version, final String repoName) throws Exception {
+ final String prefix = "repo";
+ final Path repoFile = getBwcIndicesPath().resolve(prefix + "-" + version + ".zip");
+ final Path repoPath = randomRepoPath();
+ FileTestUtils.unzip(repoFile, repoPath, "repo/");
+ assertAcked(client().admin().cluster().preparePutRepository(repoName)
+ .setType("fs")
+ .setSettings(Settings.builder().put("location", repoPath)));
+ }
+
+ private List<SnapshotInfo> getSnapshots(final String repoName) throws Exception {
+ return client().admin().cluster().prepareGetSnapshots(repoName)
+ .addSnapshots("_all")
+ .get()
+ .getSnapshots();
+ }
+
+ private SnapshotInfo createSnapshot(final String repoName, final String snapshotName) throws Exception {
+ return client().admin().cluster().prepareCreateSnapshot(repoName, snapshotName)
+ .setWaitForCompletion(true)
+ .get()
+ .getSnapshotInfo();
+ }
+
+ private List<String> restoreSnapshot(final String repoName, final String snapshotName) throws Exception {
+ return client().admin().cluster().prepareRestoreSnapshot(repoName, snapshotName)
+ .setWaitForCompletion(true)
+ .get()
+ .getRestoreInfo()
+ .indices();
+ }
+
+ private void deleteIndices(final Set<String> indices) throws Exception {
+ client().admin().indices().prepareDelete(indices.toArray(new String[indices.size()])).get();
+ }
+
+}
diff --git a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
index ad16267cde9..419104bfe34 100644
--- a/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
+++ b/core/src/test/java/org/elasticsearch/bwcompat/RestoreBackwardsCompatIT.java
@@ -40,7 +40,6 @@ import org.elasticsearch.test.ESIntegTestCase.Scope;
import org.elasticsearch.test.VersionUtils;
import java.io.IOException;
-import java.lang.reflect.Modifier;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.file.DirectoryStream;
diff --git a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
index 1d013737a30..b82b5e0ba60 100644
--- a/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
+++ b/core/src/test/java/org/elasticsearch/cluster/ClusterStateDiffIT.java
@@ -30,7 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -51,6 +51,7 @@ import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESIntegTestCase;
import java.util.Collections;
@@ -653,7 +654,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
switch (randomIntBetween(0, 1)) {
case 0:
return new SnapshotsInProgress(new SnapshotsInProgress.Entry(
- new SnapshotId(randomName("repo"), randomName("snap")),
+ new Snapshot(randomName("repo"), new SnapshotId(randomName("snap"), UUIDs.randomBase64UUID())),
randomBoolean(),
randomBoolean(),
SnapshotsInProgress.State.fromValue((byte) randomIntBetween(0, 6)),
@@ -662,7 +663,7 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
ImmutableOpenMap.of()));
case 1:
return new RestoreInProgress(new RestoreInProgress.Entry(
- new SnapshotId(randomName("repo"), randomName("snap")),
+ new Snapshot(randomName("repo"), new SnapshotId(randomName("snap"), UUIDs.randomBase64UUID())),
RestoreInProgress.State.fromValue((byte) randomIntBetween(0, 3)),
emptyList(),
ImmutableOpenMap.of()));
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java
index 31f48a68654..7267252b19f 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/ShardRoutingTests.java
@@ -20,18 +20,15 @@
package org.elasticsearch.cluster.routing;
import org.elasticsearch.Version;
-import org.elasticsearch.cluster.ClusterState;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESTestCase;
import java.io.IOException;
-import static org.hamcrest.Matchers.equalTo;
-
public class ShardRoutingTests extends ESTestCase {
public void testIsSameAllocation() {
@@ -155,8 +152,8 @@ public class ShardRoutingTests extends ESTestCase {
case 4:
// change restore source
otherRouting = TestShardRouting.newShardRouting(otherRouting.getIndexName(), otherRouting.id(), otherRouting.currentNodeId(), otherRouting.relocatingNodeId(),
- otherRouting.restoreSource() == null ? new RestoreSource(new SnapshotId("test", "s1"), Version.CURRENT, "test") :
- new RestoreSource(otherRouting.restoreSource().snapshotId(), Version.CURRENT, otherRouting.index() + "_1"),
+ otherRouting.restoreSource() == null ? new RestoreSource(new Snapshot("test", new SnapshotId("s1", UUIDs.randomBase64UUID())), Version.CURRENT, "test") :
+ new RestoreSource(otherRouting.restoreSource().snapshot(), Version.CURRENT, otherRouting.index() + "_1"),
otherRouting.primary(), otherRouting.state(), otherRouting.unassignedInfo());
break;
case 5:
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
index a0ef14ee98d..82703c6f29e 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/UnassignedInfoTests.java
@@ -26,7 +26,8 @@ import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
@@ -35,6 +36,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESAllocationTestCase;
import java.util.Collections;
@@ -130,7 +132,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metaData(metaData)
- .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"), Version.CURRENT, "test"), new IntHashSet()).build()).build();
+ .routingTable(RoutingTable.builder().addAsNewRestore(metaData.index("test"), new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test"), new IntHashSet()).build()).build();
for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.NEW_INDEX_RESTORED));
}
@@ -142,7 +144,7 @@ public class UnassignedInfoTests extends ESAllocationTestCase {
.build();
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT)
.metaData(metaData)
- .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"), Version.CURRENT, "test")).build()).build();
+ .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())), Version.CURRENT, "test")).build()).build();
for (ShardRouting shard : clusterState.getRoutingNodes().shardsWithState(UNASSIGNED)) {
assertThat(shard.unassignedInfo().getReason(), equalTo(UnassignedInfo.Reason.EXISTING_INDEX_RESTORED));
}
diff --git a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
index 8eb3e2f8a8b..54df74db1f5 100644
--- a/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
+++ b/core/src/test/java/org/elasticsearch/cluster/routing/allocation/NodeVersionAllocationDeciderTests.java
@@ -25,7 +25,8 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.EmptyClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
@@ -48,6 +49,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.index.shard.ShardId;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESAllocationTestCase;
import org.elasticsearch.test.VersionUtils;
import org.elasticsearch.test.gateway.NoopGatewayAllocator;
@@ -360,7 +362,8 @@ public class NodeVersionAllocationDeciderTests extends ESAllocationTestCase {
ClusterState state = ClusterState.builder(ClusterName.DEFAULT)
.metaData(metaData)
- .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"), new RestoreSource(new SnapshotId("rep1", "snp1"),
+ .routingTable(RoutingTable.builder().addAsRestore(metaData.index("test"),
+ new RestoreSource(new Snapshot("rep1", new SnapshotId("snp1", UUIDs.randomBase64UUID())),
Version.CURRENT, "test")).build())
.nodes(DiscoveryNodes.builder().put(newNode).put(oldNode1).put(oldNode2)).build();
AllocationDeciders allocationDeciders = new AllocationDeciders(Settings.EMPTY, new AllocationDecider[]{
diff --git a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
index ee1cf7280e7..3e088f4f82e 100644
--- a/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
+++ b/core/src/test/java/org/elasticsearch/gateway/PrimaryShardAllocatorTests.java
@@ -24,7 +24,7 @@ import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RestoreSource;
@@ -40,6 +40,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardStateMetaData;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before;
@@ -339,8 +340,9 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.putActiveAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
.build();
+ final Snapshot snapshot = new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID()));
RoutingTable routingTable = RoutingTable.builder()
- .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), version, shardId.getIndexName()))
+ .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(snapshot, version, shardId.getIndexName()))
.build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
.metaData(metaData)
@@ -419,7 +421,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
.build();
RoutingTable routingTable = RoutingTable.builder()
- .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new SnapshotId("test", "test"), Version.CURRENT, shardId.getIndexName()))
+ .addAsRestore(metaData.index(shardId.getIndex()), new RestoreSource(new Snapshot("test", new SnapshotId("test", UUIDs.randomBase64UUID())), Version.CURRENT, shardId.getIndexName()))
.build();
ClusterState state = ClusterState.builder(org.elasticsearch.cluster.ClusterName.DEFAULT)
.metaData(metaData)
diff --git a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
index 52aa07b3eb3..7774537c734 100644
--- a/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
+++ b/core/src/test/java/org/elasticsearch/index/shard/IndexShardTests.java
@@ -49,7 +49,8 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.AllocationId;
import org.elasticsearch.cluster.routing.RestoreSource;
@@ -97,6 +98,7 @@ import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.Plugin;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.DummyShardLock;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.FieldMaskingReader;
@@ -1110,7 +1112,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
client().admin().indices().prepareFlush("test").get(); // only flush test
final ShardRouting origRouting = test_target.getShardOrNull(0).routingEntry();
ShardRouting routing = ShardRoutingHelper.reinit(origRouting);
- routing = ShardRoutingHelper.newWithRestoreSource(routing, new RestoreSource(new SnapshotId("foo", "bar"), Version.CURRENT, "test"));
+ final Snapshot snapshot = new Snapshot("foo", new SnapshotId("bar", UUIDs.randomBase64UUID()));
+ routing = ShardRoutingHelper.newWithRestoreSource(routing, new RestoreSource(snapshot, Version.CURRENT, "test"));
test_target.removeShard(0, "just do it man!");
final IndexShard test_target_shard = test_target.createShard(routing);
Store sourceStore = test_shard.store();
diff --git a/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
new file mode 100644
index 00000000000..3d46c0bbacf
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/repositories/blobstore/BlobStoreRepositoryTests.java
@@ -0,0 +1,230 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.repositories.blobstore;
+
+import org.elasticsearch.action.admin.cluster.repositories.put.PutRepositoryResponse;
+import org.elasticsearch.action.admin.cluster.snapshots.create.CreateSnapshotResponse;
+import org.elasticsearch.client.Client;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.collect.Tuple;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
+import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.set.Sets;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.common.xcontent.XContentType;
+import org.elasticsearch.repositories.RepositoriesService;
+import org.elasticsearch.snapshots.SnapshotId;
+import org.elasticsearch.test.ESIntegTestCase;
+import org.elasticsearch.test.ESSingleNodeTestCase;
+
+import java.nio.file.Path;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.stream.Collectors;
+
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.blobId;
+import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.parseNameUUIDFromBlobName;
+import static org.hamcrest.Matchers.equalTo;
+
+/**
+ * Tests for the {@link BlobStoreRepository} and its subclasses.
+ */
+public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
+
+ public void testRetrieveSnapshots() throws Exception {
+ final Client client = client();
+ final Path location = ESIntegTestCase.randomRepoPath(node().settings());
+ final String repositoryName = "test-repo";
+
+ logger.info("--> creating repository");
+ PutRepositoryResponse putRepositoryResponse =
+ client.admin().cluster().preparePutRepository(repositoryName)
+ .setType("fs")
+ .setSettings(Settings.builder().put(node().settings()).put("location", location))
+ .get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ logger.info("--> creating an index and indexing documents");
+ final String indexName = "test-idx";
+ createIndex(indexName);
+ ensureGreen();
+ int numDocs = randomIntBetween(10, 20);
+ for (int i = 0; i < numDocs; i++) {
+ String id = Integer.toString(i);
+ client().prepareIndex(indexName, "type1", id).setSource("text", "sometext").get();
+ }
+ client().admin().indices().prepareFlush(indexName).setWaitIfOngoing(true).get();
+
+ logger.info("--> create first snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(repositoryName, "test-snap-1")
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ final SnapshotId snapshotId1 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+
+ logger.info("--> create second snapshot");
+ createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(repositoryName, "test-snap-2")
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ final SnapshotId snapshotId2 = createSnapshotResponse.getSnapshotInfo().snapshotId();
+
+ logger.info("--> make sure the node's repository can resolve the snapshots");
+ final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
+ @SuppressWarnings("unchecked") final BlobStoreRepository repository =
+ (BlobStoreRepository) repositoriesService.repository(repositoryName);
+ final List<SnapshotId> originalSnapshots = Arrays.asList(snapshotId1, snapshotId2);
+
+ List<SnapshotId> snapshotIds = repository.snapshots().stream()
+ .sorted((s1, s2) -> s1.getName().compareTo(s2.getName()))
+ .collect(Collectors.toList());
+ assertThat(snapshotIds, equalTo(originalSnapshots));
+ }
+
+ public void testSnapshotIndexFile() throws Exception {
+ final Client client = client();
+ final Path location = ESIntegTestCase.randomRepoPath(node().settings());
+ final String repositoryName = "test-repo";
+
+ PutRepositoryResponse putRepositoryResponse =
+ client.admin().cluster().preparePutRepository(repositoryName)
+ .setType("fs")
+ .setSettings(Settings.builder().put(node().settings()).put("location", location))
+ .get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
+ @SuppressWarnings("unchecked") final BlobStoreRepository repository =
+ (BlobStoreRepository) repositoriesService.repository(repositoryName);
+
+ // write to and read from a snapshot file with no entries
+ repository.writeSnapshotList(Collections.emptyList());
+ List<SnapshotId> readSnapshotIds = repository.readSnapshotList();
+ assertThat(readSnapshotIds.size(), equalTo(0));
+
+ // write to and read from a snapshot file with a random number of entries
+ final int numSnapshots = randomIntBetween(1, 1000);
+ final List<SnapshotId> snapshotIds = new ArrayList<>(numSnapshots);
+ for (int i = 0; i < numSnapshots; i++) {
+ snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
+ }
+ repository.writeSnapshotList(snapshotIds);
+ readSnapshotIds = repository.readSnapshotList();
+ assertThat(readSnapshotIds, equalTo(snapshotIds));
+ }
+
+ public void testOldIndexFileFormat() throws Exception {
+ final Client client = client();
+ final Path location = ESIntegTestCase.randomRepoPath(node().settings());
+ final String repositoryName = "test-repo";
+
+ PutRepositoryResponse putRepositoryResponse =
+ client.admin().cluster().preparePutRepository(repositoryName)
+ .setType("fs")
+ .setSettings(Settings.builder().put(node().settings()).put("location", location))
+ .get();
+ assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
+
+ final RepositoriesService repositoriesService = getInstanceFromNode(RepositoriesService.class);
+ @SuppressWarnings("unchecked") final BlobStoreRepository repository =
+ (BlobStoreRepository) repositoriesService.repository(repositoryName);
+
+ // write old index file format
+ final int numOldSnapshots = randomIntBetween(1, 50);
+ final List<SnapshotId> snapshotIds = new ArrayList<>();
+ for (int i = 0; i < numOldSnapshots; i++) {
+ snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), SnapshotId.UNASSIGNED_UUID));
+ }
+ writeOldFormat(repository, snapshotIds.stream().map(SnapshotId::getName).collect(Collectors.toList()));
+ List<SnapshotId> readSnapshotIds = repository.readSnapshotList();
+ assertThat(Sets.newHashSet(readSnapshotIds), equalTo(Sets.newHashSet(snapshotIds)));
+
+ // write to and read from a snapshot file with a random number of new entries added
+ final int numSnapshots = randomIntBetween(1, 1000);
+ for (int i = 0; i < numSnapshots; i++) {
+ snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
+ }
+ repository.writeSnapshotList(snapshotIds);
+ readSnapshotIds = repository.readSnapshotList();
+ assertThat(Sets.newHashSet(readSnapshotIds), equalTo(Sets.newHashSet(snapshotIds)));
+ }
+
+ public void testParseUUIDFromBlobName() {
+ String blobStr = "abc123";
+ Tuple<String, String> pair = parseNameUUIDFromBlobName(blobStr);
+ assertThat(pair.v1(), equalTo(blobStr)); // snapshot name
+ assertThat(pair.v2(), equalTo(SnapshotId.UNASSIGNED_UUID)); // snapshot uuid
+ blobStr = "abcefghijklmnopqrstuvwxyz";
+ pair = parseNameUUIDFromBlobName(blobStr);
+ assertThat(pair.v1(), equalTo(blobStr));
+ assertThat(pair.v2(), equalTo(SnapshotId.UNASSIGNED_UUID));
+ blobStr = "abc123-xyz"; // not enough characters after '-' to have a uuid
+ pair = parseNameUUIDFromBlobName(blobStr);
+ assertThat(pair.v1(), equalTo(blobStr));
+ assertThat(pair.v2(), equalTo(SnapshotId.UNASSIGNED_UUID));
+ blobStr = "abc123-a1b2c3d4e5f6g7h8i9j0k1";
+ pair = parseNameUUIDFromBlobName(blobStr);
+ assertThat(pair.v1(), equalTo("abc123"));
+ assertThat(pair.v2(), equalTo("a1b2c3d4e5f6g7h8i9j0k1"));
+ }
+
+ public void testBlobId() {
+ SnapshotId snapshotId = new SnapshotId("abc123", SnapshotId.UNASSIGNED_UUID);
+ assertThat(blobId(snapshotId), equalTo("abc123")); // just the snapshot name
+ snapshotId = new SnapshotId("abc-123", SnapshotId.UNASSIGNED_UUID);
+ assertThat(blobId(snapshotId), equalTo("abc-123")); // just the snapshot name
+ String uuid = UUIDs.randomBase64UUID();
+ snapshotId = new SnapshotId("abc123", uuid);
+ assertThat(blobId(snapshotId), equalTo("abc123-" + uuid)); // snapshot name + '-' + uuid
+ uuid = UUIDs.randomBase64UUID();
+ snapshotId = new SnapshotId("abc-123", uuid);
+ assertThat(blobId(snapshotId), equalTo("abc-123-" + uuid)); // snapshot name + '-' + uuid
+ }
+
+ private void writeOldFormat(final BlobStoreRepository repository, final List<String> snapshotNames) throws Exception {
+ final BytesReference bRef;
+ try (BytesStreamOutput bStream = new BytesStreamOutput()) {
+ try (StreamOutput stream = new OutputStreamStreamOutput(bStream)) {
+ XContentBuilder builder = XContentFactory.contentBuilder(XContentType.JSON, stream);
+ builder.startObject();
+ builder.startArray("snapshots");
+ for (final String snapshotName : snapshotNames) {
+ builder.value(snapshotName);
+ }
+ builder.endArray();
+ builder.endObject();
+ builder.close();
+ }
+ bRef = bStream.bytes();
+ }
+ repository.blobContainer().writeBlob(BlobStoreRepository.SNAPSHOTS_FILE, bRef); // write to index file
+ }
+
+}
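The two helpers exercised by testParseUUIDFromBlobName and testBlobId above boil down to a "name + '-' + uuid" convention with a legacy fallback for snapshots that predate UUIDs. A minimal standalone sketch of that convention (not the BlobStoreRepository code itself; the class name and the "_na_" placeholder below are illustrative):

    import java.util.AbstractMap.SimpleEntry;
    import java.util.Map;

    // Illustrative sketch of the name-dash-uuid blob naming the tests above assert.
    public final class BlobNameSketch {
        private static final int UUID_LENGTH = 22;        // base64 UUIDs are 22 characters long
        private static final String UNASSIGNED = "_na_";  // stand-in for the legacy "no uuid" marker

        static String compose(String snapshotName, String uuid) {
            // legacy snapshots (no UUID) keep using just the snapshot name as the blob id
            return UNASSIGNED.equals(uuid) ? snapshotName : snapshotName + "-" + uuid;
        }

        static Map.Entry<String, String> parse(String blobName) {
            final int dash = blobName.length() - UUID_LENGTH - 1;
            if (dash <= 0 || blobName.charAt(dash) != '-') {
                // not enough characters after '-' to hold a uuid: treat the whole string as a legacy name
                return new SimpleEntry<>(blobName, UNASSIGNED);
            }
            return new SimpleEntry<>(blobName.substring(0, dash), blobName.substring(dash + 1));
        }

        public static void main(String[] args) {
            System.out.println(compose("abc123", "a1b2c3d4e5f6g7h8i9j0k1")); // abc123-a1b2c3d4e5f6g7h8i9j0k1
            System.out.println(parse("abc123-xyz"));                         // abc123-xyz=_na_ (legacy form)
        }
    }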
diff --git a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
index 978d226da40..b603ded8697 100644
--- a/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
+++ b/core/src/test/java/org/elasticsearch/rest/action/cat/RestRecoveryActionTests.java
@@ -21,7 +21,8 @@ package org.elasticsearch.rest.action.cat;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.admin.indices.recovery.RecoveryResponse;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RestoreSource;
import org.elasticsearch.common.Randomness;
@@ -32,6 +33,7 @@ import org.elasticsearch.index.Index;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.rest.RestController;
+import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.test.ESTestCase;
import java.util.ArrayList;
@@ -76,10 +78,9 @@ public class RestRecoveryActionTests extends ESTestCase {
final RestoreSource restoreSource = randomBoolean() ? mock(RestoreSource.class) : null;
if (restoreSource != null) {
- final SnapshotId snapshotId = mock(SnapshotId.class);
- when(snapshotId.getRepository()).thenReturn(randomAsciiOfLength(8));
- when(snapshotId.getSnapshot()).thenReturn(randomAsciiOfLength(8));
- when(restoreSource.snapshotId()).thenReturn(snapshotId);
+ final Snapshot snapshot = new Snapshot(randomAsciiOfLength(8),
+ new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
+ when(restoreSource.snapshot()).thenReturn(snapshot);
}
RecoveryState.Index index = mock(RecoveryState.Index.class);
@@ -166,10 +167,10 @@ public class RestRecoveryActionTests extends ESTestCase {
assertThat(cells.get(8).value, equalTo(state.getTargetNode().getName()));
assertThat(
cells.get(9).value,
- equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getRepository()));
+ equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getRepository()));
assertThat(
cells.get(10).value,
- equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshotId().getSnapshot()));
+ equalTo(state.getRestoreSource() == null ? "n/a" : state.getRestoreSource().snapshot().getSnapshotId().getName()));
assertThat(cells.get(11).value, equalTo(state.getIndex().totalRecoverFiles()));
assertThat(cells.get(12).value, equalTo(state.getIndex().recoveredFileCount()));
assertThat(cells.get(13).value, equalTo(percent(state.getIndex().recoveredFilesPercent())));
diff --git a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
index 102173626f3..50fb3f9074b 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/AbstractSnapshotIntegTestCase.java
@@ -25,7 +25,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ClusterStateListener;
import org.elasticsearch.cluster.ClusterStateUpdateTask;
import org.elasticsearch.cluster.SnapshotsInProgress;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.cluster.service.PendingClusterTask;
@@ -108,18 +107,29 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
fail("Timeout!!!");
}
- public SnapshotInfo waitForCompletion(String repository, String snapshot, TimeValue timeout) throws InterruptedException {
+ public SnapshotInfo waitForCompletion(String repository, String snapshotName, TimeValue timeout) throws InterruptedException {
long start = System.currentTimeMillis();
- SnapshotId snapshotId = new SnapshotId(repository, snapshot);
while (System.currentTimeMillis() - start < timeout.millis()) {
- List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshot).get().getSnapshots();
+ List<SnapshotInfo> snapshotInfos = client().admin().cluster().prepareGetSnapshots(repository).setSnapshots(snapshotName).get().getSnapshots();
assertThat(snapshotInfos.size(), equalTo(1));
if (snapshotInfos.get(0).state().completed()) {
// Make sure that snapshot clean up operations are finished
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
SnapshotsInProgress snapshotsInProgress = stateResponse.getState().custom(SnapshotsInProgress.TYPE);
- if (snapshotsInProgress == null || snapshotsInProgress.snapshot(snapshotId) == null) {
+ if (snapshotsInProgress == null) {
return snapshotInfos.get(0);
+ } else {
+ boolean found = false;
+ for (SnapshotsInProgress.Entry entry : snapshotsInProgress.entries()) {
+ final Snapshot curr = entry.snapshot();
+ if (curr.getRepository().equals(repository) && curr.getSnapshotId().getName().equals(snapshotName)) {
+ found = true;
+ break;
+ }
+ }
+ if (found == false) {
+ return snapshotInfos.get(0);
+ }
}
}
Thread.sleep(100);
@@ -128,12 +138,13 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
return null;
}
- public static String blockNodeWithIndex(String index) {
- for(String node : internalCluster().nodesInclude("test-idx")) {
- ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).blockOnDataFiles(true);
+ public static String blockNodeWithIndex(final String repositoryName, final String indexName) {
+ for(String node : internalCluster().nodesInclude(indexName)) {
+ ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository(repositoryName))
+ .blockOnDataFiles(true);
return node;
}
- fail("No nodes for the index " + index + " found");
+ fail("No nodes for the index " + indexName + " found");
return null;
}
@@ -163,8 +174,8 @@ public abstract class AbstractSnapshotIntegTestCase extends ESIntegTestCase {
}
}
- public static void unblockNode(String node) {
- ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository("test-repo")).unblock();
+ public static void unblockNode(final String repository, final String node) {
+ ((MockRepository)internalCluster().getInstance(RepositoriesService.class, node).repository(repository)).unblock();
}
protected void assertBusyPendingTasks(final String taskPrefix, final int expectedCount) throws Exception {
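With the repository name now passed explicitly, blockNodeWithIndex, waitForBlock, unblockNode and waitForCompletion are typically combined as below. A hedged skeleton (repository, index and snapshot names are placeholders) of the block / snapshot / unblock pattern the integration tests that follow rely on:

    // Hypothetical test method in a subclass of AbstractSnapshotIntegTestCase; only the helper
    // signatures shown in the diff above are assumed, everything else is placeholder.
    public void testBlockedSnapshotSketch() throws Exception {
        final String repoName = "test-repo";
        final String indexName = "test-idx";
        // block one data node that holds the index so the snapshot stays in progress
        final String blockedNode = blockNodeWithIndex(repoName, indexName);
        client().admin().cluster().prepareCreateSnapshot(repoName, "test-snap")
            .setWaitForCompletion(false)
            .setIndices(indexName)
            .get();
        waitForBlock(blockedNode, repoName, TimeValue.timeValueSeconds(60));
        // ... assertions against the in-progress snapshot go here ...
        unblockNode(repoName, blockedNode);
        waitForCompletion(repoName, "test-snap", TimeValue.timeValueSeconds(600));
    }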
diff --git a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
index 34333a583af..3ba6c875b68 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/DedicatedClusterSnapshotRestoreIT.java
@@ -313,7 +313,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
// Pick one node and block it
- String blockedNode = blockNodeWithIndex("test-idx");
+ String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
logger.info("--> snapshot");
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
@@ -322,7 +322,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
waitForBlock(blockedNode, "test-repo", TimeValue.timeValueSeconds(60));
logger.info("--> execution was blocked on node [{}], shutting it down", blockedNode);
- unblockNode(blockedNode);
+ unblockNode("test-repo", blockedNode);
logger.info("--> stopping node [{}]", blockedNode);
stopNode(blockedNode);
@@ -361,7 +361,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
assertThat(putRepositoryResponse.isAcknowledged(), equalTo(true));
// Pick one node and block it
- String blockedNode = blockNodeWithIndex("test-idx");
+ String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
// Remove it from the list of available nodes
nodes.remove(blockedNode);
@@ -377,7 +377,7 @@ public class DedicatedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTest
ListenableActionFuture<DeleteSnapshotResponse> deleteSnapshotResponseFuture = internalCluster().client(nodes.get(0)).admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").execute();
// Make sure that abort makes some progress
Thread.sleep(100);
- unblockNode(blockedNode);
+ unblockNode("test-repo", blockedNode);
logger.info("--> stopping node [{}]", blockedNode);
stopNode(blockedNode);
try {
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
index 3996aad25e3..19b46710fea 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SharedClusterSnapshotRestoreIT.java
@@ -50,7 +50,6 @@ import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
-import org.elasticsearch.cluster.metadata.SnapshotId;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.ImmutableOpenMap;
@@ -73,9 +72,11 @@ import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
+import java.util.Locale;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
@@ -917,7 +918,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
logger.info("--> truncate snapshot file to make it unreadable");
- Path snapshotPath = repo.resolve("snap-test-snap-1.dat");
+ Path snapshotPath = repo.resolve("snap-test-snap-1-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat");
try(SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
outChan.truncate(randomInt(10));
}
@@ -1120,7 +1121,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo(100L));
// Pick one node and block it
- String blockedNode = blockNodeWithIndex("test-idx");
+ String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
logger.info("--> snapshot");
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
@@ -1133,7 +1134,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client().admin().indices().prepareUpdateSettings("test-idx").setSettings(excludeSettings).get();
logger.info("--> unblocking blocked node");
- unblockNode(blockedNode);
+ unblockNode("test-repo", blockedNode);
logger.info("--> waiting for completion");
SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
@@ -1183,7 +1184,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo(100L));
// Pick one node and block it
- String blockedNode = blockNodeWithIndex("test-idx");
+ String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
logger.info("--> snapshot");
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
@@ -1215,7 +1216,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
.setType("fs").setSettings(Settings.builder().put("location", repositoryLocation.resolve("test"))));
logger.info("--> unblocking blocked node");
- unblockNode(blockedNode);
+ unblockNode("test-repo", blockedNode);
logger.info("--> waiting for completion");
SnapshotInfo snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
@@ -1436,7 +1437,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().totalHits(), equalTo(100L));
// Pick one node and block it
- String blockedNode = blockNodeWithIndex("test-idx");
+ String blockedNode = blockNodeWithIndex("test-repo", "test-idx");
logger.info("--> snapshot");
client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(false).setIndices("test-idx").get();
@@ -1477,7 +1478,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(snapshotInfo.state(), equalTo(SnapshotState.IN_PROGRESS));
logger.info("--> unblocking blocked node");
- unblockNode(blockedNode);
+ unblockNode("test-repo", blockedNode);
snapshotInfo = waitForCompletion("test-repo", "test-snap", TimeValue.timeValueSeconds(600));
logger.info("Number of failed shards [{}]", snapshotInfo.shardFailures().size());
@@ -1904,7 +1905,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
} finally {
if (initBlocking) {
logger.info("--> unblock running master node");
- unblockNode(internalCluster().getMasterName());
+ unblockNode("test-repo", internalCluster().getMasterName());
} else {
logger.info("--> unblock all data nodes");
unblockAllDataNodes("test-repo");
@@ -1993,14 +1994,17 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
Client client = client();
logger.info("--> creating repository");
- assertAcked(client.admin().cluster().preparePutRepository("test-repo")
+ final String repositoryName = "test-repo";
+ assertAcked(client.admin().cluster().preparePutRepository(repositoryName)
.setType("mock").setSettings(Settings.builder()
.put("location", randomRepoPath())
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
));
- createIndex("test-idx");
+ logger.info("--> create the index");
+ final String idxName = "test-idx";
+ createIndex(idxName);
ensureGreen();
ClusterService clusterService = internalCluster().getInstance(ClusterService.class, internalCluster().getMasterName());
@@ -2008,7 +2012,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
final CountDownLatch countDownLatch = new CountDownLatch(1);
logger.info("--> snapshot");
- CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap").setWaitForCompletion(true).setIndices("test-idx").get();
+ final String snapshotName = "test-snap";
+ CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot(repositoryName, snapshotName).setWaitForCompletion(true).setIndices(idxName).get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
@@ -2020,11 +2025,18 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
public ClusterState execute(ClusterState currentState) {
// Simulate orphan snapshot
ImmutableOpenMap.Builder<ShardId, ShardSnapshotStatus> shards = ImmutableOpenMap.builder();
- shards.put(new ShardId("test-idx", "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED));
- shards.put(new ShardId("test-idx", "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED));
- shards.put(new ShardId("test-idx", "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED));
+ shards.put(new ShardId(idxName, "_na_", 0), new ShardSnapshotStatus("unknown-node", State.ABORTED));
+ shards.put(new ShardId(idxName, "_na_", 1), new ShardSnapshotStatus("unknown-node", State.ABORTED));
+ shards.put(new ShardId(idxName, "_na_", 2), new ShardSnapshotStatus("unknown-node", State.ABORTED));
List<Entry> entries = new ArrayList<>();
- entries.add(new Entry(new SnapshotId("test-repo", "test-snap"), true, false, State.ABORTED, Collections.singletonList("test-idx"), System.currentTimeMillis(), shards.build()));
+ entries.add(new Entry(new Snapshot(repositoryName,
+ createSnapshotResponse.getSnapshotInfo().snapshotId()),
+ true,
+ false,
+ State.ABORTED,
+ Collections.singletonList(idxName),
+ System.currentTimeMillis(),
+ shards.build()));
return ClusterState.builder(currentState).putCustom(SnapshotsInProgress.TYPE, new SnapshotsInProgress(Collections.unmodifiableList(entries))).build();
}
@@ -2042,8 +2054,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
countDownLatch.await();
logger.info("--> try deleting the orphan snapshot");
- assertAcked(client.admin().cluster().prepareDeleteSnapshot("test-repo", "test-snap").get("10s"));
-
+ assertAcked(client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).get("10s"));
}
private boolean waitForIndex(final String index, TimeValue timeout) throws InterruptedException {
@@ -2143,33 +2154,14 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
.put("compress", randomBoolean())
.put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
- try {
- client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get();
- fail("shouldn't be here");
- } catch (InvalidSnapshotNameException ex) {
- assertThat(ex.getMessage(), containsString("Invalid snapshot name"));
- }
-
- try {
- client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo").get();
- fail("shouldn't be here");
- } catch (InvalidSnapshotNameException ex) {
- assertThat(ex.getMessage(), containsString("Invalid snapshot name"));
- }
-
- try {
- client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo").get();
- fail("shouldn't be here");
- } catch (InvalidSnapshotNameException ex) {
- assertThat(ex.getMessage(), containsString("Invalid snapshot name"));
- }
-
- try {
- client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo").get();
- fail("shouldn't be here");
- } catch (InvalidSnapshotNameException ex) {
- assertThat(ex.getMessage(), containsString("Invalid snapshot name"));
- }
+ expectThrows(InvalidSnapshotNameException.class,
+ () -> client.admin().cluster().prepareCreateSnapshot("test-repo", "_foo").get());
+ expectThrows(SnapshotMissingException.class,
+ () -> client.admin().cluster().prepareGetSnapshots("test-repo").setSnapshots("_foo").get());
+ expectThrows(SnapshotMissingException.class,
+ () -> client.admin().cluster().prepareDeleteSnapshot("test-repo", "_foo").get());
+ expectThrows(SnapshotMissingException.class,
+ () -> client.admin().cluster().prepareSnapshotStatus("test-repo").setSnapshots("_foo").get());
}
public void testListCorruptedSnapshot() throws Exception {
@@ -2199,7 +2191,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
logger.info("--> truncate snapshot file to make it unreadable");
- Path snapshotPath = repo.resolve("snap-test-snap-2.dat");
+ Path snapshotPath = repo.resolve("snap-test-snap-2-" + createSnapshotResponse.getSnapshotInfo().snapshotId().getUUID() + ".dat");
try(SeekableByteChannel outChan = Files.newByteChannel(snapshotPath, StandardOpenOption.WRITE)) {
outChan.truncate(randomInt(10));
}
@@ -2211,13 +2203,194 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
assertThat(snapshotInfos.size(), equalTo(1));
assertThat(snapshotInfos.get(0).state(), equalTo(SnapshotState.SUCCESS));
- assertThat(snapshotInfos.get(0).name(), equalTo("test-snap-1"));
+ assertThat(snapshotInfos.get(0).snapshotId().getName(), equalTo("test-snap-1"));
try {
client.admin().cluster().prepareGetSnapshots("test-repo").setIgnoreUnavailable(false).get().getSnapshots();
} catch (SnapshotException ex) {
- assertThat(ex.snapshot().getRepository(), equalTo("test-repo"));
- assertThat(ex.snapshot().getSnapshot(), equalTo("test-snap-2"));
+ assertThat(ex.getRepositoryName(), equalTo("test-repo"));
+ assertThat(ex.getSnapshotName(), equalTo("test-snap-2"));
}
}
+
+ public void testCannotCreateSnapshotsWithSameName() throws Exception {
+ final String repositoryName = "test-repo";
+ final String snapshotName = "test-snap";
+ final String indexName = "test-idx";
+ final Client client = client();
+ final Path repo = randomRepoPath();
+
+ logger.info("--> creating repository at {}", repo.toAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository(repositoryName)
+ .setType("fs").setSettings(Settings.builder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));
+ logger.info("--> creating an index and indexing documents");
+ createIndex(indexName);
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+
+ logger.info("--> take first snapshot");
+ CreateSnapshotResponse createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(repositoryName, snapshotName)
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(),
+ equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
+
+ logger.info("--> index more documents");
+ for (int i = 10; i < 20; i++) {
+ index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+
+ logger.info("--> second snapshot of the same name should fail");
+ try {
+ createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(repositoryName, snapshotName)
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ fail("should not be allowed to create a snapshot with the same name as an already existing snapshot: " +
+ createSnapshotResponse.getSnapshotInfo().snapshotId());
+ } catch (SnapshotCreationException e) {
+ assertThat(e.getMessage(), containsString("snapshot with the same name already exists"));
+ }
+
+ logger.info("--> delete the first snapshot");
+ client.admin().cluster().prepareDeleteSnapshot(repositoryName, snapshotName).get();
+
+ logger.info("--> try creating a snapshot with the same name, now it should work because the first one was deleted");
+ createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(repositoryName, snapshotName)
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().snapshotId().getName(), equalTo(snapshotName));
+ }
+
+ public void testGetSnapshotsRequest() throws Exception {
+ final String repositoryName = "test-repo";
+ final String indexName = "test-idx";
+ final Client client = client();
+ final Path repo = randomRepoPath();
+
+ logger.info("--> creating repository at {}", repo.toAbsolutePath());
+ assertAcked(client.admin().cluster().preparePutRepository(repositoryName)
+ .setType("mock").setSettings(Settings.builder()
+ .put("location", repo)
+ .put("compress", false)
+ .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)
+ .put("wait_after_unblock", 200)));
+
+ logger.info("--> get snapshots on an empty repository");
+ expectThrows(SnapshotMissingException.class, () -> client.admin()
+ .cluster()
+ .prepareGetSnapshots(repositoryName)
+ .addSnapshots("non-existent-snapshot")
+ .get());
+ // with ignore unavailable set to true, should not throw an exception
+ GetSnapshotsResponse getSnapshotsResponse = client.admin()
+ .cluster()
+ .prepareGetSnapshots(repositoryName)
+ .setIgnoreUnavailable(true)
+ .addSnapshots("non-existent-snapshot")
+ .get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(0));
+
+ logger.info("--> creating an index and indexing documents");
+ // create the index with no replicas so each shard has exactly one (primary) copy
+ assertAcked(prepareCreate(indexName, 1, Settings.builder().put("number_of_replicas", 0)));
+ ensureGreen();
+ for (int i = 0; i < 10; i++) {
+ index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+
+ final int numSnapshots = randomIntBetween(1, 3) + 1;
+ logger.info("--> take {} snapshot(s)", numSnapshots);
+ final String[] snapshotNames = new String[numSnapshots];
+ for (int i = 0; i < numSnapshots - 1; i++) {
+ final String snapshotName = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
+ CreateSnapshotResponse createSnapshotResponse = client.admin()
+ .cluster()
+ .prepareCreateSnapshot(repositoryName, snapshotName)
+ .setWaitForCompletion(true)
+ .setIndices(indexName)
+ .get();
+ assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
+ snapshotNames[i] = snapshotName;
+ }
+ logger.info("--> take another snapshot to be in-progress");
+ // add documents so there are data files to block on
+ for (int i = 10; i < 20; i++) {
+ index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+ }
+ refresh();
+
+ final String inProgressSnapshot = randomAsciiOfLength(8).toLowerCase(Locale.ROOT);
+ snapshotNames[numSnapshots - 1] = inProgressSnapshot;
+ // block a node so the create snapshot operation can remain in progress
+ final String blockedNode = blockNodeWithIndex(repositoryName, indexName);
+ client.admin().cluster().prepareCreateSnapshot(repositoryName, inProgressSnapshot)
+ .setWaitForCompletion(false)
+ .setIndices(indexName)
+ .get();
+ waitForBlock(blockedNode, repositoryName, TimeValue.timeValueSeconds(60)); // wait for block to kick in
+
+ logger.info("--> get all snapshots with a current in-progress");
+ // all snapshots in the repository should be returned, including the one still in progress
+ getSnapshotsResponse = client.admin().cluster()
+ .prepareGetSnapshots(repositoryName)
+ .addSnapshots("_all")
+ .get();
+ List<String> sortedNames = Arrays.asList(snapshotNames);
+ Collections.sort(sortedNames);
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
+ assertThat(getSnapshotsResponse.getSnapshots().stream()
+ .map(s -> s.snapshotId().getName())
+ .sorted()
+ .collect(Collectors.toList()), equalTo(sortedNames));
+
+ getSnapshotsResponse = client.admin().cluster()
+ .prepareGetSnapshots(repositoryName)
+ .addSnapshots(snapshotNames)
+ .get();
+ sortedNames = Arrays.asList(snapshotNames);
+ Collections.sort(sortedNames);
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
+ assertThat(getSnapshotsResponse.getSnapshots().stream()
+ .map(s -> s.snapshotId().getName())
+ .sorted()
+ .collect(Collectors.toList()), equalTo(sortedNames));
+
+ logger.info("--> make sure duplicates are not returned in the response");
+ String regexName = snapshotNames[randomIntBetween(0, numSnapshots - 1)];
+ final int splitPos = regexName.length() / 2;
+ final String firstRegex = regexName.substring(0, splitPos) + "*";
+ final String secondRegex = "*" + regexName.substring(splitPos);
+ getSnapshotsResponse = client.admin().cluster()
+ .prepareGetSnapshots(repositoryName)
+ .addSnapshots(snapshotNames)
+ .addSnapshots(firstRegex, secondRegex)
+ .get();
+ assertThat(getSnapshotsResponse.getSnapshots().size(), equalTo(numSnapshots));
+ assertThat(getSnapshotsResponse.getSnapshots().stream()
+ .map(s -> s.snapshotId().getName())
+ .sorted()
+ .collect(Collectors.toList()), equalTo(sortedNames));
+
+ unblockNode(repositoryName, blockedNode); // unblock node
+ waitForCompletion(repositoryName, inProgressSnapshot, TimeValue.timeValueSeconds(60));
+ }
+
}
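The two "truncate snapshot file" fixes above depend on the snapshot blob now carrying the UUID in its file name (snap-<name>-<uuid>.dat). A small standalone sketch of resolving such a path (the helper class and the paths are illustrative; only the file-name pattern is taken from the tests):

    import java.nio.file.Path;
    import java.nio.file.Paths;

    // Illustrative helper that builds the "snap-<name>-<uuid>.dat" file name resolved in the tests above.
    public final class SnapshotBlobPathSketch {
        static Path snapshotBlobPath(Path repoRoot, String snapshotName, String uuid) {
            return repoRoot.resolve("snap-" + snapshotName + "-" + uuid + ".dat");
        }

        public static void main(String[] args) {
            final Path repo = Paths.get("/tmp/test-repo"); // hypothetical repository location
            System.out.println(snapshotBlobPath(repo, "test-snap-1", "a1b2c3d4e5f6g7h8i9j0k1"));
        }
    }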
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java
index e6379468382..31c0a193f07 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotBackwardsCompatibilityIT.java
@@ -86,7 +86,7 @@ public class SnapshotBackwardsCompatibilityIT extends ESBackcompatTestCase {
counts[i] = client().prepareSearch(indices[i]).setSize(0).get().getHits().totalHits();
}
- logger.info("--> snapshot subset of indices before upgrage");
+ logger.info("--> snapshot subset of indices before upgrade");
CreateSnapshotResponse createSnapshotResponse = client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("index_before_*").get();
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), greaterThan(0));
assertThat(createSnapshotResponse.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponse.getSnapshotInfo().totalShards()));
diff --git a/core/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java b/core/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java
new file mode 100644
index 00000000000..cb297785e4b
--- /dev/null
+++ b/core/src/test/java/org/elasticsearch/snapshots/SnapshotTests.java
@@ -0,0 +1,57 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.snapshots;
+
+import org.elasticsearch.common.UUIDs;
+import org.elasticsearch.common.io.stream.ByteBufferStreamInput;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
+import org.elasticsearch.test.ESTestCase;
+
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+import static org.hamcrest.CoreMatchers.equalTo;
+
+/**
+ * Tests for the {@link Snapshot} class.
+ */
+public class SnapshotTests extends ESTestCase {
+
+ public void testSnapshotEquals() {
+ final SnapshotId snapshotId = new SnapshotId("snap", UUIDs.randomBase64UUID());
+ final Snapshot original = new Snapshot("repo", snapshotId);
+ final Snapshot expected = new Snapshot(original.getRepository(), original.getSnapshotId());
+ assertThat(expected, equalTo(original));
+ assertThat(expected.getRepository(), equalTo(original.getRepository()));
+ assertThat(expected.getSnapshotId(), equalTo(original.getSnapshotId()));
+ assertThat(expected.getSnapshotId().getName(), equalTo(original.getSnapshotId().getName()));
+ assertThat(expected.getSnapshotId().getUUID(), equalTo(original.getSnapshotId().getUUID()));
+ }
+
+ public void testSerialization() throws IOException {
+ final SnapshotId snapshotId = new SnapshotId(randomAsciiOfLength(randomIntBetween(2, 8)), UUIDs.randomBase64UUID());
+ final Snapshot original = new Snapshot(randomAsciiOfLength(randomIntBetween(2, 8)), snapshotId);
+ final BytesStreamOutput out = new BytesStreamOutput();
+ original.writeTo(out);
+ final ByteBufferStreamInput in = new ByteBufferStreamInput(ByteBuffer.wrap(out.bytes().toBytes()));
+ assertThat(new Snapshot(in), equalTo(original));
+ }
+
+}
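The serialization test above follows the usual write-then-read round-trip for a small value object made of a repository name plus a (name, uuid) snapshot id. A self-contained analogue using plain java.io streams (not the Elasticsearch StreamOutput machinery, and not the actual Snapshot class):

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.Objects;

    // Standalone analogue of the round-trip above: a repository name plus (name, uuid) pair
    // written to bytes and read back, then compared with equals().
    final class SnapshotRefSketch {
        final String repository;
        final String name;
        final String uuid;

        SnapshotRefSketch(String repository, String name, String uuid) {
            this.repository = repository;
            this.name = name;
            this.uuid = uuid;
        }

        void writeTo(DataOutputStream out) throws IOException {
            out.writeUTF(repository);
            out.writeUTF(name);
            out.writeUTF(uuid);
        }

        static SnapshotRefSketch readFrom(DataInputStream in) throws IOException {
            return new SnapshotRefSketch(in.readUTF(), in.readUTF(), in.readUTF());
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            final SnapshotRefSketch that = (SnapshotRefSketch) o;
            return repository.equals(that.repository) && name.equals(that.name) && uuid.equals(that.uuid);
        }

        @Override
        public int hashCode() {
            return Objects.hash(repository, name, uuid);
        }

        public static void main(String[] args) throws IOException {
            final SnapshotRefSketch original = new SnapshotRefSketch("repo", "snap", "a1b2c3d4e5f6g7h8i9j0k1");
            final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            original.writeTo(new DataOutputStream(bytes));
            final SnapshotRefSketch copy = readFrom(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
            System.out.println(original.equals(copy)); // true
        }
    }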
diff --git a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
index 7df44738076..8b02f90f51c 100644
--- a/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
+++ b/core/src/test/java/org/elasticsearch/snapshots/mockstore/MockRepository.java
@@ -21,7 +21,7 @@ package org.elasticsearch.snapshots.mockstore;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.blobstore.BlobContainer;
import org.elasticsearch.common.blobstore.BlobMetaData;
@@ -174,6 +174,10 @@ public class MockRepository extends FsRepository {
blockOnControlFiles = blocked;
}
+ public boolean blockOnDataFiles() {
+ return blockOnDataFiles;
+ }
+
public synchronized void unblockExecution() {
blocked = false;
// Clean blocking flags, so we wouldn't try to block again
diff --git a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java
index 8af614df605..4d3459cdcd4 100644
--- a/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java
+++ b/plugins/repository-azure/src/main/java/org/elasticsearch/repositories/azure/AzureRepository.java
@@ -24,7 +24,7 @@ import com.microsoft.azure.storage.StorageException;
import org.elasticsearch.cloud.azure.blobstore.AzureBlobStore;
import org.elasticsearch.cloud.azure.storage.AzureStorageService.Storage;
import org.elasticsearch.cluster.metadata.MetaData;
-import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
@@ -166,7 +166,7 @@ public class AzureRepository extends BlobStoreRepository {
super.initializeSnapshot(snapshotId, indices, metaData);
} catch (StorageException | URISyntaxException e) {
logger.warn("can not initialize container [{}]: [{}]", blobStore.container(), e.getMessage());
- throw new SnapshotCreationException(snapshotId, e);
+ throw new SnapshotCreationException(repositoryName, snapshotId, e);
}
}
diff --git a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
index 5a1c76df413..8160c560325 100644
--- a/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
+++ b/plugins/repository-azure/src/test/java/org/elasticsearch/cloud/azure/storage/AzureStorageServiceMock.java
@@ -94,8 +94,15 @@ public class AzureStorageServiceMock extends AbstractLifecycleComponent
public Map<String, BlobMetaData> listBlobsByPrefix(String account, LocationMode mode, String container, String keyPath, String prefix) {
MapBuilder<String, BlobMetaData> blobsBuilder = MapBuilder.newMapBuilder();
for (String blobName : blobs.keySet()) {
- if (startsWithIgnoreCase(blobName, prefix)) {
- blobsBuilder.put(blobName, new PlainBlobMetaData(blobName, blobs.get(blobName).size()));
+ final String checkBlob;
+ if (keyPath != null) {
+ // strip off key path from the beginning of the blob name
+ checkBlob = blobName.replace(keyPath, "");
+ } else {
+ checkBlob = blobName;
+ }
+ if (startsWithIgnoreCase(checkBlob, prefix)) {
+ blobsBuilder.put(blobName, new PlainBlobMetaData(checkBlob, blobs.get(blobName).size()));
}
}
return blobsBuilder.immutableMap();
diff --git a/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java b/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
index 50d677c600c..98155967514 100644
--- a/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
+++ b/test/framework/src/main/java/org/elasticsearch/common/io/FileTestUtils.java
@@ -19,11 +19,15 @@
package org.elasticsearch.common.io;
+import org.elasticsearch.common.Nullable;
import org.junit.Assert;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.util.zip.ZipEntry;
+import java.util.zip.ZipInputStream;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertFileExists;
import static org.hamcrest.CoreMatchers.equalTo;
@@ -50,4 +54,44 @@ public class FileTestUtils {
Assert.assertThat(fileContent.trim(), equalTo(expected.trim()));
}
}
+
+ /**
+ * Unzip a zip file to a destination directory. If the zip file does not exist, an IOException is thrown.
+ * If the destination directory does not exist, it will be created.
+ *
+ * @param zip zip file to unzip
+ * @param destDir directory to unzip the file to
+ * @param prefixToRemove the (optional) prefix in the zip file path to remove when writing to the destination directory
+ * @throws IOException if zip file does not exist, or there was an error reading from the zip file or
+ * writing to the destination directory
+ */
+ public static void unzip(final Path zip, final Path destDir, @Nullable final String prefixToRemove) throws IOException {
+ if (Files.notExists(zip)) {
+ throw new IOException("[" + zip + "] zip file must exist");
+ }
+ Files.createDirectories(destDir);
+
+ try (final ZipInputStream zipInput = new ZipInputStream(Files.newInputStream(zip))) {
+ ZipEntry entry;
+ while ((entry = zipInput.getNextEntry()) != null) {
+ final String entryPath;
+ if (prefixToRemove != null) {
+ if (entry.getName().startsWith(prefixToRemove)) {
+ entryPath = entry.getName().substring(prefixToRemove.length());
+ } else {
+ throw new IOException("prefix not found: " + prefixToRemove);
+ }
+ } else {
+ entryPath = entry.getName();
+ }
+ final Path path = Paths.get(destDir.toString(), entryPath);
+ if (entry.isDirectory()) {
+ Files.createDirectories(path);
+ } else {
+ Files.copy(zipInput, path);
+ }
+ zipInput.closeEntry();
+ }
+ }
+ }
}
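A brief usage sketch of the unzip helper added above, as a test fixture loader might call it (the archive path, destination and prefix below are placeholders; only FileTestUtils.unzip and its signature come from the diff):

    import java.io.IOException;
    import java.nio.file.Path;
    import java.nio.file.Paths;

    import org.elasticsearch.common.io.FileTestUtils;

    // Hypothetical caller of the new FileTestUtils.unzip(zip, destDir, prefixToRemove) helper.
    public class UnzipRepoFixtureSketch {
        public static void main(String[] args) throws IOException {
            final Path zippedRepo = Paths.get("src/test/resources/repo-fixture.zip"); // placeholder fixture
            final Path target = Paths.get("target/unzipped-repo");                    // placeholder destination
            FileTestUtils.unzip(zippedRepo, target, "repo/");                         // strip the "repo/" prefix
        }
    }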
diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
index c86c111f645..f4d5b4dd482 100644
--- a/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
+++ b/test/framework/src/main/java/org/elasticsearch/test/ESSingleNodeTestCase.java
@@ -54,6 +54,7 @@ import org.junit.Before;
import org.junit.BeforeClass;
import java.io.IOException;
+import java.nio.file.Path;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
@@ -171,14 +172,15 @@ public abstract class ESSingleNodeTestCase extends ESTestCase {
}
private Node newNode() {
+ final Path tempDir = createTempDir();
Settings settings = Settings.builder()
.put(ClusterName.CLUSTER_NAME_SETTING.getKey(), InternalTestCluster.clusterName("single-node-cluster", randomLong()))
- .put(Environment.PATH_HOME_SETTING.getKey(), createTempDir())
+ .put(Environment.PATH_HOME_SETTING.getKey(), tempDir)
+ .put(Environment.PATH_REPO_SETTING.getKey(), tempDir.resolve("repo"))
// TODO: use a consistent data path for custom paths
// This needs to tie into the ESIntegTestCase#indexSettings() method
.put(Environment.PATH_SHARED_DATA_SETTING.getKey(), createTempDir().getParent())
.put("node.name", nodeName())
-
.put("script.inline", "true")
.put("script.stored", "true")
.put(EsExecutors.PROCESSORS_SETTING.getKey(), 1) // limit the number of threads created