Remove IndexMeta and address code review comments
parent d9ec959dfc
commit a6f5e0b0fe
@@ -23,6 +23,7 @@ import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.Objects;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

@@ -53,6 +54,13 @@ public final class Sets {
return set;
}

public static <T> LinkedHashSet<T> newLinkedHashSet(T... elements) {
Objects.requireNonNull(elements);
LinkedHashSet<T> set = new LinkedHashSet<>(elements.length);
Collections.addAll(set, elements);
return set;
}

public static <T> Set<T> newConcurrentHashSet() {
return Collections.newSetFromMap(new ConcurrentHashMap<>());
}
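Note: a minimal standalone sketch (plain JDK, outside the Elasticsearch codebase) of how the two Sets helpers added above behave; the class name and main method are illustrative only.

import java.util.Collections;
import java.util.LinkedHashSet;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class SetsSketch {
    @SafeVarargs
    static <T> LinkedHashSet<T> newLinkedHashSet(T... elements) {
        LinkedHashSet<T> set = new LinkedHashSet<>(elements.length);
        Collections.addAll(set, elements);        // keeps the argument order on iteration
        return set;
    }

    static <T> Set<T> newConcurrentHashSet() {
        return Collections.newSetFromMap(new ConcurrentHashMap<>());  // thread-safe Set view
    }

    public static void main(String[] args) {
        System.out.println(newLinkedHashSet("c", "a", "b"));  // prints [c, a, b]
    }
}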
@@ -24,6 +24,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;

import java.io.IOException;
import java.util.Objects;

@@ -59,6 +60,12 @@ public final class IndexId implements Writeable, ToXContent {
* The unique ID for the index within the repository. This is *not* the same as the
* index's UUID, but merely a unique file/URL friendly identifier that a repository can
* use to name blobs for the index.
*
* We could not use the index's actual UUID (See {@link Index#getUUID()}) because in the
* case of snapshot/restore, the index UUID in the snapshotted index will be different
* from the index UUID assigned to it when it is restored. Hence, the actual index UUID
* is not useful in the context of snapshot/restore for tying a snapshotted index to the
* index it was snapshotted from, and so we are using a separate UUID here.
*/
public String getId() {
return id;
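Note: to make the distinction in the javadoc above concrete, here is a small self-contained sketch of a repository-scoped index identifier used to build blob paths. The class and method names are invented stand-ins, not the real IndexId API.

import java.util.Objects;
import java.util.UUID;

final class RepoIndexId {
    final String name;  // index name, e.g. "foo"
    final String id;    // repository-private identifier used in blob paths, not the cluster's index UUID

    RepoIndexId(String name, String id) {
        this.name = Objects.requireNonNull(name);
        this.id = Objects.requireNonNull(id);
    }

    String blobPath() {
        return "indices/" + id;  // e.g. indices/Ac1342-B_x, stable across snapshot/restore cycles
    }

    public static void main(String[] args) {
        System.out.println(new RepoIndexId("foo", UUID.randomUUID().toString()).blobPath());
    }
}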
@@ -20,9 +20,7 @@
package org.elasticsearch.repositories;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.UUIDs;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

@@ -37,6 +35,7 @@ import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

/**

@@ -52,13 +51,24 @@ public final class RepositoryData implements ToXContent {
*/
private final List<SnapshotId> snapshotIds;
/**
* The indices found in the repository across all snapshots.
* The indices found in the repository across all snapshots, as a name to {@link IndexId} mapping
*/
private final Map<String, IndexMeta> indices;
private final Map<String, IndexId> indices;
/**
* The snapshots that each index belongs to.
*/
private final Map<IndexId, Set<SnapshotId>> indexSnapshots;

public RepositoryData(List<SnapshotId> snapshotIds, Map<String, IndexMeta> indices) {
public RepositoryData(List<SnapshotId> snapshotIds, Map<IndexId, Set<SnapshotId>> indexSnapshots) {
this.snapshotIds = Collections.unmodifiableList(snapshotIds);
this.indices = Collections.unmodifiableMap(indices);
this.indices = Collections.unmodifiableMap(indexSnapshots.keySet()
.stream()
.collect(Collectors.toMap(IndexId::getName, Function.identity())));
this.indexSnapshots = Collections.unmodifiableMap(indexSnapshots);
}
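Note: a minimal sketch, using plain JDK types in place of IndexId and SnapshotId, of how the constructor above derives the name-to-id lookup from the per-index snapshot map, so the two structures cannot disagree. Names and ids here are illustrative.

import java.util.*;
import java.util.function.Function;
import java.util.stream.Collectors;

class DeriveIndicesSketch {
    record IndexIdStub(String name, String id) {}

    public static void main(String[] args) {
        Map<IndexIdStub, Set<String>> indexSnapshots = new HashMap<>();
        indexSnapshots.put(new IndexIdStub("foo", "Ac1342-B_x"), Set.of("snap-uuid-1"));
        indexSnapshots.put(new IndexIdStub("bar", "1xB0D8_B3y"), Set.of("snap-uuid-1", "snap-uuid-2"));

        // Same idea as the constructor: the name -> id view is computed from the map keys.
        Map<String, IndexIdStub> indices = indexSnapshots.keySet().stream()
            .collect(Collectors.toMap(IndexIdStub::name, Function.identity()));

        System.out.println(indices.get("foo").id());  // Ac1342-B_x
    }
}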
protected RepositoryData copy() {
return new RepositoryData(snapshotIds, indexSnapshots);
}

/**

@@ -69,9 +79,9 @@ public final class RepositoryData implements ToXContent {
}

/**
* Returns an unmodifiable map of the index names to index metadata in the repository.
* Returns an unmodifiable map of the index names to {@link IndexId} in the repository.
*/
public Map<String, IndexMeta> getIndices() {
public Map<String, IndexId> getIndices() {
return indices;
}

@@ -85,32 +95,29 @@ public final class RepositoryData implements ToXContent {
}
List<SnapshotId> snapshots = new ArrayList<>(snapshotIds);
snapshots.add(snapshotId);
Map<String, IndexMeta> indexMetaMap = getIndices();
Map<String, IndexMeta> addedIndices = new HashMap<>();
for (IndexId indexId : snapshottedIndices) {
final String indexName = indexId.getName();
IndexMeta newIndexMeta;
if (indexMetaMap.containsKey(indexName)) {
newIndexMeta = indexMetaMap.get(indexName).addSnapshot(snapshotId);
Map<IndexId, Set<SnapshotId>> allIndexSnapshots = new HashMap<>(indexSnapshots);
for (final IndexId indexId : snapshottedIndices) {
if (allIndexSnapshots.containsKey(indexId)) {
Set<SnapshotId> ids = allIndexSnapshots.get(indexId);
if (ids == null) {
ids = new LinkedHashSet<>();
allIndexSnapshots.put(indexId, ids);
}
ids.add(snapshotId);
} else {
Set<SnapshotId> ids = new LinkedHashSet<>();
ids.add(snapshotId);
newIndexMeta = new IndexMeta(indexId, ids);
allIndexSnapshots.put(indexId, ids);
}
addedIndices.put(indexName, newIndexMeta);
}
Map<String, IndexMeta> allIndices = new HashMap<>(indices);
allIndices.putAll(addedIndices);
return new RepositoryData(snapshots, allIndices);
return new RepositoryData(snapshots, allIndexSnapshots);
}

/**
* Add indices to the repository metadata; returns a new instance.
* Initializes the indices in the repository metadata; returns a new instance.
*/
public RepositoryData addIndices(final Map<String, IndexMeta> newIndices) {
Map<String, IndexMeta> map = new HashMap<>(indices);
map.putAll(newIndices);
return new RepositoryData(snapshotIds, map);
public RepositoryData initIndices(final Map<IndexId, Set<SnapshotId>> indexSnapshots) {
return new RepositoryData(snapshotIds, indexSnapshots);
}

/**

@@ -121,23 +128,36 @@ public final class RepositoryData implements ToXContent {
.stream()
.filter(id -> snapshotId.equals(id) == false)
.collect(Collectors.toList());
Map<String, IndexMeta> newIndices = new HashMap<>();
for (IndexMeta indexMeta : indices.values()) {
Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
for (final IndexId indexId : indices.values()) {
Set<SnapshotId> set;
if (indexMeta.getSnapshotIds().contains(snapshotId)) {
if (indexMeta.getSnapshotIds().size() == 1) {
Set<SnapshotId> snapshotIds = this.indexSnapshots.get(indexId);
assert snapshotIds != null;
if (snapshotIds.contains(snapshotId)) {
if (snapshotIds.size() == 1) {
// removing the snapshot will mean no more snapshots have this index, so just skip over it
continue;
}
set = new LinkedHashSet<>(indexMeta.getSnapshotIds());
set = new LinkedHashSet<>(snapshotIds);
set.remove(snapshotId);
} else {
set = indexMeta.getSnapshotIds();
set = snapshotIds;
}
newIndices.put(indexMeta.getName(), new IndexMeta(indexMeta.getIndexId(), set));
indexSnapshots.put(indexId, set);
}

return new RepositoryData(newSnapshotIds, newIndices);
return new RepositoryData(newSnapshotIds, indexSnapshots);
}

/**
* Returns an immutable collection of the snapshot ids for the snapshots that contain the given index.
*/
public Set<SnapshotId> getSnapshots(final IndexId indexId) {
Set<SnapshotId> snapshotIds = indexSnapshots.get(indexId);
if (snapshotIds == null) {
throw new IllegalArgumentException("unknown snapshot index " + indexId + "");
}
return snapshotIds;
}

@Override

@@ -149,12 +169,14 @@ public final class RepositoryData implements ToXContent {
return false;
}
@SuppressWarnings("unchecked") RepositoryData that = (RepositoryData) obj;
return snapshotIds.equals(that.snapshotIds) && indices.equals(that.indices);
return snapshotIds.equals(that.snapshotIds)
&& indices.equals(that.indices)
&& indexSnapshots.equals(that.indexSnapshots);
}

@Override
public int hashCode() {
return Objects.hash(snapshotIds, indices);
return Objects.hash(snapshotIds, indices, indexSnapshots);
}

/**

@@ -163,7 +185,7 @@ public final class RepositoryData implements ToXContent {
*/
public IndexId resolveIndexId(final String indexName) {
if (indices.containsKey(indexName)) {
return indices.get(indexName).getIndexId();
return indices.get(indexName);
} else {
// on repositories created before 5.0, there was no indices information in the index
// blob, so if the repository hasn't been updated with new snapshots, no new index blob

@@ -174,8 +196,7 @@ public final class RepositoryData implements ToXContent {
}

/**
* Resolve the given index names to index ids, throwing an exception
* if any of the indices could not be resolved.
* Resolve the given index names to index ids.
*/
public List<IndexId> resolveIndices(final List<String> indices) {
List<IndexId> resolvedIndices = new ArrayList<>(indices.size());

@@ -185,6 +206,24 @@ public final class RepositoryData implements ToXContent {
return resolvedIndices;
}

/**
* Resolve the given index names to index ids, creating new index ids for
* new indices in the repository.
*/
public List<IndexId> resolveNewIndices(final List<String> indicesToResolve) {
List<IndexId> snapshotIndices = new ArrayList<>();
for (String index : indicesToResolve) {
final IndexId indexId;
if (indices.containsKey(index)) {
indexId = indices.get(index);
} else {
indexId = new IndexId(index, UUIDs.randomBase64UUID());
}
snapshotIndices.add(indexId);
}
return snapshotIndices;
}
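Note: a small self-contained sketch (plain JDK types standing in for IndexId and randomBase64UUID) of the resolve-or-create behaviour of resolveNewIndices above; the map contents and names are illustrative only.

import java.util.*;

class ResolveNewIndicesSketch {
    // name -> repository-scoped id for indices that already exist in the repository
    static final Map<String, String> KNOWN = new HashMap<>(Map.of("foo", "Ac1342-B_x"));

    static List<String> resolveNewIndices(List<String> names) {
        List<String> ids = new ArrayList<>();
        for (String name : names) {
            // reuse the existing id, otherwise mint a fresh one for a brand-new index
            ids.add(KNOWN.computeIfAbsent(name, n -> UUID.randomUUID().toString()));
        }
        return ids;
    }

    public static void main(String[] args) {
        System.out.println(resolveNewIndices(Arrays.asList("foo", "new-index")));
    }
}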
private static final String SNAPSHOTS = "snapshots";
private static final String INDICES = "indices";
private static final String INDEX_ID = "id";

@@ -200,11 +239,13 @@ public final class RepositoryData implements ToXContent {
builder.endArray();
// write the indices map
builder.startObject(INDICES);
for (final IndexMeta indexMeta : getIndices().values()) {
builder.startObject(indexMeta.getName());
builder.field(INDEX_ID, indexMeta.getId());
for (final IndexId indexId : getIndices().values()) {
builder.startObject(indexId.getName());
builder.field(INDEX_ID, indexId.getId());
builder.startArray(SNAPSHOTS);
for (final SnapshotId snapshotId : indexMeta.getSnapshotIds()) {
Set<SnapshotId> snapshotIds = indexSnapshots.get(indexId);
assert snapshotIds != null;
for (final SnapshotId snapshotId : snapshotIds) {
snapshotId.toXContent(builder, params);
}
builder.endArray();

@@ -217,7 +258,7 @@ public final class RepositoryData implements ToXContent {

public static RepositoryData fromXContent(final XContentParser parser) throws IOException {
List<SnapshotId> snapshots = new ArrayList<>();
Map<String, IndexMeta> indices = new HashMap<>();
Map<IndexId, Set<SnapshotId>> indexSnapshots = new HashMap<>();
if (parser.nextToken() == XContentParser.Token.START_OBJECT) {
while (parser.nextToken() == XContentParser.Token.FIELD_NAME) {
String currentFieldName = parser.currentName();

@@ -255,7 +296,7 @@ public final class RepositoryData implements ToXContent {
}
}
assert indexId != null;
indices.put(indexName, new IndexMeta(indexName, indexId, snapshotIds));
indexSnapshots.put(new IndexId(indexName, indexId), snapshotIds);
}
} else {
throw new ElasticsearchParseException("unknown field name [" + currentFieldName + "]");

@@ -264,98 +305,7 @@ public final class RepositoryData implements ToXContent {
} else {
throw new ElasticsearchParseException("start object expected");
}
return new RepositoryData(snapshots, indices);
return new RepositoryData(snapshots, indexSnapshots);
}

/**
* Represents information about a single index snapshotted in a repository.
*/
public static final class IndexMeta implements Writeable {
private final IndexId indexId;
private final Set<SnapshotId> snapshotIds;

public IndexMeta(final String name, final String id, final Set<SnapshotId> snapshotIds) {
this(new IndexId(name, id), snapshotIds);
}

public IndexMeta(final IndexId indexId, final Set<SnapshotId> snapshotIds) {
this.indexId = indexId;
this.snapshotIds = Collections.unmodifiableSet(snapshotIds);
}

public IndexMeta(final StreamInput in) throws IOException {
indexId = new IndexId(in);
final int size = in.readVInt();
Set<SnapshotId> ids = new LinkedHashSet<>();
for (int i = 0; i < size; i++) {
ids.add(new SnapshotId(in));
}
snapshotIds = Collections.unmodifiableSet(ids);
}

/**
* The name of the index.
*/
public String getName() {
return indexId.getName();
}

/**
* The unique ID for the index within the repository. This is *not* the same as the
* index's UUID, but merely a unique file/URL friendly identifier that a repository can
* use to name blobs for the index.
*/
public String getId() {
return indexId.getId();
}

/**
* An unmodifiable set of snapshot ids that contain this index as part of its snapshot.
*/
public Set<SnapshotId> getSnapshotIds() {
return snapshotIds;
}

/**
* The snapshotted index id.
*/
public IndexId getIndexId() {
return indexId;
}

/**
* Add a snapshot id to the list of snapshots that contain this index.
*/
public IndexMeta addSnapshot(final SnapshotId snapshotId) {
Set<SnapshotId> withAdded = new LinkedHashSet<>(snapshotIds);
withAdded.add(snapshotId);
return new IndexMeta(indexId, withAdded);
}

@Override
public void writeTo(final StreamOutput out) throws IOException {
indexId.writeTo(out);
out.writeVInt(snapshotIds.size());
for (SnapshotId snapshotId : snapshotIds) {
snapshotId.writeTo(out);
}
}

@Override
public boolean equals(Object obj) {
if (this == obj) {
return true;
}
if (obj == null || getClass() != obj.getClass()) {
return false;
}
@SuppressWarnings("unchecked") IndexMeta that = (IndexMeta) obj;
return indexId.equals(that.indexId) && snapshotIds.equals(that.snapshotIds);
}

@Override
public int hashCode() {
return Objects.hash(indexId, snapshotIds);
}
}
}
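Note: based on the toXContent/fromXContent code above, the index-N blob groups a "snapshots" array with an "indices" object keyed by index name, each entry holding an "id" and the snapshots containing it. The sketch below builds a rough in-memory equivalent of that shape with plain JDK types; all names and ids are invented for illustration.

import java.util.*;

class IndexBlobShapeSketch {
    public static void main(String[] args) {
        List<String> snapshots = List.of("snap-1");                       // the "snapshots" array
        Map<String, Map<String, Object>> indices = new LinkedHashMap<>(); // the "indices" object
        indices.put("foo", Map.of(
            "id", "Ac1342-B_x",             // repository-scoped IndexId
            "snapshots", List.of("snap-1")  // which snapshots contain this index
        ));
        System.out.println(snapshots + " " + indices);
    }
}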
@@ -62,7 +62,6 @@ import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.RepositoryData.IndexMeta;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.Strings;

@@ -109,7 +108,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

import static java.util.Collections.emptyMap;

@@ -133,7 +131,7 @@ import static java.util.Collections.unmodifiableMap;
* |- meta-20131011.dat - JSON serialized MetaData for snapshot "20131011"
* .....
* |- indices/ - data for all indices
* |- foo/ - data for index "foo"
* |- Ac1342-B_x/ - data for index "foo" which was assigned the unique id of Ac1342-B_x in the repository
* | |- meta-20131010.dat - JSON Serialized IndexMetaData for index "foo"
* | |- 0/ - data for shard "0" of index "foo"
* | | |- __1 \

@@ -153,7 +151,7 @@ import static java.util.Collections.unmodifiableMap;
* | |-2/
* | ......
* |
* |- bar/ - data for index bar
* |- 1xB0D8_B3y/ - data for index "bar" which was assigned the unique id of 1xB0D8_B3y in the repository
* ......
* }
* </pre>

@@ -230,6 +228,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp

private final ChecksumBlobStoreFormat<BlobStoreIndexShardSnapshots> indexShardSnapshotsFormat;

// flag to indicate if the index gen file has been checked for updating from pre 5.0 versions
private volatile boolean indexGenChecked;

/**
* Constructs new BlobStoreRepository
*

@@ -306,19 +307,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
return null;
}

public Map<String, IndexId> getIndices() {
try {
return readIndexGen()
.getIndices()
.values()
.stream()
.map(IndexMeta::getIndexId)
.collect(Collectors.toMap(IndexId::getName, Function.identity()));
} catch (IOException e) {
throw new RepositoryException(metadata.name(), "Could not get the indices in the repository.", e);
}
}

@Override
public RepositoryMetaData getMetadata() {
return metadata;

@@ -335,23 +323,20 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
if (getSnapshots().stream().anyMatch(s -> s.getName().equals(snapshotName))) {
throw new SnapshotCreationException(metadata.name(), snapshotId, "snapshot with the same name already exists");
}
if (snapshotFormat.exists(snapshotsBlobContainer, blobId(snapshotId)) ||
if (snapshotFormat.exists(snapshotsBlobContainer, snapshotId.getUUID()) ||
snapshotLegacyFormat.exists(snapshotsBlobContainer, snapshotName)) {
throw new SnapshotCreationException(metadata.name(), snapshotId, "snapshot with such name already exists");
}

// update the index file if created pre 5.0 to include the indices in the repository
updateIndexGenIfNecessary();

// Write Global MetaData
globalMetaDataFormat.write(clusterMetaData, snapshotsBlobContainer, blobId(snapshotId));
globalMetaDataFormat.write(clusterMetaData, snapshotsBlobContainer, snapshotId.getUUID());

// write the index metadata for each index in the snapshot
for (IndexId index : indices) {
final IndexMetaData indexMetaData = clusterMetaData.index(index.getName());
final BlobPath indexPath = basePath().add("indices").add(index.getId());
final BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
indexMetaDataFormat.write(indexMetaData, indexMetaDataBlobContainer, blobId(snapshotId));
indexMetaDataFormat.write(indexMetaData, indexMetaDataBlobContainer, snapshotId.getUUID());
}
} catch (IOException ex) {
throw new SnapshotCreationException(metadata.name(), snapshotId, ex);

@@ -361,35 +346,47 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// Older repository index files (index-N) only contain snapshot info, not indices info,
// so if the index file is of the older format, populate it with the indices entries
// so we know which indices of snapshots have blob ids in the older format.
private void updateIndexGenIfNecessary() {
if (isReadOnly()) {
// nothing to update on read only repositories
return;
private RepositoryData updateIndexGenIfNecessary() throws IOException {
if (indexGenChecked) {
// an optimization to avoid the blobExists call (which can be expensive
// especially for cloud based storage repositories) if the index gen file
// has already been updated
return null;
}
if (snapshotsBlobContainer.blobExists(SNAPSHOTS_FILE) == false) {
if (isReadOnly() || snapshotsBlobContainer.blobExists(SNAPSHOTS_FILE) == false) {
// pre 5.0 repositories have a single index file instead of generational index-N files,
// so if the single index file is missing, we already have an up to date repository.
return;
indexGenChecked = true;
return null;
}
final RepositoryData repositoryData = getRepositoryData();
final Map<String, Set<SnapshotId>> indexToSnapshots = new HashMap<>();
final RepositoryData repositoryData = readIndexGen();
final Map<IndexId, Set<SnapshotId>> indexToSnapshots = new HashMap<>();
for (final SnapshotId snapshotId : repositoryData.getSnapshotIds()) {
for (final String indexName : getSnapshotInfo(snapshotId).indices()) {
if (indexToSnapshots.containsKey(indexName)) {
indexToSnapshots.get(indexName).add(snapshotId);
final SnapshotInfo snapshotInfo;
try {
snapshotInfo = getSnapshotInfo(snapshotId);
} catch (SnapshotException e) {
logger.warn("[{}] repository is on a pre-5.0 format with an index file that contains snapshot [{}] but " +
"the corresponding snap-{}.dat file cannot be read. The snapshot will no longer be included in " +
"the repository but its data directories will remain.", e, getMetadata().name(),
snapshotId, snapshotId.getUUID());
continue;
}
for (final String indexName : snapshotInfo.indices()) {
final IndexId indexId = new IndexId(indexName, indexName);
if (indexToSnapshots.containsKey(indexId)) {
indexToSnapshots.get(indexId).add(snapshotId);
} else {
indexToSnapshots.put(indexName, Sets.newHashSet(snapshotId));
indexToSnapshots.put(indexId, Sets.newHashSet(snapshotId));
}
}
}
final Map<String, IndexMeta> indices = new HashMap<>();
for (Map.Entry<String, Set<SnapshotId>> entry : indexToSnapshots.entrySet()) {
final String indexName = entry.getKey();
indices.put(indexName, new IndexMeta(indexName, indexName, entry.getValue()));
}
try {
// write the new index gen file with the indices included
writeIndexGen(repositoryData.addIndices(indices));
final RepositoryData updatedRepoData = repositoryData.initIndices(indexToSnapshots);
writeIndexGen(updatedRepoData);
indexGenChecked = true;
return updatedRepoData;
} catch (IOException e) {
throw new RepositoryException(metadata.name(), "failed to update the repository index blob with indices data on startup", e);
}
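Note: a simplified sketch of the upgrade path described above (legacy single index blob inverted into an index-to-snapshots map, with the index name reused as its id), using plain JDK types. The class, field, and method names here are stand-ins, not the real BlobStoreRepository API.

import java.util.*;

class IndexGenUpgradeSketch {
    private volatile boolean indexGenChecked;                    // only pay the existence check once
    private final Map<String, List<String>> snapshotToIndices;   // parsed from the legacy blob (stub)
    private final Map<String, Set<String>> upgraded = new HashMap<>();

    IndexGenUpgradeSketch(Map<String, List<String>> snapshotToIndices) {
        this.snapshotToIndices = snapshotToIndices;
    }

    Map<String, Set<String>> updateIfNecessary() {
        if (indexGenChecked) {
            return upgraded;                                     // already migrated in this session
        }
        // Invert snapshot -> indices into index -> snapshots, using the index *name* as its id,
        // which matches what pre-5.0 repositories used for blob paths.
        for (Map.Entry<String, List<String>> e : snapshotToIndices.entrySet()) {
            for (String index : e.getValue()) {
                upgraded.computeIfAbsent(index, k -> new LinkedHashSet<>()).add(e.getKey());
            }
        }
        indexGenChecked = true;
        return upgraded;
    }

    public static void main(String[] args) {
        Map<String, List<String>> legacy = new HashMap<>();
        legacy.put("snap-1", Arrays.asList("foo", "bar"));
        legacy.put("snap-2", Arrays.asList("foo"));
        System.out.println(new IndexGenUpgradeSketch(legacy).updateIfNecessary());
    }
}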
@@ -426,9 +423,9 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
writeIndexGen(repositoryData.removeSnapshot(snapshotId));

// delete the snapshot file
safeSnapshotBlobDelete(snapshot, blobId(snapshotId));
safeSnapshotBlobDelete(snapshot, snapshotId.getUUID());
// delete the global metadata file
safeGlobalMetaDataBlobDelete(snapshot, blobId(snapshotId));
safeGlobalMetaDataBlobDelete(snapshot, snapshotId.getUUID());

// Now delete all indices
for (String index : indices) {

@@ -436,7 +433,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobPath indexPath = basePath().add("indices").add(indexId.getId());
BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
try {
indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, blobId(snapshotId));
indexMetaDataFormat(snapshot.version()).delete(indexMetaDataBlobContainer, snapshotId.getUUID());
} catch (IOException ex) {
logger.warn("[{}] failed to delete metadata for index [{}]", ex, snapshotId, index);
}

@@ -524,7 +521,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
System.currentTimeMillis(),
totalShards,
shardFailures);
snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, blobId(snapshotId));
snapshotFormat.write(blobStoreSnapshot, snapshotsBlobContainer, snapshotId.getUUID());
final RepositoryData repositoryData = getRepositoryData();
List<SnapshotId> snapshotIds = repositoryData.getSnapshotIds();
if (!snapshotIds.contains(snapshotId)) {

@@ -548,7 +545,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) {
try {
return snapshotFormat.read(snapshotsBlobContainer, blobId(snapshotId));
return snapshotFormat.read(snapshotsBlobContainer, snapshotId.getUUID());
} catch (FileNotFoundException | NoSuchFileException ex) {
// File is missing - let's try legacy format instead
try {

@@ -569,7 +566,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
// When we delete corrupted snapshots we might not know which version we are dealing with
// We can try detecting the version based on the metadata file format
assert ignoreIndexErrors;
if (globalMetaDataFormat.exists(snapshotsBlobContainer, blobId(snapshotId))) {
if (globalMetaDataFormat.exists(snapshotsBlobContainer, snapshotId.getUUID())) {
snapshotVersion = Version.CURRENT;
} else if (globalMetaDataLegacyFormat.exists(snapshotsBlobContainer, snapshotId.getName())) {
throw new SnapshotException(metadata.name(), snapshotId, "snapshot is too old");

@@ -578,7 +575,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}
try {
metaData = globalMetaDataFormat(snapshotVersion).read(snapshotsBlobContainer, blobId(snapshotId));
metaData = globalMetaDataFormat(snapshotVersion).read(snapshotsBlobContainer, snapshotId.getUUID());
} catch (FileNotFoundException | NoSuchFileException ex) {
throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
} catch (IOException ex) {

@@ -589,7 +586,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
BlobPath indexPath = basePath().add("indices").add(index.getId());
BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath);
try {
metaDataBuilder.put(indexMetaDataFormat(snapshotVersion).read(indexMetaDataBlobContainer, blobId(snapshotId)), false);
metaDataBuilder.put(indexMetaDataFormat(snapshotVersion).read(indexMetaDataBlobContainer, snapshotId.getUUID()), false);
} catch (ElasticsearchParseException | IOException ex) {
if (ignoreIndexErrors) {
logger.warn("[{}] [{}] failed to read metadata for index", ex, snapshotId, index.getName());

@@ -706,7 +703,11 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
@Override
public RepositoryData getRepositoryData() {
try {
return readIndexGen();
RepositoryData repositoryData = updateIndexGenIfNecessary();
if (repositoryData == null) {
repositoryData = readIndexGen();
}
return repositoryData;
} catch (NoSuchFileException nsfe) {
// repository doesn't have an index blob, it's a new blank repo
return RepositoryData.EMPTY;

@@ -787,15 +788,6 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
}
}

public static String blobId(final SnapshotId snapshotId) {
final String uuid = snapshotId.getUUID();
if (uuid.equals(SnapshotId.UNASSIGNED_UUID)) {
// the old snapshot blob naming
return snapshotId.getName();
}
return uuid;
}

/**
* Get the latest snapshot index blob id. Snapshot index blobs are named index-N, where N is
* the next version number from when the index blob was written. Each individual index-N blob is

@@ -1000,7 +992,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
int fileListGeneration = tuple.v2();

try {
indexShardSnapshotFormat(version).delete(blobContainer, blobId(snapshotId));
indexShardSnapshotFormat(version).delete(blobContainer, snapshotId.getUUID());
} catch (IOException e) {
logger.debug("[{}] [{}] failed to delete shard snapshot file", shardId, snapshotId);
}

@@ -1021,7 +1013,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
*/
public BlobStoreIndexShardSnapshot loadSnapshot() {
try {
return indexShardSnapshotFormat(version).read(blobContainer, blobId(snapshotId));
return indexShardSnapshotFormat(version).read(blobContainer, snapshotId.getUUID());
} catch (IOException ex) {
throw new IndexShardRestoreFailedException(shardId, "failed to read shard snapshot file", ex);
}

@@ -1150,7 +1142,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
try {
BlobStoreIndexShardSnapshot snapshot = null;
if (name.startsWith(SNAPSHOT_PREFIX)) {
snapshot = indexShardSnapshotFormat.readBlob(blobContainer, blobId(snapshotId));
snapshot = indexShardSnapshotFormat.readBlob(blobContainer, snapshotId.getUUID());
} else if (name.startsWith(LEGACY_SNAPSHOT_PREFIX)) {
snapshot = indexShardSnapshotLegacyFormat.readBlob(blobContainer, name);
}

@@ -1291,7 +1283,7 @@ public abstract class BlobStoreRepository extends AbstractLifecycleComponent imp
//TODO: The time stored in snapshot doesn't include cleanup time.
logger.trace("[{}] [{}] writing shard snapshot file", shardId, snapshotId);
try {
indexShardSnapshotFormat.write(snapshot, blobContainer, blobId(snapshotId));
indexShardSnapshotFormat.write(snapshot, blobContainer, snapshotId.getUUID());
} catch (IOException e) {
throw new IndexShardSnapshotFailedException(shardId, "Failed to write commit point", e);
}
@@ -34,10 +34,6 @@ import java.util.Objects;
*/
public final class SnapshotId implements Writeable, ToXContent {

/**
* This value is for older snapshots that don't have a UUID.
*/
public static final String UNASSIGNED_UUID = "_na_";
private static final String NAME = "name";
private static final String UUID = "uuid";

@@ -144,10 +140,10 @@ public final class SnapshotId implements Writeable, ToXContent {
}
}
return new SnapshotId(name, uuid);
}
// the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too
else {
return new SnapshotId(parser.text(), SnapshotId.UNASSIGNED_UUID);
} else {
// the old format pre 5.0 that only contains the snapshot name, use the name as the uuid too
final String name = parser.text();
return new SnapshotId(name, name);
}
}
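Note: a compact sketch of the fallback above, where a pre-5.0 snapshot that only has a name uses that name as its uuid as well. The string-based parse helper below is a hypothetical stand-in for the real XContent-based parser.

class SnapshotIdSketch {
    final String name;
    final String uuid;

    SnapshotIdSketch(String name, String uuid) {
        this.name = name;
        this.uuid = uuid;
    }

    // Hypothetical helper: "name" alone means an old snapshot, "name:uuid" a new one.
    static SnapshotIdSketch parse(String token) {
        int sep = token.indexOf(':');
        return sep < 0
            ? new SnapshotIdSketch(token, token)   // pre-5.0: the name doubles as the uuid
            : new SnapshotIdSketch(token.substring(0, sep), token.substring(sep + 1));
    }

    public static void main(String[] args) {
        System.out.println(parse("test_1").uuid);       // test_1
        System.out.println(parse("snap:k3yD123").uuid); // k3yD123
    }
}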
@@ -458,7 +458,7 @@ public final class SnapshotInfo implements Comparable<SnapshotInfo>, ToXContent,
}
if (uuid == null) {
// the old format where there wasn't a UUID
uuid = SnapshotId.UNASSIGNED_UUID;
uuid = name;
}
return new SnapshotInfo(new SnapshotId(name, uuid),
indices,
@@ -60,7 +60,6 @@ import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.RepositoriesService;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.RepositoryData.IndexMeta;
import org.elasticsearch.repositories.RepositoryMissingException;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.threadpool.ThreadPool;

@@ -221,8 +220,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
final String snapshotName = request.snapshotName;
validate(repositoryName, snapshotName);
final SnapshotId snapshotId = new SnapshotId(snapshotName, UUIDs.randomBase64UUID()); // new UUID for the snapshot
final Repository repository = repositoriesService.repository(repositoryName);
final Map<String, IndexMeta> indexIds = repository.getRepositoryData().getIndices();
final RepositoryData repositoryData = repositoriesService.repository(repositoryName).getRepositoryData();

clusterService.submitStateUpdateTask(request.cause(), new ClusterStateUpdateTask() {

@@ -237,16 +235,7 @@ public class SnapshotsService extends AbstractLifecycleComponent implements Clus
// Store newSnapshot here to be processed in clusterStateProcessed
List<String> indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices()));
logger.trace("[{}][{}] creating snapshot for indices [{}]", repositoryName, snapshotName, indices);
List<IndexId> snapshotIndices = new ArrayList<>();
for (String index : indices) {
final IndexId indexId;
if (indexIds.containsKey(index)) {
indexId = indexIds.get(index).getIndexId();
} else {
indexId = new IndexId(index, UUIDs.randomBase64UUID());
}
snapshotIndices.add(indexId);
}
List<IndexId> snapshotIndices = repositoryData.resolveNewIndices(indices);
newSnapshot = new SnapshotsInProgress.Entry(new Snapshot(repositoryName, snapshotId),
request.includeGlobalState(),
request.partial(),
@@ -46,7 +46,7 @@ import static org.hamcrest.Matchers.greaterThanOrEqualTo;
* as blob names and repository blob formats have changed between the snapshot versions.
*/
@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST)
// this test sometimes fails in recovery when the recovery is reset, increasing the logging level to help debug
// this test sometimes fails in recovery when the recovery is reset, increasing the logging level to help debug
@TestLogging("indices.recovery:DEBUG")
public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase {

@@ -70,7 +70,7 @@ public class RepositoryUpgradabilityIT extends AbstractSnapshotIntegTestCase {
final Set<SnapshotInfo> snapshotInfos = Sets.newHashSet(getSnapshots(repoName));
assertThat(snapshotInfos.size(), equalTo(1));
SnapshotInfo originalSnapshot = snapshotInfos.iterator().next();
assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", SnapshotId.UNASSIGNED_UUID)));
assertThat(originalSnapshot.snapshotId(), equalTo(new SnapshotId("test_1", "test_1")));
assertThat(Sets.newHashSet(originalSnapshot.indices()), equalTo(indices));

logger.info("--> restore the original snapshot");
@@ -103,7 +103,6 @@ import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.repositories.IndexId;
import org.elasticsearch.repositories.Repository;
import org.elasticsearch.repositories.RepositoryData;
import org.elasticsearch.repositories.RepositoryData.IndexMeta;
import org.elasticsearch.snapshots.Snapshot;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.snapshots.SnapshotInfo;

@@ -1675,8 +1674,8 @@ public class IndexShardTests extends ESSingleNodeTestCase {
}
@Override
public RepositoryData getRepositoryData() {
Map<String, IndexMeta> map = new HashMap<>();
map.put(indexName, new IndexMeta(indexName, "blah", Collections.emptySet()));
Map<IndexId, Set<SnapshotId>> map = new HashMap<>();
map.put(new IndexId(indexName, "blah"), Collections.emptySet());
return new RepositoryData(Collections.emptyList(), map);
}
@Override
@@ -25,7 +25,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.common.xcontent.json.JsonXContent;
import org.elasticsearch.repositories.RepositoryData.IndexMeta;
import org.elasticsearch.snapshots.SnapshotId;
import org.elasticsearch.test.ESTestCase;

@@ -48,7 +47,7 @@ public class RepositoryDataTests extends ESTestCase {

public void testEqualsAndHashCode() {
RepositoryData repositoryData1 = generateRandomRepoData();
RepositoryData repositoryData2 = new RepositoryData(repositoryData1.getSnapshotIds(), repositoryData1.getIndices());
RepositoryData repositoryData2 = repositoryData1.copy();
assertEquals(repositoryData1, repositoryData2);
assertEquals(repositoryData1.hashCode(), repositoryData2.hashCode());
}

@@ -65,7 +64,7 @@ public class RepositoryDataTests extends ESTestCase {
RepositoryData repositoryData = generateRandomRepoData();
// test that adding the same snapshot id to the repository data throws an exception
final SnapshotId snapshotId = repositoryData.getSnapshotIds().get(0);
Map<String, IndexMeta> indexMetaMap = repositoryData.getIndices();
Map<String, IndexId> indexIdMap = repositoryData.getIndices();
expectThrows(IllegalArgumentException.class,
() -> repositoryData.addSnapshot(new SnapshotId(snapshotId.getName(), snapshotId.getUUID()), Collections.emptyList()));
// test that adding a snapshot and its indices works

@@ -78,18 +77,16 @@ public class RepositoryDataTests extends ESTestCase {
newIndices.add(indexId);
indices.add(indexId);
}
int numOld = randomIntBetween(1, indexMetaMap.size());
List<String> indexNames = new ArrayList<>(indexMetaMap.keySet());
int numOld = randomIntBetween(1, indexIdMap.size());
List<String> indexNames = new ArrayList<>(indexIdMap.keySet());
for (int i = 0; i < numOld; i++) {
IndexId indexId = indexMetaMap.get(indexNames.get(i)).getIndexId();
indices.add(indexId);
indices.add(indexIdMap.get(indexNames.get(i)));
}
RepositoryData newRepoData = repositoryData.addSnapshot(newSnapshot, indices);
// verify that the new repository data has the new snapshot and its indices
assertTrue(newRepoData.getSnapshotIds().contains(newSnapshot));
indexMetaMap = newRepoData.getIndices();
for (IndexId indexId : indices) {
Set<SnapshotId> snapshotIds = indexMetaMap.get(indexId.getName()).getSnapshotIds();
Set<SnapshotId> snapshotIds = newRepoData.getSnapshots(indexId);
assertTrue(snapshotIds.contains(newSnapshot));
if (newIndices.contains(indexId)) {
assertEquals(snapshotIds.size(), 1); // if it was a new index, only the new snapshot should be in its set

@@ -97,18 +94,20 @@ public class RepositoryDataTests extends ESTestCase {
}
}

public void testAddIndices() {
public void testInitIndices() {
final int numSnapshots = randomIntBetween(1, 30);
final List<SnapshotId> snapshotIds = new ArrayList<>(numSnapshots);
for (int i = 0; i < numSnapshots; i++) {
snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), UUIDs.randomBase64UUID()));
}
RepositoryData repositoryData = new RepositoryData(snapshotIds, Collections.emptyMap());
// test that adding indices works
Map<String, IndexMeta> indices = randomIndices(snapshotIds);
RepositoryData newRepoData = repositoryData.addIndices(indices);
// test that initializing indices works
Map<IndexId, Set<SnapshotId>> indices = randomIndices(snapshotIds);
RepositoryData newRepoData = repositoryData.initIndices(indices);
assertEquals(repositoryData.getSnapshotIds(), newRepoData.getSnapshotIds());
assertEquals(indices, newRepoData.getIndices());
for (IndexId indexId : indices.keySet()) {
assertEquals(indices.get(indexId), newRepoData.getSnapshots(indexId));
}
}

public void testRemoveSnapshot() {

@@ -118,18 +117,18 @@ public class RepositoryDataTests extends ESTestCase {
SnapshotId removedSnapshotId = snapshotIds.remove(randomIntBetween(0, snapshotIds.size() - 1));
RepositoryData newRepositoryData = repositoryData.removeSnapshot(removedSnapshotId);
// make sure the repository data's indices no longer contain the removed snapshot
for (IndexMeta indexMeta : newRepositoryData.getIndices().values()) {
assertFalse(indexMeta.getSnapshotIds().contains(removedSnapshotId));
for (final IndexId indexId : newRepositoryData.getIndices().values()) {
assertFalse(newRepositoryData.getSnapshots(indexId).contains(removedSnapshotId));
}
}

public void testResolveIndexId() {
RepositoryData repositoryData = generateRandomRepoData();
Map<String, IndexMeta> indices = repositoryData.getIndices();
Map<String, IndexId> indices = repositoryData.getIndices();
Set<String> indexNames = indices.keySet();
assertThat(indexNames.size(), greaterThan(0));
String indexName = indexNames.iterator().next();
IndexId indexId = indices.get(indexName).getIndexId();
IndexId indexId = indices.get(indexName);
assertEquals(indexId, repositoryData.resolveIndexId(indexName));
String notInRepoData = randomAsciiOfLength(5);
assertFalse(indexName.contains(notInRepoData));

@@ -154,18 +153,18 @@ public class RepositoryDataTests extends ESTestCase {
return snapshotIds;
}

private static Map<String, IndexMeta> randomIndices(final List<SnapshotId> snapshotIds) {
private static Map<IndexId, Set<SnapshotId>> randomIndices(final List<SnapshotId> snapshotIds) {
final int totalSnapshots = snapshotIds.size();
final int numIndices = randomIntBetween(1, 30);
final Map<String, IndexMeta> indices = new HashMap<>(numIndices);
final Map<IndexId, Set<SnapshotId>> indices = new HashMap<>(numIndices);
for (int i = 0; i < numIndices; i++) {
final String indexName = randomAsciiOfLength(8);
final IndexId indexId = new IndexId(randomAsciiOfLength(8), UUIDs.randomBase64UUID());
final Set<SnapshotId> indexSnapshots = new LinkedHashSet<>();
final int numIndicesForSnapshot = randomIntBetween(1, numIndices);
for (int j = 0; j < numIndicesForSnapshot; j++) {
indexSnapshots.add(snapshotIds.get(randomIntBetween(0, totalSnapshots - 1)));
}
indices.put(indexName, new IndexMeta(indexName, UUIDs.randomBase64UUID(), indexSnapshots));
indices.put(indexId, indexSnapshots);
}
return indices;
}
@@ -28,7 +28,6 @@ import org.elasticsearch.common.io.stream.BytesStreamOutput;
import org.elasticsearch.common.io.stream.OutputStreamStreamOutput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

@@ -46,7 +45,6 @@ import java.util.List;
import java.util.stream.Collectors;

import static org.elasticsearch.repositories.RepositoryDataTests.generateRandomRepoData;
import static org.elasticsearch.repositories.blobstore.BlobStoreRepository.blobId;
import static org.hamcrest.Matchers.equalTo;

/**

@@ -113,7 +111,7 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {

// write to and read from a index file with no entries
assertThat(repository.getSnapshots().size(), equalTo(0));
final RepositoryData emptyData = new RepositoryData(Collections.emptyList(), Collections.emptyMap());
final RepositoryData emptyData = RepositoryData.EMPTY;
repository.writeIndexGen(emptyData);
final RepositoryData readData = repository.getRepositoryData();
assertEquals(readData, emptyData);

@@ -161,37 +159,6 @@ public class BlobStoreRepositoryTests extends ESSingleNodeTestCase {
assertThat(repository.readSnapshotIndexLatestBlob(), equalTo(2L));
}

public void testOldIndexFileFormat() throws Exception {
final BlobStoreRepository repository = setupRepo();

// write old index file format
final int numOldSnapshots = randomIntBetween(1, 30);
final List<SnapshotId> snapshotIds = new ArrayList<>();
for (int i = 0; i < numOldSnapshots; i++) {
snapshotIds.add(new SnapshotId(randomAsciiOfLength(8), SnapshotId.UNASSIGNED_UUID));
}
writeOldFormat(repository, snapshotIds.stream().map(SnapshotId::getName).collect(Collectors.toList()));
assertThat(Sets.newHashSet(repository.getSnapshots()), equalTo(Sets.newHashSet(snapshotIds)));

// write to and read from a snapshot file with a random number of new entries added
final RepositoryData repositoryData = generateRandomRepoData(snapshotIds);
repository.writeIndexGen(repositoryData);
assertEquals(repository.getRepositoryData(), repositoryData);
}

public void testBlobId() {
SnapshotId snapshotId = new SnapshotId("abc123", SnapshotId.UNASSIGNED_UUID);
assertThat(blobId(snapshotId), equalTo("abc123")); // just the snapshot name
snapshotId = new SnapshotId("abc-123", SnapshotId.UNASSIGNED_UUID);
assertThat(blobId(snapshotId), equalTo("abc-123")); // just the snapshot name
String uuid = UUIDs.randomBase64UUID();
snapshotId = new SnapshotId("abc123", uuid);
assertThat(blobId(snapshotId), equalTo(uuid)); // uuid only
uuid = UUIDs.randomBase64UUID();
snapshotId = new SnapshotId("abc-123", uuid);
assertThat(blobId(snapshotId), equalTo(uuid)); // uuid only
}

private BlobStoreRepository setupRepo() {
final Client client = client();
final Path location = ESIntegTestCase.randomRepoPath(node().settings());