Rename MetaData.uuid -> MetaData.clusterUUID and IndexMetaData.uuid -> IndexMetaData.indexUUID
As explained in #11831, we currently have uuid fields on the cluster state, the meta data and the index metadata. The latter two are persistent across changes and are effectively used as a persistent uuid for the cluster and a persistent uuid for an index. The first (ClusterState.uuid) is ephemeral and changes with every change to the cluster state. This is confusing. We settled on the following new names: ClusterState.uuid -> stateUUID (transient), MetaData.uuid -> clusterUUID (persistent), IndexMetaData.uuid -> indexUUID (persistent). Closes #11914, Closes #11831
This commit is contained in:
parent 8e07b4fba4
commit 17906ca7d6
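For orientation, the renamed accessors end up looking like this after the change. This is an illustrative sketch only, assuming the core module from this branch is on the classpath; the describeUuids helper is hypothetical and not part of the commit.

import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;

public final class UuidNamingExample {

    // Hypothetical helper: shows the three renamed accessors side by side.
    static String describeUuids(ClusterState state, String indexName) {
        String stateUUID = state.stateUUID();                 // transient, regenerated for every cluster state version
        String clusterUUID = state.metaData().clusterUUID();  // persistent, identifies this cluster
        IndexMetaData indexMetaData = state.metaData().index(indexName);
        String indexUUID = indexMetaData == null
                ? IndexMetaData.INDEX_UUID_NA_VALUE
                : indexMetaData.getIndexUUID();               // persistent, identifies the index (setting "index.uuid")
        return "state uuid [" + stateUUID + "], cluster uuid [" + clusterUUID + "], index uuid [" + indexUUID + "]";
    }
}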
@@ -472,7 +472,7 @@ public class Version {
     public static Version indexCreated(Settings indexSettings) {
         final Version indexVersion = indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null);
         if (indexVersion == null) {
-            throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_UUID) + "]");
+            throw new IllegalStateException("[" + IndexMetaData.SETTING_VERSION_CREATED + "] is not present in the index settings for index with uuid: [" + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) + "]");
         }
         return indexVersion;
     }
@@ -77,7 +77,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
         logger.trace("Serving cluster state request using version {}", currentState.version());
         ClusterState.Builder builder = ClusterState.builder(currentState.getClusterName());
         builder.version(currentState.version());
-        builder.uuid(currentState.uuid());
+        builder.stateUUID(currentState.stateUUID());
         if (request.nodes()) {
             builder.nodes(currentState.nodes());
         }
@@ -84,7 +84,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
             }
         }
         return new ClusterStatsResponse(System.currentTimeMillis(), clusterName,
-                clusterService.state().metaData().uuid(), nodeStats.toArray(new ClusterStatsNodeResponse[nodeStats.size()]));
+                clusterService.state().metaData().clusterUUID(), nodeStats.toArray(new ClusterStatsNodeResponse[nodeStats.size()]));
     }

     @Override
@@ -897,7 +897,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
             logger.trace("[{}] transport failure during replica request [{}] ", exp, node, replicaRequest);
             if (ignoreReplicaException(exp) == false) {
                 logger.warn("failed to perform " + actionName + " on remote replica " + node + shardIt.shardId(), exp);
-                shardStateAction.shardFailed(shard, indexMetaData.getUUID(),
+                shardStateAction.shardFailed(shard, indexMetaData.getIndexUUID(),
                         "Failed to perform [" + actionName + "] on replica, message [" + ExceptionsHelper.detailedMessage(exp) + "]");
             }
         }
@@ -76,7 +76,7 @@ import java.util.Map;
  * to a node if this node was present in the previous version of the cluster state. If a node is not present was
  * not present in the previous version of the cluster state, such node is unlikely to have the previous cluster
  * state version and should be sent a complete version. In order to make sure that the differences are applied to
- * correct version of the cluster state, each cluster state version update generates {@link #uuid} that uniquely
+ * correct version of the cluster state, each cluster state version update generates {@link #stateUUID} that uniquely
  * identifies this version of the state. This uuid is verified by the {@link ClusterStateDiff#apply} method to
  * makes sure that the correct diffs are applied. If uuids don’t match, the {@link ClusterStateDiff#apply} method
  * throws the {@link IncompatibleClusterStateVersionException}, which should cause the publishing mechanism to send
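The javadoc above is the crux of the rename on the ClusterState side: the stateUUID is a per-version token that guards diff application. A simplified, self-contained model of that guard (the classes here are stand-ins, not the Elasticsearch types; the real logic is in ClusterStateDiff.apply further down in this diff) might look like this:

// Simplified model of the stateUUID guard used when applying a cluster state diff.
final class StateDiffGuard {
    final String fromUuid; // stateUUID of the state the diff was computed from
    final String toUuid;   // stateUUID of the state the diff produces

    StateDiffGuard(String fromUuid, String toUuid) {
        this.fromUuid = fromUuid;
        this.toUuid = toUuid;
    }

    /** Returns the uuid the local state should end up with, or throws if the diff does not apply. */
    String apply(String localStateUUID) {
        if (toUuid.equals(localStateUUID)) {
            return localStateUUID; // already at the target version, nothing to do
        }
        if (!fromUuid.equals(localStateUUID)) {
            // the local node has a different base state; it must request the full cluster state
            throw new IllegalStateException("incompatible cluster state version, expected " + fromUuid + " but was " + localStateUUID);
        }
        return toUuid; // safe to apply the diff
    }
}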
@@ -144,7 +144,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {

     private final long version;

-    private final String uuid;
+    private final String stateUUID;

     private final RoutingTable routingTable;

@@ -165,13 +165,13 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {

     private volatile ClusterStateStatus status;

-    public ClusterState(long version, String uuid, ClusterState state) {
-        this(state.clusterName, version, uuid, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false);
+    public ClusterState(long version, String stateUUID, ClusterState state) {
+        this(state.clusterName, version, stateUUID, state.metaData(), state.routingTable(), state.nodes(), state.blocks(), state.customs(), false);
     }

-    public ClusterState(ClusterName clusterName, long version, String uuid, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap<String, Custom> customs, boolean wasReadFromDiff) {
+    public ClusterState(ClusterName clusterName, long version, String stateUUID, MetaData metaData, RoutingTable routingTable, DiscoveryNodes nodes, ClusterBlocks blocks, ImmutableOpenMap<String, Custom> customs, boolean wasReadFromDiff) {
         this.version = version;
-        this.uuid = uuid;
+        this.stateUUID = stateUUID;
         this.clusterName = clusterName;
         this.metaData = metaData;
         this.routingTable = routingTable;
@@ -200,11 +200,11 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
     }

     /**
-     * This uuid is automatically generated for for each version of cluster state. It is used to make sure that
+     * This stateUUID is automatically generated for for each version of cluster state. It is used to make sure that
      * we are applying diffs to the right previous state.
      */
-    public String uuid() {
-        return this.uuid;
+    public String stateUUID() {
+        return this.stateUUID;
     }

     public DiscoveryNodes nodes() {
@@ -283,7 +283,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
     public String prettyPrint() {
         StringBuilder sb = new StringBuilder();
         sb.append("version: ").append(version).append("\n");
-        sb.append("uuid: ").append(uuid).append("\n");
+        sb.append("state uuid: ").append(stateUUID).append("\n");
         sb.append("from_diff: ").append(wasReadFromDiff).append("\n");
         sb.append("meta data version: ").append(metaData.version()).append("\n");
         sb.append(nodes().prettyPrint());
@@ -362,7 +362,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {

         if (metrics.contains(Metric.VERSION)) {
             builder.field("version", version);
-            builder.field("uuid", uuid);
+            builder.field("state_uuid", stateUUID);
         }

         if (metrics.contains(Metric.MASTER_NODE)) {
@@ -571,7 +571,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
         public Builder(ClusterState state) {
             this.clusterName = state.clusterName;
             this.version = state.version();
-            this.uuid = state.uuid();
+            this.uuid = state.stateUUID();
             this.nodes = state.nodes();
             this.routingTable = state.routingTable();
             this.metaData = state.metaData();
@@ -637,7 +637,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
             return this;
         }

-        public Builder uuid(String uuid) {
+        public Builder stateUUID(String uuid) {
             this.uuid = uuid;
             return this;
         }
@@ -734,7 +734,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
     public void writeTo(StreamOutput out) throws IOException {
         clusterName.writeTo(out);
         out.writeLong(version);
-        out.writeString(uuid);
+        out.writeString(stateUUID);
         metaData.writeTo(out);
         routingTable.writeTo(out);
         nodes.writeTo(out);
@@ -767,8 +767,8 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
         private final Diff<ImmutableOpenMap<String, Custom>> customs;

         public ClusterStateDiff(ClusterState before, ClusterState after) {
-            fromUuid = before.uuid;
-            toUuid = after.uuid;
+            fromUuid = before.stateUUID;
+            toUuid = after.stateUUID;
             toVersion = after.version;
             clusterName = after.clusterName;
             routingTable = after.routingTable.diff(before.routingTable);
@@ -816,14 +816,14 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
         @Override
         public ClusterState apply(ClusterState state) {
             Builder builder = new Builder(clusterName);
-            if (toUuid.equals(state.uuid)) {
+            if (toUuid.equals(state.stateUUID)) {
                 // no need to read the rest - cluster state didn't change
                 return state;
             }
-            if (fromUuid.equals(state.uuid) == false) {
-                throw new IncompatibleClusterStateVersionException(state.version, state.uuid, toVersion, fromUuid);
+            if (fromUuid.equals(state.stateUUID) == false) {
+                throw new IncompatibleClusterStateVersionException(state.version, state.stateUUID, toVersion, fromUuid);
             }
-            builder.uuid(toUuid);
+            builder.stateUUID(toUuid);
             builder.version(toVersion);
             builder.routingTable(routingTable.apply(state.routingTable));
             builder.nodes(nodes.apply(state.nodes));
@@ -194,7 +194,7 @@ public class ShardStateAction extends AbstractComponent {
                 continue;
             }
             if (!indexMetaData.isSameUUID(shardRoutingEntry.indexUUID)) {
-                logger.debug("{} ignoring shard {}, different index uuid, current {}, got {}", shardRouting.shardId(), type, indexMetaData.getUUID(), shardRoutingEntry);
+                logger.debug("{} ignoring shard {}, different index uuid, current {}, got {}", shardRouting.shardId(), type, indexMetaData.getIndexUUID(), shardRoutingEntry);
                 continue;
             }

@@ -169,7 +169,7 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
     public static final String SETTING_CREATION_DATE = "index.creation_date";
     public static final String SETTING_PRIORITY = "index.priority";
     public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
-    public static final String SETTING_UUID = "index.uuid";
+    public static final String SETTING_INDEX_UUID = "index.uuid";
     public static final String SETTING_LEGACY_ROUTING_HASH_FUNCTION = "index.legacy.routing.hash.type";
     public static final String SETTING_LEGACY_ROUTING_USE_TYPE = "index.legacy.routing.use_type";
     public static final String SETTING_DATA_PATH = "index.data_path";
@@ -268,12 +268,12 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
         return index();
     }

-    public String uuid() {
-        return settings.get(SETTING_UUID, INDEX_UUID_NA_VALUE);
+    public String indexUUID() {
+        return settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
     }

-    public String getUUID() {
-        return uuid();
+    public String getIndexUUID() {
+        return indexUUID();
     }

     /**
@@ -281,11 +281,11 @@ public class IndexMetaData implements Diffable<IndexMetaData> {
      */
     public boolean isSameUUID(String otherUUID) {
         assert otherUUID != null;
-        assert uuid() != null;
-        if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(uuid())) {
+        assert indexUUID() != null;
+        if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(indexUUID())) {
             return true;
         }
-        return otherUUID.equals(getUUID());
+        return otherUUID.equals(getIndexUUID());
     }

     public long version() {
@@ -132,7 +132,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {

     public static final String CONTEXT_MODE_GATEWAY = XContentContext.GATEWAY.toString();

-    private final String uuid;
+    private final String clusterUUID;
     private final long version;

     private final Settings transientSettings;
@@ -154,8 +154,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
     private final ImmutableOpenMap<String, String[]> aliasAndIndexToIndexMap;

     @SuppressWarnings("unchecked")
-    MetaData(String uuid, long version, Settings transientSettings, Settings persistentSettings, ImmutableOpenMap<String, IndexMetaData> indices, ImmutableOpenMap<String, IndexTemplateMetaData> templates, ImmutableOpenMap<String, Custom> customs) {
-        this.uuid = uuid;
+    MetaData(String clusterUUID, long version, Settings transientSettings, Settings persistentSettings, ImmutableOpenMap<String, IndexMetaData> indices, ImmutableOpenMap<String, IndexTemplateMetaData> templates, ImmutableOpenMap<String, Custom> customs) {
+        this.clusterUUID = clusterUUID;
         this.version = version;
         this.transientSettings = transientSettings;
         this.persistentSettings = persistentSettings;
@@ -252,8 +252,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
         return this.version;
     }

-    public String uuid() {
-        return this.uuid;
+    public String clusterUUID() {
+        return this.clusterUUID;
     }

     /**
@@ -639,7 +639,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {

         private long version;

-        private String uuid;
+        private String clusterUUID;

         private Settings transientSettings;
         private Settings persistentSettings;
@@ -649,7 +649,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {


         public MetaDataDiff(MetaData before, MetaData after) {
-            uuid = after.uuid;
+            clusterUUID = after.clusterUUID;
             version = after.version;
             transientSettings = after.transientSettings;
             persistentSettings = after.persistentSettings;
@@ -659,7 +659,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
         }

         public MetaDataDiff(StreamInput in) throws IOException {
-            uuid = in.readString();
+            clusterUUID = in.readString();
             version = in.readLong();
             transientSettings = Settings.readSettingsFromStream(in);
             persistentSettings = Settings.readSettingsFromStream(in);
@@ -680,7 +680,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {

         @Override
         public void writeTo(StreamOutput out) throws IOException {
-            out.writeString(uuid);
+            out.writeString(clusterUUID);
             out.writeLong(version);
             Settings.writeSettingsToStream(transientSettings, out);
             Settings.writeSettingsToStream(persistentSettings, out);
@@ -692,7 +692,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
         @Override
         public MetaData apply(MetaData part) {
             Builder builder = builder();
-            builder.uuid(uuid);
+            builder.clusterUUID(clusterUUID);
             builder.version(version);
             builder.transientSettings(transientSettings);
             builder.persistentSettings(persistentSettings);
@@ -707,7 +707,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
     public MetaData readFrom(StreamInput in) throws IOException {
         Builder builder = new Builder();
         builder.version = in.readLong();
-        builder.uuid = in.readString();
+        builder.clusterUUID = in.readString();
         builder.transientSettings(readSettingsFromStream(in));
         builder.persistentSettings(readSettingsFromStream(in));
         int size = in.readVInt();
@@ -730,7 +730,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         out.writeLong(version);
-        out.writeString(uuid);
+        out.writeString(clusterUUID);
         writeSettingsToStream(transientSettings, out);
         writeSettingsToStream(persistentSettings, out);
         out.writeVInt(indices.size());
@@ -817,7 +817,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
         }

         if (newPersistentSettings != null) {
-            return new MetaData(metaData.uuid(),
+            return new MetaData(metaData.clusterUUID(),
                     metaData.version(),
                     metaData.transientSettings(),
                     newPersistentSettings.build(),
@@ -832,7 +832,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {

     public static class Builder {

-        private String uuid;
+        private String clusterUUID;
         private long version;

         private Settings transientSettings = Settings.Builder.EMPTY_SETTINGS;
@@ -843,14 +843,14 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
         private final ImmutableOpenMap.Builder<String, Custom> customs;

         public Builder() {
-            uuid = "_na_";
+            clusterUUID = "_na_";
             indices = ImmutableOpenMap.builder();
             templates = ImmutableOpenMap.builder();
             customs = ImmutableOpenMap.builder();
         }

         public Builder(MetaData metaData) {
-            this.uuid = metaData.uuid;
+            this.clusterUUID = metaData.clusterUUID;
             this.transientSettings = metaData.transientSettings;
             this.persistentSettings = metaData.persistentSettings;
             this.version = metaData.version;
@@ -988,20 +988,20 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
             return this;
         }

-        public Builder uuid(String uuid) {
-            this.uuid = uuid;
+        public Builder clusterUUID(String clusterUUID) {
+            this.clusterUUID = clusterUUID;
             return this;
         }

-        public Builder generateUuidIfNeeded() {
-            if (uuid.equals("_na_")) {
-                uuid = Strings.randomBase64UUID();
+        public Builder generateClusterUuidIfNeeded() {
+            if (clusterUUID.equals("_na_")) {
+                clusterUUID = Strings.randomBase64UUID();
             }
             return this;
         }

         public MetaData build() {
-            return new MetaData(uuid, version, transientSettings, persistentSettings, indices.build(), templates.build(), customs.build());
+            return new MetaData(clusterUUID, version, transientSettings, persistentSettings, indices.build(), templates.build(), customs.build());
         }

         public static String toXContent(MetaData metaData) throws IOException {
@@ -1018,7 +1018,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
             builder.startObject("meta-data");

             builder.field("version", metaData.version());
-            builder.field("uuid", metaData.uuid);
+            builder.field("cluster_uuid", metaData.clusterUUID);

             if (!metaData.persistentSettings().getAsMap().isEmpty()) {
                 builder.startObject("settings");
@@ -1110,8 +1110,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData> {
                 } else if (token.isValue()) {
                     if ("version".equals(currentFieldName)) {
                         builder.version = parser.longValue();
-                    } else if ("uuid".equals(currentFieldName)) {
-                        builder.uuid = parser.text();
+                    } else if ("cluster_uuid".equals(currentFieldName) || "uuid".equals(currentFieldName)) {
+                        builder.clusterUUID = parser.text();
                     }
                 }
             }
@@ -332,7 +332,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
             indexSettingsBuilder.put(SETTING_CREATION_DATE, new DateTime(DateTimeZone.UTC).getMillis());
         }

-        indexSettingsBuilder.put(SETTING_UUID, Strings.randomBase64UUID());
+        indexSettingsBuilder.put(SETTING_INDEX_UUID, Strings.randomBase64UUID());

         Settings actualIndexSettings = indexSettingsBuilder.build();

@@ -519,11 +519,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
             }

             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
-            logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.uuid());
+            logger.debug("processing [{}]: took {} done applying updated cluster_state (version: {}, uuid: {})", source, executionTime, newClusterState.version(), newClusterState.stateUUID());
             warnAboutSlowTaskIfNeeded(executionTime, source);
         } catch (Throwable t) {
             TimeValue executionTime = TimeValue.timeValueMillis(Math.max(0, TimeValue.nsecToMSec(System.nanoTime() - startTimeNS)));
-            StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.uuid()).append("], source [").append(source).append("]\n");
+            StringBuilder sb = new StringBuilder("failed to apply updated cluster state in ").append(executionTime).append(":\nversion [").append(newClusterState.version()).append("], uuid [").append(newClusterState.stateUUID()).append("], source [").append(source).append("]\n");
             sb.append(newClusterState.nodes().prettyPrint());
             sb.append(newClusterState.routingTable().prettyPrint());
             sb.append(newClusterState.readOnlyRoutingNodes().prettyPrint());
@@ -798,7 +798,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
             MetaData.Builder metaDataBuilder = MetaData.builder(updatedState.metaData()).removeAllIndices();
             for (IndexMetaData indexMetaData : updatedState.metaData()) {
                 IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.index());
-                if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.uuid()) &&
+                if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.indexUUID()) &&
                         currentIndexMetaData.version() == indexMetaData.version()) {
                     // safe to reuse
                     metaDataBuilder.put(currentIndexMetaData, false);
@@ -266,7 +266,7 @@ public class PublishClusterStateAction extends AbstractComponent {
             } else if (lastSeenClusterState != null) {
                 Diff<ClusterState> diff = lastSeenClusterState.readDiffFrom(in);
                 lastSeenClusterState = diff.apply(lastSeenClusterState);
-                logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.uuid(), request.bytes().length());
+                logger.debug("received diff cluster state version {} with uuid {}, diff size {}", lastSeenClusterState.version(), lastSeenClusterState.stateUUID(), request.bytes().length());
             } else {
                 logger.debug("received diff for but don't have any local cluster state - requesting full state");
                 throw new IncompatibleClusterStateVersionException("have no local cluster state");
@@ -227,7 +227,7 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i

                 MetaData.Builder metaDataBuilder = MetaData.builder(recoveredState.metaData());
                 // automatically generate a UID for the metadata if we need to
-                metaDataBuilder.generateUuidIfNeeded();
+                metaDataBuilder.generateClusterUuidIfNeeded();

                 if (recoveredState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false) || currentState.metaData().settings().getAsBoolean(MetaData.SETTING_READ_ONLY, false)) {
                     blocks.addGlobalBlock(MetaData.CLUSTER_READ_ONLY_BLOCK);
@@ -69,7 +69,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction

     @Override
     public void list(ShardId shardId, IndexMetaData indexMetaData, String[] nodesIds, ActionListener<NodesGatewayStartedShards> listener) {
-        execute(new Request(shardId, indexMetaData.getUUID(), nodesIds), listener);
+        execute(new Request(shardId, indexMetaData.getIndexUUID(), nodesIds), listener);
     }

     @Override
@@ -251,7 +251,7 @@ public class IndexService extends AbstractIndexComponent implements IndexCompone
     }

     public String indexUUID() {
-        return indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
+        return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
     }

     // NOTE: O(numShards) cost, but numShards should be smallish?
@@ -1337,10 +1337,10 @@ public class IndexShard extends AbstractIndexShardComponent {
     }

     private String getIndexUUID() {
-        assert indexSettings.get(IndexMetaData.SETTING_UUID) != null
+        assert indexSettings.get(IndexMetaData.SETTING_INDEX_UUID) != null
                 || indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT).before(Version.V_0_90_6) :
-                "version: " + indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) + " uuid: " + indexSettings.get(IndexMetaData.SETTING_UUID);
-        return indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
+                "version: " + indexSettings.getAsVersion(IndexMetaData.SETTING_VERSION_CREATED, null) + " uuid: " + indexSettings.get(IndexMetaData.SETTING_INDEX_UUID);
+        return indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
     }

     private Tuple<DocumentMapper, Mapping> docMapper(String type) {
@@ -19,7 +19,6 @@
package org.elasticsearch.index.shard;

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.env.NodeEnvironment;
@@ -29,9 +28,7 @@ import java.io.IOException;
import java.nio.file.FileStore;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class ShardPath {
@@ -85,7 +85,7 @@ public final class ShardPath {
      * <b>Note:</b> this method resolves custom data locations for the shard.
      */
     public static ShardPath loadShardPath(ESLogger logger, NodeEnvironment env, ShardId shardId, @IndexSettings Settings indexSettings) throws IOException {
-        final String indexUUID = indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
+        final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
         final Path[] paths = env.availableShardPaths(shardId);
         Path loadedPath = null;
         for (Path path : paths) {
@@ -152,7 +152,7 @@ public final class ShardPath {
         final Path dataPath;
         final Path statePath;

-        final String indexUUID = indexSettings.get(IndexMetaData.SETTING_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);
+        final String indexUUID = indexSettings.get(IndexMetaData.SETTING_INDEX_UUID, IndexMetaData.INDEX_UUID_NA_VALUE);

         if (NodeEnvironment.hasCustomDataPath(indexSettings)) {
             dataPath = env.resolveCustomLocation(indexSettings, shardId);
@@ -448,7 +448,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         try {
             if (clusterState.metaData().hasIndex(indexName)) {
                 final IndexMetaData index = clusterState.metaData().index(indexName);
-                throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]");
+                throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
             }
             deleteIndexStore(reason, metaData, clusterState);
         } catch (IOException e) {
@@ -467,13 +467,13 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
             String indexName = metaData.index();
             if (indices.containsKey(indexName)) {
                 String localUUid = indices.get(indexName).v1().indexUUID();
-                throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid+ "] [" + metaData.getUUID() + "]");
+                throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]");
             }
             if (clusterState.metaData().hasIndex(indexName) && (clusterState.nodes().localNode().masterNode() == true)) {
                 // we do not delete the store if it is a master eligible node and the index is still in the cluster state
                 // because we want to keep the meta data for indices around even if no shards are left here
                 final IndexMetaData index = clusterState.metaData().index(indexName);
-                throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getUUID() + "] [" + metaData.getUUID() + "]");
+                throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
             }
         }
         Index index = new Index(metaData.index());
@@ -300,7 +300,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                 try {
                     indicesService.createIndex(indexMetaData.index(), indexMetaData.settings(), event.state().nodes().localNode().id());
                 } catch (Throwable e) {
-                    sendFailShard(shard, indexMetaData.getUUID(), "failed to create index", e);
+                    sendFailShard(shard, indexMetaData.getIndexUUID(), "failed to create index", e);
                 }
             }
         }
@@ -370,7 +370,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
             }
             if (!typesToRefresh.isEmpty() && sendRefreshMapping) {
                 nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
-                        new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.uuid(),
+                        new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.indexUUID(),
                                 typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId())
                 );
             }
@@ -493,14 +493,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
             if (!indexService.hasShard(shardId) && shardRouting.started()) {
                 if (failedShards.containsKey(shardRouting.shardId())) {
                     if (nodes.masterNode() != null) {
-                        shardStateAction.resendShardFailed(shardRouting, indexMetaData.getUUID(),
+                        shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
                                 "master " + nodes.masterNode() + " marked shard as started, but shard has previous failed. resending shard failure.",
                                 nodes.masterNode()
                         );
                     }
                 } else {
                     // the master thinks we are started, but we don't have this shard at all, mark it as failed
-                    sendFailShard(shardRouting, indexMetaData.getUUID(), "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
+                    sendFailShard(shardRouting, indexMetaData.getIndexUUID(), "master [" + nodes.masterNode() + "] marked shard as started, but shard has not been created, mark shard as failed", null);
                 }
                 continue;
             }
@@ -610,7 +610,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
                             indexShard.shardId(), indexShard.state(), nodes.masterNode());
                 }
                 if (nodes.masterNode() != null) {
-                    shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(),
+                    shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(),
                             "master " + nodes.masterNode() + " marked shard as initializing, but shard state is [" + indexShard.state() + "], mark shard as started",
                             nodes.masterNode());
                 }
@@ -637,7 +637,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
         if (!indexService.hasShard(shardId)) {
             if (failedShards.containsKey(shardRouting.shardId())) {
                 if (nodes.masterNode() != null) {
-                    shardStateAction.resendShardFailed(shardRouting, indexMetaData.getUUID(),
+                    shardStateAction.resendShardFailed(shardRouting, indexMetaData.getIndexUUID(),
                             "master " + nodes.masterNode() + " marked shard as initializing, but shard is marked as failed, resend shard failure",
                             nodes.masterNode());
                 }
@@ -686,7 +686,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
             indexService.shard(shardId).recoverFromStore(indexShardRouting, new StoreRecoveryService.RecoveryListener() {
                 @Override
                 public void onRecoveryDone() {
-                    shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(), "after recovery from store");
+                    shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery from store");
                 }

                 @Override
@@ -754,7 +754,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic

             @Override
             public void onRecoveryDone(RecoveryState state) {
-                shardStateAction.shardStarted(shardRouting, indexMetaData.getUUID(), "after recovery (replica) from node [" + state.getSourceNode() + "]");
+                shardStateAction.shardStarted(shardRouting, indexMetaData.getIndexUUID(), "after recovery (replica) from node [" + state.getSourceNode() + "]");
             }

             @Override
@@ -208,7 +208,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
     // TODO will have to ammend this for shadow replicas so we don't delete the shared copy...
     private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTable indexShardRoutingTable) {
         List<Tuple<DiscoveryNode, ShardActiveRequest>> requests = new ArrayList<>(indexShardRoutingTable.size());
-        String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getUUID();
+        String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getIndexUUID();
         ClusterName clusterName = state.getClusterName();
         for (ShardRouting shardRouting : indexShardRoutingTable) {
             // Node can't be null, because otherwise shardCanBeDeleted() would have returned false
@@ -100,7 +100,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
             SETTING_VERSION_CREATED,
             SETTING_LEGACY_ROUTING_HASH_FUNCTION,
             SETTING_LEGACY_ROUTING_USE_TYPE,
-            SETTING_UUID,
+            SETTING_INDEX_UUID,
             SETTING_CREATION_DATE);

     // It's OK to change some settings, but we shouldn't allow simply removing them
@@ -221,7 +221,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
                     createIndexService.validateIndexName(renamedIndex, currentState);
                     createIndexService.validateIndexSettings(renamedIndex, snapshotIndexMetaData.settings());
                     IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex);
-                    indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID()));
+                    indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
                     if (!request.includeAliases() && !snapshotIndexMetaData.aliases().isEmpty()) {
                         // Remove all aliases - they shouldn't be restored
                         indexMdBuilder.removeAllAliases();
@@ -255,7 +255,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
                             aliases.add(alias.value);
                         }
                     }
-                    indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_UUID, currentIndexMetaData.uuid()));
+                    indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.indexUUID()));
                     IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndex).build();
                     rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource);
                     blocks.removeIndexBlock(renamedIndex, INDEX_CLOSED_BLOCK);
@@ -123,9 +123,9 @@ public class VersionTests extends ElasticsearchTestCase {
     }

     public void testIndexCreatedVersion() {
-        // an actual index has a IndexMetaData.SETTING_UUID
+        // an actual index has a IndexMetaData.SETTING_INDEX_UUID
         final Version version = randomFrom(Version.V_0_18_0, Version.V_0_90_13, Version.V_1_3_0);
-        assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
+        assertEquals(version, Version.indexCreated(Settings.builder().put(IndexMetaData.SETTING_INDEX_UUID, "foo").put(IndexMetaData.SETTING_VERSION_CREATED, version).build()));
     }

     public void testMinCompatVersion() {
@@ -247,7 +247,7 @@ public class ShardReplicationTests extends ElasticsearchTestCase {

         ClusterState.Builder state = ClusterState.builder(new ClusterName("test"));
         state.nodes(discoBuilder);
-        state.metaData(MetaData.builder().put(indexMetaData, false).generateUuidIfNeeded());
+        state.metaData(MetaData.builder().put(indexMetaData, false).generateClusterUuidIfNeeded());
         state.routingTable(RoutingTable.builder().add(IndexRoutingTable.builder(index).addIndexShard(indexShardRoutingBuilder.build())));
         return state.build();
     }
@@ -87,7 +87,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase {
         return createMockNode(name, settings, version, new PublishClusterStateAction.NewClusterStateListener() {
             @Override
             public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
-                logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.uuid());
+                logger.debug("Node [{}] onNewClusterState version [{}], uuid [{}]", name, clusterState.version(), clusterState.stateUUID());
                 newStateProcessed.onNewClusterStateProcessed();
             }
         });
@@ -392,7 +392,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase {
         MockNode nodeB = createMockNode("nodeB", noDiffPublishingSettings, Version.CURRENT, new PublishClusterStateAction.NewClusterStateListener() {
             @Override
             public void onNewClusterState(ClusterState clusterState, NewStateProcessed newStateProcessed) {
-                logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.uuid(), clusterState.wasReadFromDiff());
+                logger.debug("Got cluster state update, version [{}], guid [{}], from diff [{}]", clusterState.version(), clusterState.stateUUID(), clusterState.wasReadFromDiff());
                 assertFalse(clusterState.wasReadFromDiff());
                 newStateProcessed.onNewClusterStateProcessed();
             }
@@ -496,7 +496,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase {
             }
         });

-        ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.uuid(), clusterState) {
+        ClusterState unserializableClusterState = new ClusterState(clusterState.version(), clusterState.stateUUID(), clusterState) {
             @Override
             public Diff<ClusterState> diff(ClusterState previousState) {
                 return new Diff<ClusterState>() {
@@ -615,7 +615,7 @@ public class ClusterStateDiffPublishingTests extends ElasticsearchTestCase {
     public static class DelegatingClusterState extends ClusterState {

         public DelegatingClusterState(ClusterState clusterState) {
-            super(clusterState.version(), clusterState.uuid(), clusterState);
+            super(clusterState.version(), clusterState.stateUUID(), clusterState);
         }


@@ -116,7 +116,7 @@ public class ClusterStateDiffTests extends ElasticsearchIntegrationTest {
             try {
                 // Check non-diffable elements
                 assertThat(clusterStateFromDiffs.version(), equalTo(clusterState.version()));
-                assertThat(clusterStateFromDiffs.uuid(), equalTo(clusterState.uuid()));
+                assertThat(clusterStateFromDiffs.stateUUID(), equalTo(clusterState.stateUUID()));

                 // Check nodes
                 assertThat(clusterStateFromDiffs.nodes().nodes(), equalTo(clusterState.nodes().nodes()));
@@ -141,7 +141,7 @@ public class ClusterStateDiffTests extends ElasticsearchIntegrationTest {

                 // Check metadata
                 assertThat(clusterStateFromDiffs.metaData().version(), equalTo(clusterState.metaData().version()));
-                assertThat(clusterStateFromDiffs.metaData().uuid(), equalTo(clusterState.metaData().uuid()));
+                assertThat(clusterStateFromDiffs.metaData().clusterUUID(), equalTo(clusterState.metaData().clusterUUID()));
                 assertThat(clusterStateFromDiffs.metaData().transientSettings(), equalTo(clusterState.metaData().transientSettings()));
                 assertThat(clusterStateFromDiffs.metaData().persistentSettings(), equalTo(clusterState.metaData().persistentSettings()));
                 assertThat(clusterStateFromDiffs.metaData().indices(), equalTo(clusterState.metaData().indices()));
@@ -485,7 +485,7 @@ public class ClusterStateDiffTests extends ElasticsearchIntegrationTest {
                     }
                     break;
                 case 2:
-                    builder.settings(Settings.builder().put(part.settings()).put(IndexMetaData.SETTING_UUID, Strings.randomBase64UUID()));
+                    builder.settings(Settings.builder().put(part.settings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
                     break;
                 case 3:
                     builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
@@ -43,7 +43,7 @@ public class ShardStateActionTest extends ElasticsearchTestCase {
         final IndexMetaData indexMetaData = IndexMetaData.builder("test")
                 .settings(Settings.builder()
                         .put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
-                        .put(IndexMetaData.SETTING_UUID, "test_uuid"))
+                        .put(IndexMetaData.SETTING_INDEX_UUID, "test_uuid"))
                 .numberOfShards(2).numberOfReplicas(0)
                 .build();
         ClusterState.Builder stateBuilder = ClusterState.builder(ClusterName.DEFAULT)
@@ -66,16 +66,16 @@ public class ShardStateActionTest extends ElasticsearchTestCase {
         ArrayList<ShardStateAction.ShardRoutingEntry> listToFilter = new ArrayList<>();
         ArrayList<ShardStateAction.ShardRoutingEntry> expectedToBeApplied = new ArrayList<>();

-        listToFilter.add(new ShardStateAction.ShardRoutingEntry(initShard, indexMetaData.uuid() + "_suffix", "wrong_uuid"));
+        listToFilter.add(new ShardStateAction.ShardRoutingEntry(initShard, indexMetaData.indexUUID() + "_suffix", "wrong_uuid"));

-        listToFilter.add(new ShardStateAction.ShardRoutingEntry(relocatingShard.buildTargetRelocatingShard(), indexMetaData.uuid(), "relocating_to_node"));
+        listToFilter.add(new ShardStateAction.ShardRoutingEntry(relocatingShard.buildTargetRelocatingShard(), indexMetaData.indexUUID(), "relocating_to_node"));
         expectedToBeApplied.add(listToFilter.get(listToFilter.size() - 1));

-        listToFilter.add(new ShardStateAction.ShardRoutingEntry(startedShard, indexMetaData.uuid(), "started shard"));
+        listToFilter.add(new ShardStateAction.ShardRoutingEntry(startedShard, indexMetaData.indexUUID(), "started shard"));
         expectedToBeApplied.add(listToFilter.get(listToFilter.size() - 1));

         listToFilter.add(new ShardStateAction.ShardRoutingEntry(TestShardRouting.newShardRouting(initShard.index() + "_NA", initShard.id(),
-                initShard.currentNodeId(), initShard.primary(), initShard.state(), initShard.version()), indexMetaData.uuid(), "wrong_uuid"));
+                initShard.currentNodeId(), initShard.primary(), initShard.state(), initShard.version()), indexMetaData.indexUUID(), "wrong_uuid"));

         List<ShardStateAction.ShardRoutingEntry> toBeApplied = ShardStateAction.extractShardsToBeApplied(listToFilter, "for testing", state.metaData(), logger);
         if (toBeApplied.size() != expectedToBeApplied.size()) {
@@ -94,7 +94,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         Files.copy(resource, dst);
         MetaData read = format.read(dst);
         assertThat(read, notNullValue());
-        assertThat(read.uuid(), equalTo("3O1tDF1IRB6fSJ-GrTMUtg"));
+        assertThat(read.clusterUUID(), equalTo("3O1tDF1IRB6fSJ-GrTMUtg"));
         // indices are empty since they are serialized separately
     }

@@ -274,7 +274,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         final MetaData meta = randomMeta();
         format.write(meta, v1, dirs);
         final MetaData metaData = format.loadLatestState(logger, dirs);
-        assertEquals(meta.uuid(), metaData.uuid());
+        assertEquals(meta.clusterUUID(), metaData.clusterUUID());
         final Path path = randomFrom(dirs);
         final Path[] files = FileSystemUtils.files(path.resolve("_state"));
         assertEquals(1, files.length);
@@ -295,12 +295,12 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         final long v = randomInt(10);

         MetaData meta = randomMeta();
-        String uuid = meta.uuid();
+        String uuid = meta.clusterUUID();

         // write a first state file in the old format
         final Path dir2 = randomFrom(dirs);
         MetaData meta2 = randomMeta();
-        assertFalse(meta2.uuid().equals(uuid));
+        assertFalse(meta2.clusterUUID().equals(uuid));
         try (XContentBuilder xcontentBuilder = XContentFactory.contentBuilder(format.format(), Files.newOutputStream(dir2.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve(MetaStateService.GLOBAL_STATE_FILE_PREFIX + v)))) {
             xcontentBuilder.startObject();
             MetaData.Builder.toXContent(randomMeta(), xcontentBuilder, params);
@@ -313,7 +313,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         MetaData state = format.loadLatestState(logger, dirs);
         final Path path = randomFrom(dirs);
         assertTrue(Files.exists(path.resolve(MetaDataStateFormat.STATE_DIR_NAME).resolve("global-" + (v+1) + ".st")));
-        assertEquals(state.uuid(), uuid);
+        assertEquals(state.clusterUUID(), uuid);
     }

     @Test
@@ -358,8 +358,8 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
         Collections.shuffle(dirList, getRandom());
         MetaData loadedMetaData = format.loadLatestState(logger, dirList.toArray(new Path[0]));
         MetaData latestMetaData = meta.get(numStates-1);
-        assertThat(loadedMetaData.uuid(), not(equalTo("_na_")));
-        assertThat(loadedMetaData.uuid(), equalTo(latestMetaData.uuid()));
+        assertThat(loadedMetaData.clusterUUID(), not(equalTo("_na_")));
+        assertThat(loadedMetaData.clusterUUID(), equalTo(latestMetaData.clusterUUID()));
         ImmutableOpenMap<String,IndexMetaData> indices = loadedMetaData.indices();
         assertThat(indices.size(), equalTo(latestMetaData.indices().size()));
         for (IndexMetaData original : latestMetaData) {
@@ -392,7 +392,7 @@ public class MetaDataStateFormatTest extends ElasticsearchTestCase {
     private MetaData randomMeta() throws IOException {
         int numIndices = randomIntBetween(1, 10);
         MetaData.Builder mdBuilder = MetaData.builder();
-        mdBuilder.generateUuidIfNeeded();
+        mdBuilder.generateClusterUuidIfNeeded();
         for (int i = 0; i < numIndices; i++) {
             mdBuilder.put(indexBuilder(randomAsciiOfLength(10) + "idx-"+i));
         }
@@ -282,7 +282,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest {
             assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 2);
         }

-        String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid();
+        String metaDataUuid = client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID();
         assertThat(metaDataUuid, not(equalTo("_na_")));

         logger.info("--> closing first node, and indexing more data to the second node");
@@ -325,7 +325,7 @@ public class RecoveryFromGatewayTests extends ElasticsearchIntegrationTest {
         logger.info("--> running cluster_health (wait for the shards to startup)");
         ensureGreen();

-        assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().uuid(), equalTo(metaDataUuid));
+        assertThat(client().admin().cluster().prepareState().execute().get().getState().getMetaData().clusterUUID(), equalTo(metaDataUuid));

         for (int i = 0; i < 10; i++) {
             assertHitCount(client().prepareCount().setQuery(matchAllQuery()).execute().actionGet(), 3);
@@ -147,39 +147,39 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest {

         shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
         assertEquals(shardStateMetaData, getShardStateMetadata(shard));
-        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)));

         routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
         shard.updateRoutingEntry(routing, true);
         shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
         assertEquals(shardStateMetaData, getShardStateMetadata(shard));
-        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)));

         routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
         shard.updateRoutingEntry(routing, true);
         shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
         assertEquals(shardStateMetaData, getShardStateMetadata(shard));
-        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)));

         // test if we still write it even if the shard is not active
         ShardRouting inactiveRouting = TestShardRouting.newShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(), shard.shardRouting.currentNodeId(), null, null, true, ShardRoutingState.INITIALIZING, shard.shardRouting.version() + 1);
         shard.persistMetadata(inactiveRouting, shard.shardRouting);
         shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
         assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, getShardStateMetadata(shard));
-        assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+        assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)));


         shard.updateRoutingEntry(new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1), false);
         shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
         assertFalse("shard state persisted despite of persist=false", shardStateMetaData.equals(getShardStateMetadata(shard)));
-        assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+        assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)));


         routing = new ShardRouting(shard.shardRouting, shard.shardRouting.version() + 1);
         shard.updateRoutingEntry(routing, true);
         shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
         assertEquals(shardStateMetaData, getShardStateMetadata(shard));
-        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID)));
+        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.version(), routing.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID)));
     }

     public void testDeleteShardState() throws IOException {
@@ -232,7 +232,7 @@ public class IndexShardTests extends ElasticsearchSingleNodeTest {
         if (shardRouting == null) {
             return null;
         } else {
-            return new ShardStateMetaData(shardRouting.version(), shardRouting.primary(), shard.indexSettings.get(IndexMetaData.SETTING_UUID));
+            return new ShardStateMetaData(shardRouting.version(), shardRouting.primary(), shard.indexSettings.get(IndexMetaData.SETTING_INDEX_UUID));
         }
     }

@@ -35,7 +35,7 @@ public class ShardPathTests extends ElasticsearchTestCase {

     public void testLoadShardPath() throws IOException {
         try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
-            Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF");
+            Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF");
             Settings settings = builder.build();
             ShardId shardId = new ShardId("foo", 0);
             Path[] paths = env.availableShardPaths(shardId);
@@ -53,7 +53,7 @@ public class ShardPathTests extends ElasticsearchTestCase {
     @Test(expected = IllegalStateException.class)
     public void testFailLoadShardPathOnMultiState() throws IOException {
         try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
-            Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "0xDEADBEEF");
+            Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "0xDEADBEEF");
             Settings settings = builder.build();
             ShardId shardId = new ShardId("foo", 0);
             Path[] paths = env.availableShardPaths(shardId);
@@ -67,7 +67,7 @@ public class ShardPathTests extends ElasticsearchTestCase {
     @Test(expected = IllegalStateException.class)
     public void testFailLoadShardPathIndexUUIDMissmatch() throws IOException {
         try (final NodeEnvironment env = newNodeEnvironment(settingsBuilder().build())) {
-            Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_UUID, "foobar");
+            Settings.Builder builder = settingsBuilder().put(IndexMetaData.SETTING_INDEX_UUID, "foobar");
             Settings settings = builder.build();
             ShardId shardId = new ShardId("foo", 0);
             Path[] paths = env.availableShardPaths(shardId);
@@ -234,7 +234,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
                 .put("chunk_size", randomIntBetween(100, 1000), ByteSizeUnit.BYTES)));

         createIndex("test");
-        String originalIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID);
+        String originalIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID);
         assertTrue(originalIndexUUID, originalIndexUUID != null);
         assertFalse(originalIndexUUID, originalIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE));
         ensureGreen();
@@ -247,7 +247,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
         assertAcked(prepareCreate("test").setSettings(Settings.builder()
                 .put(SETTING_NUMBER_OF_SHARDS, numShards.numPrimaries)));
         ensureGreen();
-        String newIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID);
+        String newIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID);
         assertTrue(newIndexUUID, newIndexUUID != null);
         assertFalse(newIndexUUID, newIndexUUID.equals(IndexMetaData.INDEX_UUID_NA_VALUE));
         assertFalse(newIndexUUID, newIndexUUID.equals(originalIndexUUID));
@@ -259,7 +259,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

         ensureGreen();
-        String newAfterRestoreIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_UUID);
+        String newAfterRestoreIndexUUID = client().admin().indices().prepareGetSettings("test").get().getSetting("test", IndexMetaData.SETTING_INDEX_UUID);
         assertTrue("UUID has changed after restore: " + newIndexUUID + " vs. " + newAfterRestoreIndexUUID, newIndexUUID.equals(newAfterRestoreIndexUUID));

         logger.info("--> restore indices with different names");
@@ -267,7 +267,7 @@ public class SharedClusterSnapshotRestoreTests extends AbstractSnapshotTests {
                 .setRenamePattern("(.+)").setRenameReplacement("$1-copy").setWaitForCompletion(true).execute().actionGet();
         assertThat(restoreSnapshotResponse.getRestoreInfo().totalShards(), greaterThan(0));

-        String copyRestoreUUID = client().admin().indices().prepareGetSettings("test-copy").get().getSetting("test-copy", IndexMetaData.SETTING_UUID);
+        String copyRestoreUUID = client().admin().indices().prepareGetSettings("test-copy").get().getSetting("test-copy", IndexMetaData.SETTING_INDEX_UUID);
         assertFalse("UUID has been reused on restore: " + copyRestoreUUID + " vs. " + originalIndexUUID, copyRestoreUUID.equals(originalIndexUUID));
     }

@@ -1086,7 +1086,7 @@ public abstract class ElasticsearchIntegrationTest extends ElasticsearchTestCase
         // Check that the non-master node has the same version of the cluster state as the master and that this node didn't disconnect from the master
         if (masterClusterState.version() == localClusterState.version() && localClusterState.nodes().nodes().containsKey(masterId)) {
             try {
-                assertEquals("clusterstate UUID does not match", masterClusterState.uuid(), localClusterState.uuid());
+                assertEquals("clusterstate UUID does not match", masterClusterState.stateUUID(), localClusterState.stateUUID());
                 // We cannot compare serialization bytes since serialization order of maps is not guaranteed
                 // but we can compare serialization sizes - they should be the same
                 assertEquals("clusterstate size does not match", masterClusterStateSize, localClusterStateSize);