Cleanup IndexMetaData
This commit cleans up IndexMetaData. In particular, all duplicate getters (X and getX) have been collapsed into one (getX). Further, the number of shards and number of replicas settings are now parsed once and saved off as fields.
This commit is contained in:
parent
2e445d3ede
commit
88dd3256fb
|
@ -70,7 +70,7 @@ public class ClusterIndexHealth implements Iterable<ClusterShardHealth>, Streama
|
|||
}
|
||||
|
||||
public ClusterIndexHealth(IndexMetaData indexMetaData, IndexRoutingTable indexRoutingTable) {
|
||||
this.index = indexMetaData.index();
|
||||
this.index = indexMetaData.getIndex();
|
||||
this.numberOfShards = indexMetaData.getNumberOfShards();
|
||||
this.numberOfReplicas = indexMetaData.getNumberOfReplicas();
|
||||
this.validationFailures = indexRoutingTable.validate(indexMetaData);
|
||||
|
|
|
@ -74,7 +74,7 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<Ty
|
|||
return;
|
||||
}
|
||||
|
||||
ImmutableOpenMap<String, MappingMetaData> mappings = state.metaData().getIndices().get(concreteIndex).mappings();
|
||||
ImmutableOpenMap<String, MappingMetaData> mappings = state.metaData().getIndices().get(concreteIndex).getMappings();
|
||||
if (mappings.isEmpty()) {
|
||||
listener.onResponse(new TypesExistsResponse(false));
|
||||
return;
|
||||
|
|
|
@ -80,7 +80,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
|
|||
continue;
|
||||
}
|
||||
|
||||
Settings settings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), indexMetaData.settings());
|
||||
Settings settings = SettingsFilter.filterSettings(settingsFilter.getPatterns(), indexMetaData.getSettings());
|
||||
if (request.humanReadable()) {
|
||||
settings = IndexMetaData.addHumanReadableSettings(settings);
|
||||
}
|
||||
|
|
|
@ -77,7 +77,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
|
|||
if (request.request().realtime && // if the realtime flag is set
|
||||
request.request().preference() == null && // the preference flag is not already set
|
||||
indexMeta != null && // and we have the index
|
||||
IndexMetaData.isIndexUsingShadowReplicas(indexMeta.settings())) { // and the index uses shadow replicas
|
||||
IndexMetaData.isIndexUsingShadowReplicas(indexMeta.getSettings())) { // and the index uses shadow replicas
|
||||
// set the preference for the request to use "_primary" automatically
|
||||
request.request().preference(Preference.PRIMARY.type());
|
||||
}
|
||||
|
|
|
@ -566,7 +566,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
routing(metaData.resolveIndexRouting(routing, index));
|
||||
// resolve timestamp if provided externally
|
||||
if (timestamp != null) {
|
||||
Version version = Version.indexCreated(metaData.getIndices().get(concreteIndex).settings());
|
||||
Version version = Version.indexCreated(metaData.getIndices().get(concreteIndex).getSettings());
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp,
|
||||
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER,
|
||||
version);
|
||||
|
@ -592,7 +592,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
if (parseContext.shouldParseTimestamp()) {
|
||||
timestamp = parseContext.timestamp();
|
||||
if (timestamp != null) {
|
||||
Version version = Version.indexCreated(metaData.getIndices().get(concreteIndex).settings());
|
||||
Version version = Version.indexCreated(metaData.getIndices().get(concreteIndex).getSettings());
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp, mappingMd.timestamp().dateTimeFormatter(), version);
|
||||
}
|
||||
}
|
||||
|
@ -642,7 +642,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
if (defaultTimestamp.equals(TimestampFieldMapper.Defaults.DEFAULT_TIMESTAMP)) {
|
||||
timestamp = Long.toString(System.currentTimeMillis());
|
||||
} else {
|
||||
Version version = Version.indexCreated(metaData.getIndices().get(concreteIndex).settings());
|
||||
Version version = Version.indexCreated(metaData.getIndices().get(concreteIndex).getSettings());
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), version);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -740,7 +740,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
if (shard.relocating()) {
|
||||
numberOfPendingShardInstances++;
|
||||
}
|
||||
} else if (shouldExecuteReplication(indexMetaData.settings()) == false) {
|
||||
} else if (shouldExecuteReplication(indexMetaData.getSettings()) == false) {
|
||||
// If the replicas use shadow replicas, there is no reason to
|
||||
// perform the action on the replica, so skip it and
|
||||
// immediately return
|
||||
|
@ -770,7 +770,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
// we have to replicate to the other copy
|
||||
numberOfPendingShardInstances += 1;
|
||||
}
|
||||
} else if (shouldExecuteReplication(indexMetaData.settings()) == false) {
|
||||
} else if (shouldExecuteReplication(indexMetaData.getSettings()) == false) {
|
||||
// If the replicas use shadow replicas, there is no reason to
|
||||
// perform the action on the replica, so skip it and
|
||||
// immediately return
|
||||
|
@ -849,7 +849,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
|
|||
if (shard.relocating()) {
|
||||
performOnReplica(shard, shard.relocatingNodeId());
|
||||
}
|
||||
} else if (shouldExecuteReplication(indexMetaData.settings())) {
|
||||
} else if (shouldExecuteReplication(indexMetaData.getSettings())) {
|
||||
performOnReplica(shard, shard.currentNodeId());
|
||||
if (shard.relocating()) {
|
||||
performOnReplica(shard, shard.relocatingNodeId());
|
||||
|
|
|
@ -143,7 +143,7 @@ public class ClusterChangedEvent {
|
|||
if (previousMetaData == null) {
|
||||
return true;
|
||||
}
|
||||
IndexMetaData previousIndexMetaData = previousMetaData.index(current.index());
|
||||
IndexMetaData previousIndexMetaData = previousMetaData.index(current.getIndex());
|
||||
// no need to check on version, since disco modules will make sure to use the
|
||||
// same instance if its a version match
|
||||
if (previousIndexMetaData == current) {
|
||||
|
|
|
@ -449,17 +449,17 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
|
||||
builder.startObject("indices");
|
||||
for (IndexMetaData indexMetaData : metaData()) {
|
||||
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.startObject(indexMetaData.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
|
||||
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
|
||||
builder.field("state", indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
|
||||
|
||||
builder.startObject("settings");
|
||||
Settings settings = indexMetaData.settings();
|
||||
Settings settings = indexMetaData.getSettings();
|
||||
settings.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
|
||||
builder.startObject("mappings");
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
|
||||
byte[] mappingSource = cursor.value.source().uncompressed();
|
||||
XContentParser parser = XContentFactory.xContent(mappingSource).createParser(mappingSource);
|
||||
Map<String, Object> mapping = parser.map();
|
||||
|
@ -473,7 +473,7 @@ public class ClusterState implements ToXContent, Diffable<ClusterState> {
|
|||
builder.endObject();
|
||||
|
||||
builder.startArray("aliases");
|
||||
for (ObjectCursor<String> cursor : indexMetaData.aliases().keys()) {
|
||||
for (ObjectCursor<String> cursor : indexMetaData.getAliases().keys()) {
|
||||
builder.value(cursor.value);
|
||||
}
|
||||
builder.endArray();
|
||||
|
|
|
@ -282,30 +282,30 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
|
|||
}
|
||||
|
||||
public Builder addBlocks(IndexMetaData indexMetaData) {
|
||||
if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
|
||||
addIndexBlock(indexMetaData.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
|
||||
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
addIndexBlock(indexMetaData.getIndex(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
|
||||
}
|
||||
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
|
||||
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
|
||||
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_READ_ONLY, false)) {
|
||||
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
|
||||
}
|
||||
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
|
||||
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_BLOCK);
|
||||
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_READ, false)) {
|
||||
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
|
||||
}
|
||||
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
|
||||
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_WRITE_BLOCK);
|
||||
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_WRITE, false)) {
|
||||
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
|
||||
}
|
||||
if (indexMetaData.settings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
|
||||
addIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_METADATA_BLOCK);
|
||||
if (indexMetaData.getSettings().getAsBoolean(IndexMetaData.SETTING_BLOCKS_METADATA, false)) {
|
||||
addIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder updateBlocks(IndexMetaData indexMetaData) {
|
||||
removeIndexBlock(indexMetaData.index(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
|
||||
removeIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
|
||||
removeIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_READ_BLOCK);
|
||||
removeIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_WRITE_BLOCK);
|
||||
removeIndexBlock(indexMetaData.index(), IndexMetaData.INDEX_METADATA_BLOCK);
|
||||
removeIndexBlock(indexMetaData.getIndex(), MetaDataIndexStateService.INDEX_CLOSED_BLOCK);
|
||||
removeIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_ONLY_BLOCK);
|
||||
removeIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
|
||||
removeIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
|
||||
removeIndexBlock(indexMetaData.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
|
||||
return addBlocks(indexMetaData);
|
||||
}
|
||||
|
||||
|
|
|
@ -168,8 +168,8 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
|
||||
public static final String INDEX_UUID_NA_VALUE = "_na_";
|
||||
|
||||
|
||||
|
||||
private final int numberOfShards;
|
||||
private final int numberOfReplicas;
|
||||
|
||||
private final String index;
|
||||
private final long version;
|
||||
|
@ -195,19 +195,32 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
private final org.apache.lucene.util.Version minimumCompatibleLuceneVersion;
|
||||
|
||||
private IndexMetaData(String index, long version, State state, Settings settings, ImmutableOpenMap<String, MappingMetaData> mappings, ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, Custom> customs) {
|
||||
if (settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null) == null) {
|
||||
Integer maybeNumberOfShards = settings.getAsInt(SETTING_NUMBER_OF_SHARDS, null);
|
||||
if (maybeNumberOfShards == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfShards for index [" + index + "]");
|
||||
}
|
||||
if (settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null) == null) {
|
||||
int numberOfShards = maybeNumberOfShards;
|
||||
if (numberOfShards <= 0) {
|
||||
throw new IllegalArgumentException("must specify positive number of shards for index [" + index + "]");
|
||||
}
|
||||
|
||||
Integer maybeNumberOfReplicas = settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, null);
|
||||
if (maybeNumberOfReplicas == null) {
|
||||
throw new IllegalArgumentException("must specify numberOfReplicas for index [" + index + "]");
|
||||
}
|
||||
int numberOfReplicas = maybeNumberOfReplicas;
|
||||
if (numberOfReplicas < 0) {
|
||||
throw new IllegalArgumentException("must specify non-negative number of replicas for index [" + index + "]");
|
||||
}
|
||||
this.index = index;
|
||||
this.version = version;
|
||||
this.state = state;
|
||||
this.settings = settings;
|
||||
this.mappings = mappings;
|
||||
this.customs = customs;
|
||||
this.totalNumberOfShards = numberOfShards() * (numberOfReplicas() + 1);
|
||||
this.numberOfShards = numberOfShards;
|
||||
this.numberOfReplicas = numberOfReplicas;
|
||||
this.totalNumberOfShards = numberOfShards * (numberOfReplicas + 1);
|
||||
this.aliases = aliases;
|
||||
|
||||
Map<String, String> requireMap = settings.getByPrefix("index.routing.allocation.require.").getAsMap();
|
||||
|
@ -242,20 +255,12 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
}
|
||||
|
||||
public String index() {
|
||||
public String getIndex() {
|
||||
return index;
|
||||
}
|
||||
|
||||
public String getIndex() {
|
||||
return index();
|
||||
}
|
||||
|
||||
public String indexUUID() {
|
||||
return settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
|
||||
}
|
||||
|
||||
public String getIndexUUID() {
|
||||
return indexUUID();
|
||||
return settings.get(SETTING_INDEX_UUID, INDEX_UUID_NA_VALUE);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -263,17 +268,13 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
*/
|
||||
public boolean isSameUUID(String otherUUID) {
|
||||
assert otherUUID != null;
|
||||
assert indexUUID() != null;
|
||||
if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(indexUUID())) {
|
||||
assert getIndexUUID() != null;
|
||||
if (INDEX_UUID_NA_VALUE.equals(otherUUID) || INDEX_UUID_NA_VALUE.equals(getIndexUUID())) {
|
||||
return true;
|
||||
}
|
||||
return otherUUID.equals(getIndexUUID());
|
||||
}
|
||||
|
||||
public long version() {
|
||||
return this.version;
|
||||
}
|
||||
|
||||
public long getVersion() {
|
||||
return this.version;
|
||||
}
|
||||
|
@ -282,26 +283,18 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
* Return the {@link Version} on which this index has been created. This
|
||||
* information is typically useful for backward compatibility.
|
||||
*/
|
||||
public Version creationVersion() {
|
||||
return indexCreatedVersion;
|
||||
}
|
||||
|
||||
public Version getCreationVersion() {
|
||||
return creationVersion();
|
||||
return indexCreatedVersion;
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the {@link Version} on which this index has been upgraded. This
|
||||
* information is typically useful for backward compatibility.
|
||||
*/
|
||||
public Version upgradeVersion() {
|
||||
public Version getUpgradedVersion() {
|
||||
return indexUpgradedVersion;
|
||||
}
|
||||
|
||||
public Version getUpgradeVersion() {
|
||||
return upgradeVersion();
|
||||
}
|
||||
|
||||
/**
|
||||
* Return the {@link org.apache.lucene.util.Version} of the oldest lucene segment in the index
|
||||
*/
|
||||
|
@ -309,68 +302,36 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
return minimumCompatibleLuceneVersion;
|
||||
}
|
||||
|
||||
public long creationDate() {
|
||||
public long getCreationDate() {
|
||||
return settings.getAsLong(SETTING_CREATION_DATE, -1l);
|
||||
}
|
||||
|
||||
public long getCreationDate() {
|
||||
return creationDate();
|
||||
}
|
||||
|
||||
public State state() {
|
||||
public State getState() {
|
||||
return this.state;
|
||||
}
|
||||
|
||||
public State getState() {
|
||||
return state();
|
||||
}
|
||||
|
||||
public int numberOfShards() {
|
||||
return settings.getAsInt(SETTING_NUMBER_OF_SHARDS, -1);
|
||||
}
|
||||
|
||||
public int getNumberOfShards() {
|
||||
return numberOfShards();
|
||||
}
|
||||
|
||||
public int numberOfReplicas() {
|
||||
return settings.getAsInt(SETTING_NUMBER_OF_REPLICAS, -1);
|
||||
return numberOfShards;
|
||||
}
|
||||
|
||||
public int getNumberOfReplicas() {
|
||||
return numberOfReplicas();
|
||||
}
|
||||
|
||||
public int totalNumberOfShards() {
|
||||
return totalNumberOfShards;
|
||||
return numberOfReplicas;
|
||||
}
|
||||
|
||||
public int getTotalNumberOfShards() {
|
||||
return totalNumberOfShards();
|
||||
}
|
||||
|
||||
public Settings settings() {
|
||||
return settings;
|
||||
return totalNumberOfShards;
|
||||
}
|
||||
|
||||
public Settings getSettings() {
|
||||
return settings();
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, AliasMetaData> aliases() {
|
||||
return this.aliases;
|
||||
return settings;
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, AliasMetaData> getAliases() {
|
||||
return aliases();
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, MappingMetaData> mappings() {
|
||||
return mappings;
|
||||
return this.aliases;
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, MappingMetaData> getMappings() {
|
||||
return mappings();
|
||||
return mappings;
|
||||
}
|
||||
|
||||
@Nullable
|
||||
|
@ -394,10 +355,6 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
return mappings.get(MapperService.DEFAULT_MAPPING);
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, Custom> customs() {
|
||||
return this.customs;
|
||||
}
|
||||
|
||||
public ImmutableOpenMap<String, Custom> getCustoms() {
|
||||
return this.customs;
|
||||
}
|
||||
|
@ -621,10 +578,10 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
|
||||
public Builder(IndexMetaData indexMetaData) {
|
||||
this.index = indexMetaData.index();
|
||||
this.index = indexMetaData.getIndex();
|
||||
this.state = indexMetaData.state;
|
||||
this.version = indexMetaData.version;
|
||||
this.settings = indexMetaData.settings();
|
||||
this.settings = indexMetaData.getSettings();
|
||||
this.mappings = ImmutableOpenMap.builder(indexMetaData.mappings);
|
||||
this.aliases = ImmutableOpenMap.builder(indexMetaData.aliases);
|
||||
this.customs = ImmutableOpenMap.builder(indexMetaData.customs);
|
||||
|
@ -761,21 +718,21 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
|
||||
public static void toXContent(IndexMetaData indexMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
|
||||
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.startObject(indexMetaData.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
|
||||
builder.field("version", indexMetaData.version());
|
||||
builder.field("state", indexMetaData.state().toString().toLowerCase(Locale.ENGLISH));
|
||||
builder.field("version", indexMetaData.getVersion());
|
||||
builder.field("state", indexMetaData.getState().toString().toLowerCase(Locale.ENGLISH));
|
||||
|
||||
boolean binary = params.paramAsBoolean("binary", false);
|
||||
|
||||
builder.startObject("settings");
|
||||
for (Map.Entry<String, String> entry : indexMetaData.settings().getAsMap().entrySet()) {
|
||||
for (Map.Entry<String, String> entry : indexMetaData.getSettings().getAsMap().entrySet()) {
|
||||
builder.field(entry.getKey(), entry.getValue());
|
||||
}
|
||||
builder.endObject();
|
||||
|
||||
builder.startArray("mappings");
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
|
||||
if (binary) {
|
||||
builder.value(cursor.value.source().compressed());
|
||||
} else {
|
||||
|
@ -788,14 +745,14 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
|
|||
}
|
||||
builder.endArray();
|
||||
|
||||
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.customs()) {
|
||||
for (ObjectObjectCursor<String, Custom> cursor : indexMetaData.getCustoms()) {
|
||||
builder.startObject(cursor.key, XContentBuilder.FieldCaseConversion.NONE);
|
||||
cursor.value.toXContent(builder, params);
|
||||
builder.endObject();
|
||||
}
|
||||
|
||||
builder.startObject("aliases");
|
||||
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
|
||||
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
|
||||
AliasMetaData.Builder.toXContent(cursor.value, builder, params);
|
||||
}
|
||||
builder.endObject();
|
||||
|
|
|
@ -253,7 +253,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
|
|||
// Shouldn't happen
|
||||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias);
|
||||
AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias);
|
||||
boolean filteringRequired = aliasMetaData != null && aliasMetaData.filteringRequired();
|
||||
if (!filteringRequired) {
|
||||
return null;
|
||||
|
@ -272,7 +272,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
|
|||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
|
||||
AliasMetaData aliasMetaData = indexMetaData.aliases().get(alias);
|
||||
AliasMetaData aliasMetaData = indexMetaData.getAliases().get(alias);
|
||||
// Check that this is an alias for the current index
|
||||
// Otherwise - skip it
|
||||
if (aliasMetaData != null) {
|
||||
|
|
|
@ -184,8 +184,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
int totalNumberOfShards = 0;
|
||||
int numberOfShards = 0;
|
||||
for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
|
||||
totalNumberOfShards += cursor.value.totalNumberOfShards();
|
||||
numberOfShards += cursor.value.numberOfShards();
|
||||
totalNumberOfShards += cursor.value.getTotalNumberOfShards();
|
||||
numberOfShards += cursor.value.getNumberOfShards();
|
||||
}
|
||||
this.totalNumberOfShards = totalNumberOfShards;
|
||||
this.numberOfShards = numberOfShards;
|
||||
|
@ -353,7 +353,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
|
||||
} else {
|
||||
filteredMappings = ImmutableOpenMap.builder();
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.mappings()) {
|
||||
for (ObjectObjectCursor<String, MappingMetaData> cursor : indexMetaData.getMappings()) {
|
||||
if (Regex.simpleMatch(types, cursor.key)) {
|
||||
filteredMappings.put(cursor.key, cursor.value);
|
||||
}
|
||||
|
@ -854,19 +854,19 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
// we know its a new one, increment the version and store
|
||||
indexMetaDataBuilder.version(indexMetaDataBuilder.version() + 1);
|
||||
IndexMetaData indexMetaData = indexMetaDataBuilder.build();
|
||||
indices.put(indexMetaData.index(), indexMetaData);
|
||||
indices.put(indexMetaData.getIndex(), indexMetaData);
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder put(IndexMetaData indexMetaData, boolean incrementVersion) {
|
||||
if (indices.get(indexMetaData.index()) == indexMetaData) {
|
||||
if (indices.get(indexMetaData.getIndex()) == indexMetaData) {
|
||||
return this;
|
||||
}
|
||||
// if we put a new index metadata, increment its version
|
||||
if (incrementVersion) {
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).version(indexMetaData.version() + 1).build();
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).version(indexMetaData.getVersion() + 1).build();
|
||||
}
|
||||
indices.put(indexMetaData.index(), indexMetaData);
|
||||
indices.put(indexMetaData.getIndex(), indexMetaData);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -937,7 +937,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
put(IndexMetaData.builder(indexMetaData)
|
||||
.settings(settingsBuilder().put(indexMetaData.settings()).put(settings)));
|
||||
.settings(settingsBuilder().put(indexMetaData.getSettings()).put(settings)));
|
||||
}
|
||||
return this;
|
||||
}
|
||||
|
@ -1003,7 +1003,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
// do the required operations, the bottleneck isn't resolving expressions into concrete indices.
|
||||
List<String> allIndicesLst = new ArrayList<>();
|
||||
for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
|
||||
allIndicesLst.add(cursor.value.index());
|
||||
allIndicesLst.add(cursor.value.getIndex());
|
||||
}
|
||||
String[] allIndices = allIndicesLst.toArray(new String[allIndicesLst.size()]);
|
||||
|
||||
|
@ -1011,10 +1011,10 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
|
|||
List<String> allClosedIndicesLst = new ArrayList<>();
|
||||
for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
|
||||
IndexMetaData indexMetaData = cursor.value;
|
||||
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
|
||||
allOpenIndicesLst.add(indexMetaData.index());
|
||||
} else if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
|
||||
allClosedIndicesLst.add(indexMetaData.index());
|
||||
if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
allOpenIndicesLst.add(indexMetaData.getIndex());
|
||||
} else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
allClosedIndicesLst.add(indexMetaData.getIndex());
|
||||
}
|
||||
}
|
||||
String[] allOpenIndices = allOpenIndicesLst.toArray(new String[allOpenIndicesLst.size()]);
|
||||
|
|
|
@ -437,16 +437,16 @@ public class MetaDataCreateIndexService extends AbstractComponent {
|
|||
}
|
||||
|
||||
indexService.indicesLifecycle().beforeIndexAddedToCluster(new Index(request.index()),
|
||||
indexMetaData.settings());
|
||||
indexMetaData.getSettings());
|
||||
|
||||
MetaData newMetaData = MetaData.builder(currentState.metaData())
|
||||
.put(indexMetaData, false)
|
||||
.build();
|
||||
|
||||
String maybeShadowIndicator = IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.settings()) ? "s" : "";
|
||||
String maybeShadowIndicator = IndexMetaData.isIndexUsingShadowReplicas(indexMetaData.getSettings()) ? "s" : "";
|
||||
logger.info("[{}] creating index, cause [{}], templates {}, shards [{}]/[{}{}], mappings {}",
|
||||
request.index(), request.cause(), templateNames, indexMetaData.numberOfShards(),
|
||||
indexMetaData.numberOfReplicas(), maybeShadowIndicator, mappings.keySet());
|
||||
request.index(), request.cause(), templateNames, indexMetaData.getNumberOfShards(),
|
||||
indexMetaData.getNumberOfReplicas(), maybeShadowIndicator, mappings.keySet());
|
||||
|
||||
ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
|
||||
if (!request.blocks().isEmpty()) {
|
||||
|
|
|
@ -92,27 +92,27 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
|||
String filter = aliasAction.filter();
|
||||
if (Strings.hasLength(filter)) {
|
||||
// parse the filter, in order to validate it
|
||||
IndexService indexService = indices.get(indexMetaData.index());
|
||||
IndexService indexService = indices.get(indexMetaData.getIndex());
|
||||
if (indexService == null) {
|
||||
indexService = indicesService.indexService(indexMetaData.index());
|
||||
indexService = indicesService.indexService(indexMetaData.getIndex());
|
||||
if (indexService == null) {
|
||||
// temporarily create the index and add mappings so we can parse the filter
|
||||
try {
|
||||
indexService = indicesService.createIndex(indexMetaData);
|
||||
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, false);
|
||||
}
|
||||
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
|
||||
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
|
||||
MappingMetaData mappingMetaData = cursor.value;
|
||||
indexService.mapperService().merge(mappingMetaData.type(), mappingMetaData.source(), false, false);
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.index());
|
||||
logger.warn("[{}] failed to temporary create in order to apply alias action", e, indexMetaData.getIndex());
|
||||
continue;
|
||||
}
|
||||
indicesToClose.add(indexMetaData.index());
|
||||
indicesToClose.add(indexMetaData.getIndex());
|
||||
}
|
||||
indices.put(indexMetaData.index(), indexService);
|
||||
indices.put(indexMetaData.getIndex(), indexService);
|
||||
}
|
||||
|
||||
aliasValidator.validateAliasFilter(aliasAction.alias(), filter, indexService.queryParserService());
|
||||
|
@ -124,14 +124,14 @@ public class MetaDataIndexAliasesService extends AbstractComponent {
|
|||
.searchRouting(aliasAction.searchRouting())
|
||||
.build();
|
||||
// Check if this alias already exists
|
||||
AliasMetaData aliasMd = indexMetaData.aliases().get(aliasAction.alias());
|
||||
AliasMetaData aliasMd = indexMetaData.getAliases().get(aliasAction.alias());
|
||||
if (aliasMd != null && aliasMd.equals(newAliasMd)) {
|
||||
// It's the same alias - ignore it
|
||||
continue;
|
||||
}
|
||||
indexMetaDataBuilder.putAlias(newAliasMd);
|
||||
} else if (aliasAction.actionType() == AliasAction.Type.REMOVE) {
|
||||
if (!indexMetaData.aliases().containsKey(aliasAction.alias())) {
|
||||
if (!indexMetaData.getAliases().containsKey(aliasAction.alias())) {
|
||||
// This alias doesn't exist - ignore
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -90,7 +90,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
|
||||
if (indexMetaData.state() != IndexMetaData.State.CLOSE) {
|
||||
if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
|
||||
IndexRoutingTable indexRoutingTable = currentState.routingTable().index(index);
|
||||
for (IndexShardRoutingTable shard : indexRoutingTable) {
|
||||
for (ShardRouting shardRouting : shard) {
|
||||
|
@ -151,7 +151,7 @@ public class MetaDataIndexStateService extends AbstractComponent {
|
|||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(index);
|
||||
}
|
||||
if (indexMetaData.state() != IndexMetaData.State.OPEN) {
|
||||
if (indexMetaData.getState() != IndexMetaData.State.OPEN) {
|
||||
indicesToOpen.add(index);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.elasticsearch.index.mapper.MapperService;
|
|||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
|
||||
import java.util.Locale;
|
||||
import java.util.Set;
|
||||
|
||||
import static java.util.Collections.unmodifiableSet;
|
||||
|
@ -82,7 +81,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
* Checks if the index was already opened by this version of Elasticsearch and doesn't require any additional checks.
|
||||
*/
|
||||
private boolean isUpgraded(IndexMetaData indexMetaData) {
|
||||
return indexMetaData.upgradeVersion().onOrAfter(Version.V_3_0_0);
|
||||
return indexMetaData.getUpgradedVersion().onOrAfter(Version.V_3_0_0);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -102,7 +101,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
* Returns true if this index can be supported by the current version of elasticsearch
|
||||
*/
|
||||
private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
|
||||
if (indexMetaData.creationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
|
||||
if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
|
||||
// The index was created with elasticsearch that was using Lucene 5.2.1
|
||||
return true;
|
||||
}
|
||||
|
@ -160,7 +159,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
if (indexMetaData.getCreationVersion().before(Version.V_2_0_0_beta1)) {
|
||||
// TODO: can we somehow only do this *once* for a pre-2.0 index? Maybe we could stuff a "fake marker setting" here? Seems hackish...
|
||||
// Created lazily if we find any settings that are missing units:
|
||||
Settings settings = indexMetaData.settings();
|
||||
Settings settings = indexMetaData.getSettings();
|
||||
Settings.Builder newSettings = null;
|
||||
for(String byteSizeSetting : INDEX_BYTES_SIZE_SETTINGS) {
|
||||
String value = settings.get(byteSizeSetting);
|
||||
|
@ -199,7 +198,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
if (newSettings != null) {
|
||||
// At least one setting was changed:
|
||||
return IndexMetaData.builder(indexMetaData)
|
||||
.version(indexMetaData.version())
|
||||
.version(indexMetaData.getVersion())
|
||||
.settings(newSettings.build())
|
||||
.build();
|
||||
}
|
||||
|
@ -215,7 +214,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
*/
|
||||
private void checkMappingsCompatibility(IndexMetaData indexMetaData) {
|
||||
Index index = new Index(indexMetaData.getIndex());
|
||||
Settings settings = indexMetaData.settings();
|
||||
Settings settings = indexMetaData.getSettings();
|
||||
try {
|
||||
SimilarityService similarityService = new SimilarityService(index, settings);
|
||||
// We cannot instantiate real analysis server at this point because the node might not have
|
||||
|
@ -238,7 +237,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
|
|||
* Marks index as upgraded so we don't have to test it again
|
||||
*/
|
||||
private IndexMetaData markAsUpgraded(IndexMetaData indexMetaData) {
|
||||
Settings settings = Settings.builder().put(indexMetaData.settings()).put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build();
|
||||
Settings settings = Settings.builder().put(indexMetaData.getSettings()).put(IndexMetaData.SETTING_VERSION_UPGRADED, Version.CURRENT).build();
|
||||
return IndexMetaData.builder(indexMetaData).settings(settings).build();
|
||||
}
|
||||
|
||||
|
|
|
@ -184,9 +184,9 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
for (String type : typesToIntroduce) {
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.mappings().containsKey(type)) {
|
||||
if (indexMetaData.getMappings().containsKey(type)) {
|
||||
// don't apply the default mapping, it has been applied when the mapping was created
|
||||
indexService.mapperService().merge(type, indexMetaData.mappings().get(type).source(), false, true);
|
||||
indexService.mapperService().merge(type, indexMetaData.getMappings().get(type).source(), false, true);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -351,14 +351,14 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
}
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
IndexService indexService = indicesService.createIndex(indexMetaData);
|
||||
indicesToClose.add(indexMetaData.index());
|
||||
indicesToClose.add(indexMetaData.getIndex());
|
||||
// make sure to add custom default mapping if exists
|
||||
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.mappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
indexService.mapperService().merge(MapperService.DEFAULT_MAPPING, indexMetaData.getMappings().get(MapperService.DEFAULT_MAPPING).source(), false, request.updateAllTypes());
|
||||
}
|
||||
// only add the current relevant mapping (if exists)
|
||||
if (indexMetaData.mappings().containsKey(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.mappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
if (indexMetaData.getMappings().containsKey(request.type())) {
|
||||
indexService.mapperService().merge(request.type(), indexMetaData.getMappings().get(request.type()).source(), false, request.updateAllTypes());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -391,7 +391,7 @@ public class MetaDataMappingService extends AbstractComponent {
|
|||
// Also the order of the mappings may be backwards.
|
||||
if (newMapper.parentFieldMapper().active()) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.mappings().values()) {
|
||||
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
|
||||
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
|
||||
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
|
||||
}
|
||||
|
|
|
@ -93,7 +93,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
|
||||
// we need to do this each time in case it was changed by update settings
|
||||
for (final IndexMetaData indexMetaData : event.state().metaData()) {
|
||||
String autoExpandReplicas = indexMetaData.settings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
|
||||
String autoExpandReplicas = indexMetaData.getSettings().get(IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS);
|
||||
if (autoExpandReplicas != null && Booleans.parseBoolean(autoExpandReplicas, true)) { // Booleans only work for false values, just as we want it here
|
||||
try {
|
||||
final int min;
|
||||
|
@ -102,7 +102,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
final int dash = autoExpandReplicas.indexOf('-');
|
||||
if (-1 == dash) {
|
||||
logger.warn("failed to set [{}] for index [{}], it should be dash delimited [{}]",
|
||||
IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.index(), autoExpandReplicas);
|
||||
IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), autoExpandReplicas);
|
||||
continue;
|
||||
}
|
||||
final String sMin = autoExpandReplicas.substring(0, dash);
|
||||
|
@ -110,7 +110,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
min = Integer.parseInt(sMin);
|
||||
} catch (NumberFormatException e) {
|
||||
logger.warn("failed to set [{}] for index [{}], minimum value is not a number [{}]",
|
||||
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.index(), sMin);
|
||||
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), sMin);
|
||||
continue;
|
||||
}
|
||||
String sMax = autoExpandReplicas.substring(dash + 1);
|
||||
|
@ -121,7 +121,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
max = Integer.parseInt(sMax);
|
||||
} catch (NumberFormatException e) {
|
||||
logger.warn("failed to set [{}] for index [{}], maximum value is neither [{}] nor a number [{}]",
|
||||
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.index(), ALL_NODES_VALUE, sMax);
|
||||
e, IndexMetaData.SETTING_AUTO_EXPAND_REPLICAS, indexMetaData.getIndex(), ALL_NODES_VALUE, sMax);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -134,7 +134,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
}
|
||||
|
||||
// same value, nothing to do there
|
||||
if (numberOfReplicas == indexMetaData.numberOfReplicas()) {
|
||||
if (numberOfReplicas == indexMetaData.getNumberOfReplicas()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
@ -144,10 +144,10 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
nrReplicasChanged.put(numberOfReplicas, new ArrayList<String>());
|
||||
}
|
||||
|
||||
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.index());
|
||||
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
|
||||
}
|
||||
} catch (Exception e) {
|
||||
logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.index());
|
||||
logger.warn("[{}] failed to parse auto expand replicas", e, indexMetaData.getIndex());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -237,7 +237,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
Set<String> openIndices = new HashSet<>();
|
||||
Set<String> closeIndices = new HashSet<>();
|
||||
for (String index : actualIndices) {
|
||||
if (currentState.metaData().index(index).state() == IndexMetaData.State.OPEN) {
|
||||
if (currentState.metaData().index(index).getState() == IndexMetaData.State.OPEN) {
|
||||
openIndices.add(index);
|
||||
} else {
|
||||
closeIndices.add(index);
|
||||
|
@ -348,10 +348,10 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
|
|||
String index = entry.getKey();
|
||||
IndexMetaData indexMetaData = metaDataBuilder.get(index);
|
||||
if (indexMetaData != null) {
|
||||
if (Version.CURRENT.equals(indexMetaData.creationVersion()) == false) {
|
||||
if (Version.CURRENT.equals(indexMetaData.getCreationVersion()) == false) {
|
||||
// No reason to pollute the settings, we didn't really upgrade anything
|
||||
metaDataBuilder.put(IndexMetaData.builder(indexMetaData)
|
||||
.settings(settingsBuilder().put(indexMetaData.settings())
|
||||
.settings(settingsBuilder().put(indexMetaData.getSettings())
|
||||
.put(IndexMetaData.SETTING_VERSION_MINIMUM_COMPATIBLE, entry.getValue().v2())
|
||||
.put(IndexMetaData.SETTING_VERSION_UPGRADED, entry.getValue().v1())
|
||||
)
|
||||
|
|
|
@ -130,9 +130,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
ArrayList<String> failures = new ArrayList<>();
|
||||
|
||||
// check the number of shards
|
||||
if (indexMetaData.numberOfShards() != shards().size()) {
|
||||
if (indexMetaData.getNumberOfShards() != shards().size()) {
|
||||
Set<Integer> expected = new HashSet<>();
|
||||
for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
|
||||
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
|
||||
expected.add(i);
|
||||
}
|
||||
for (IndexShardRoutingTable indexShardRoutingTable : this) {
|
||||
|
@ -143,9 +143,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
// check the replicas
|
||||
for (IndexShardRoutingTable indexShardRoutingTable : this) {
|
||||
int routingNumberOfReplicas = indexShardRoutingTable.size() - 1;
|
||||
if (routingNumberOfReplicas != indexMetaData.numberOfReplicas()) {
|
||||
if (routingNumberOfReplicas != indexMetaData.getNumberOfReplicas()) {
|
||||
failures.add("Shard [" + indexShardRoutingTable.shardId().id()
|
||||
+ "] routing table has wrong number of replicas, expected [" + indexMetaData.numberOfReplicas() + "], got [" + routingNumberOfReplicas + "]");
|
||||
+ "] routing table has wrong number of replicas, expected [" + indexMetaData.getNumberOfReplicas() + "], got [" + routingNumberOfReplicas + "]");
|
||||
}
|
||||
for (ShardRouting shardRouting : indexShardRoutingTable) {
|
||||
if (!shardRouting.index().equals(index())) {
|
||||
|
@ -419,9 +419,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
if (!shards.isEmpty()) {
|
||||
throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
|
||||
}
|
||||
for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
|
||||
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId));
|
||||
for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
|
||||
for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
|
||||
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.getIndex(), shardId));
|
||||
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
|
||||
if (asNew && ignoreShards.contains(shardId)) {
|
||||
// This shards wasn't completely snapshotted - restore it as new shard
|
||||
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
|
||||
|
@ -441,9 +441,9 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
|
|||
if (!shards.isEmpty()) {
|
||||
throw new IllegalStateException("trying to initialize an index with fresh shards, but already has shards created");
|
||||
}
|
||||
for (int shardId = 0; shardId < indexMetaData.numberOfShards(); shardId++) {
|
||||
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.index(), shardId));
|
||||
for (int i = 0; i <= indexMetaData.numberOfReplicas(); i++) {
|
||||
for (int shardId = 0; shardId < indexMetaData.getNumberOfShards(); shardId++) {
|
||||
IndexShardRoutingTable.Builder indexShardRoutingBuilder = new IndexShardRoutingTable.Builder(new ShardId(indexMetaData.getIndex(), shardId));
|
||||
for (int i = 0; i <= indexMetaData.getNumberOfReplicas(); i++) {
|
||||
indexShardRoutingBuilder.addShard(ShardRouting.newUnassigned(index, shardId, null, i == 0, unassignedInfo));
|
||||
}
|
||||
shards.put(shardId, indexShardRoutingBuilder.build());
|
||||
|
|
|
@ -241,7 +241,7 @@ public class OperationRouting extends AbstractComponent {
|
|||
} else {
|
||||
hash = Murmur3HashFunction.hash(routing);
|
||||
}
|
||||
return MathUtils.mod(hash, indexMetaData.numberOfShards());
|
||||
return MathUtils.mod(hash, indexMetaData.getNumberOfShards());
|
||||
}
|
||||
|
||||
private void ensureNodeIdExists(DiscoveryNodes nodes, String nodeId) {
|
||||
|
|
|
@ -445,8 +445,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
|||
}
|
||||
|
||||
public Builder addAsNew(IndexMetaData indexMetaData) {
|
||||
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
|
||||
if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
|
||||
.initializeAsNew(indexMetaData);
|
||||
add(indexRoutingBuilder);
|
||||
}
|
||||
|
@ -454,8 +454,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
|||
}
|
||||
|
||||
public Builder addAsRecovery(IndexMetaData indexMetaData) {
|
||||
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
|
||||
if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
|
||||
.initializeAsRecovery(indexMetaData);
|
||||
add(indexRoutingBuilder);
|
||||
}
|
||||
|
@ -463,8 +463,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
|||
}
|
||||
|
||||
public Builder addAsFromDangling(IndexMetaData indexMetaData) {
|
||||
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
|
||||
if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
|
||||
.initializeAsFromDangling(indexMetaData);
|
||||
add(indexRoutingBuilder);
|
||||
}
|
||||
|
@ -472,8 +472,8 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
|||
}
|
||||
|
||||
public Builder addAsFromCloseToOpen(IndexMetaData indexMetaData) {
|
||||
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
|
||||
if (indexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
|
||||
.initializeAsFromCloseToOpen(indexMetaData);
|
||||
add(indexRoutingBuilder);
|
||||
}
|
||||
|
@ -481,14 +481,14 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
|||
}
|
||||
|
||||
public Builder addAsRestore(IndexMetaData indexMetaData, RestoreSource restoreSource) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
|
||||
.initializeAsRestore(indexMetaData, restoreSource);
|
||||
add(indexRoutingBuilder);
|
||||
return this;
|
||||
}
|
||||
|
||||
public Builder addAsNewRestore(IndexMetaData indexMetaData, RestoreSource restoreSource, IntSet ignoreShards) {
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.index())
|
||||
IndexRoutingTable.Builder indexRoutingBuilder = new IndexRoutingTable.Builder(indexMetaData.getIndex())
|
||||
.initializeAsNewRestore(indexMetaData, restoreSource, ignoreShards);
|
||||
add(indexRoutingBuilder);
|
||||
return this;
|
||||
|
|
|
@ -275,7 +275,7 @@ public class AllocationService extends AbstractComponent {
|
|||
}
|
||||
}
|
||||
}
|
||||
if (IndexMetaData.isIndexUsingShadowReplicas(index.settings())) {
|
||||
if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
|
||||
routingNodes.reinitShadowPrimary(candidate);
|
||||
changed = true;
|
||||
}
|
||||
|
|
|
@ -262,7 +262,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
|
|||
* Returns the average of shards per node for the given index
|
||||
*/
|
||||
public float avgShardsPerNode(String index) {
|
||||
return ((float) metaData.index(index).totalNumberOfShards()) / nodes.size();
|
||||
return ((float) metaData.index(index).getTotalNumberOfShards()) / nodes.size();
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -166,7 +166,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
|
|||
}
|
||||
|
||||
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.index());
|
||||
int shardCount = indexMetaData.numberOfReplicas() + 1; // 1 for primary
|
||||
int shardCount = indexMetaData.getNumberOfReplicas() + 1; // 1 for primary
|
||||
for (String awarenessAttribute : awarenessAttributes) {
|
||||
// the node the shard exists on must be associated with an awareness attribute
|
||||
if (!node.node().attributes().containsKey(awarenessAttribute)) {
|
||||
|
|
|
@ -82,7 +82,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
|
|||
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
|
||||
}
|
||||
|
||||
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).settings();
|
||||
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
|
||||
String enableIndexValue = indexSettings.get(INDEX_ROUTING_ALLOCATION_ENABLE);
|
||||
final Allocation enable;
|
||||
if (enableIndexValue != null) {
|
||||
|
@ -118,7 +118,7 @@ public class EnableAllocationDecider extends AllocationDecider implements NodeSe
|
|||
return allocation.decision(Decision.YES, NAME, "rebalance disabling is ignored");
|
||||
}
|
||||
|
||||
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).settings();
|
||||
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
|
||||
String enableIndexValue = indexSettings.get(INDEX_ROUTING_REBALANCE_ENABLE);
|
||||
final Rebalance enable;
|
||||
if (enableIndexValue != null) {
|
||||
|
|
|
@ -64,7 +64,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
|
|||
@Override
|
||||
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
|
||||
int totalShardsPerNode = indexMd.settings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
|
||||
int totalShardsPerNode = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
|
||||
if (totalShardsPerNode <= 0) {
|
||||
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [%d] <= 0", totalShardsPerNode);
|
||||
}
|
||||
|
@ -90,7 +90,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
|
|||
@Override
|
||||
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
|
||||
int totalShardsPerNode = indexMd.settings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
|
||||
int totalShardsPerNode = indexMd.getSettings().getAsInt(INDEX_TOTAL_SHARDS_PER_NODE, -1);
|
||||
if (totalShardsPerNode <= 0) {
|
||||
return allocation.decision(Decision.YES, NAME, "total shard limit disabled: [%d] <= 0", totalShardsPerNode);
|
||||
}
|
||||
|
|
|
@ -742,9 +742,9 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
|
|||
// if its not the same version, only copy over new indices or ones that changed the version
|
||||
MetaData.Builder metaDataBuilder = MetaData.builder(newClusterState.metaData()).removeAllIndices();
|
||||
for (IndexMetaData indexMetaData : newClusterState.metaData()) {
|
||||
IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.index());
|
||||
if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.indexUUID()) &&
|
||||
currentIndexMetaData.version() == indexMetaData.version()) {
|
||||
IndexMetaData currentIndexMetaData = currentState.metaData().index(indexMetaData.getIndex());
|
||||
if (currentIndexMetaData != null && currentIndexMetaData.isSameUUID(indexMetaData.getIndexUUID()) &&
|
||||
currentIndexMetaData.getVersion() == indexMetaData.getVersion()) {
|
||||
// safe to reuse
|
||||
metaDataBuilder.put(currentIndexMetaData, false);
|
||||
} else {
|
||||
|
|
|
@ -120,8 +120,8 @@ public class DanglingIndicesState extends AbstractComponent {
|
|||
IndexMetaData indexMetaData = metaStateService.loadIndexState(indexName);
|
||||
if (indexMetaData != null) {
|
||||
logger.info("[{}] dangling index, exists on local file system, but not in cluster metadata, auto import to cluster state", indexName);
|
||||
if (!indexMetaData.index().equals(indexName)) {
|
||||
logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.index());
|
||||
if (!indexMetaData.getIndex().equals(indexName)) {
|
||||
logger.info("dangled index directory name is [{}], state name is [{}], renaming to directory name", indexName, indexMetaData.getIndex());
|
||||
indexMetaData = IndexMetaData.builder(indexMetaData).index(indexName).build();
|
||||
}
|
||||
newIndices.put(indexName, indexMetaData);
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.env.NodeEnvironment;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.util.Arrays;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -96,7 +95,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
electedGlobalState = nodeState.metaData();
|
||||
}
|
||||
for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
|
||||
indices.addTo(cursor.value.index(), 1);
|
||||
indices.addTo(cursor.value.getIndex(), 1);
|
||||
}
|
||||
}
|
||||
if (found < requiredAllocation) {
|
||||
|
@ -123,7 +122,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
}
|
||||
if (electedIndexMetaData == null) {
|
||||
electedIndexMetaData = indexMetaData;
|
||||
} else if (indexMetaData.version() > electedIndexMetaData.version()) {
|
||||
} else if (indexMetaData.getVersion() > electedIndexMetaData.getVersion()) {
|
||||
electedIndexMetaData = indexMetaData;
|
||||
}
|
||||
indexMetaDataCount++;
|
||||
|
|
|
@ -131,11 +131,11 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
Set<String> newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size());
|
||||
for (IndexMetaData indexMetaData : newMetaData) {
|
||||
IndexMetaData indexMetaDataOnDisk = null;
|
||||
if (indexMetaData.state().equals(IndexMetaData.State.CLOSE)) {
|
||||
indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.index());
|
||||
if (indexMetaData.getState().equals(IndexMetaData.State.CLOSE)) {
|
||||
indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex());
|
||||
}
|
||||
if (indexMetaDataOnDisk != null) {
|
||||
newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.index());
|
||||
newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex());
|
||||
}
|
||||
}
|
||||
newPreviouslyWrittenIndices.addAll(previouslyWrittenIndices);
|
||||
|
@ -274,8 +274,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
String writeReason = null;
|
||||
if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) {
|
||||
writeReason = "freshly created";
|
||||
} else if (previousIndexMetaData.version() != newIndexMetaData.version()) {
|
||||
writeReason = "version changed from [" + previousIndexMetaData.version() + "] to [" + newIndexMetaData.version() + "]";
|
||||
} else if (previousIndexMetaData.getVersion() != newIndexMetaData.getVersion()) {
|
||||
writeReason = "version changed from [" + previousIndexMetaData.getVersion() + "] to [" + newIndexMetaData.getVersion() + "]";
|
||||
}
|
||||
if (writeReason != null) {
|
||||
indicesToWrite.add(new GatewayMetaState.IndexMetaWriteInfo(newIndexMetaData, previousIndexMetaData, writeReason));
|
||||
|
@ -295,12 +295,12 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
// we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously
|
||||
for (IndexMetaData indexMetaData : state.metaData()) {
|
||||
boolean isOrWasClosed = indexMetaData.state().equals(IndexMetaData.State.CLOSE);
|
||||
boolean isOrWasClosed = indexMetaData.getState().equals(IndexMetaData.State.CLOSE);
|
||||
// if the index is open we might still have to write the state if it just transitioned from closed to open
|
||||
// so we have to check for that as well.
|
||||
IndexMetaData previousMetaData = previousState.metaData().getIndices().get(indexMetaData.getIndex());
|
||||
if (previousMetaData != null) {
|
||||
isOrWasClosed = isOrWasClosed || previousMetaData.state().equals(IndexMetaData.State.CLOSE);
|
||||
isOrWasClosed = isOrWasClosed || previousMetaData.getState().equals(IndexMetaData.State.CLOSE);
|
||||
}
|
||||
if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && isOrWasClosed) {
|
||||
indices.add(indexMetaData.getIndex());
|
||||
|
|
|
@ -116,7 +116,7 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
|
|||
public void messageReceived(final AllocateDangledRequest request, final TransportChannel channel) throws Exception {
|
||||
String[] indexNames = new String[request.indices.length];
|
||||
for (int i = 0; i < request.indices.length; i++) {
|
||||
indexNames[i] = request.indices[i].index();
|
||||
indexNames[i] = request.indices[i].getIndex();
|
||||
}
|
||||
clusterService.submitStateUpdateTask("allocation dangled indices " + Arrays.toString(indexNames), new ClusterStateUpdateTask() {
|
||||
@Override
|
||||
|
@ -131,12 +131,12 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
|
|||
boolean importNeeded = false;
|
||||
StringBuilder sb = new StringBuilder();
|
||||
for (IndexMetaData indexMetaData : request.indices) {
|
||||
if (currentState.metaData().hasIndex(indexMetaData.index())) {
|
||||
if (currentState.metaData().hasIndex(indexMetaData.getIndex())) {
|
||||
continue;
|
||||
}
|
||||
if (currentState.metaData().hasAlias(indexMetaData.index())) {
|
||||
if (currentState.metaData().hasAlias(indexMetaData.getIndex())) {
|
||||
logger.warn("ignoring dangled index [{}] on node [{}] due to an existing alias with the same name",
|
||||
indexMetaData.index(), request.fromNode);
|
||||
indexMetaData.getIndex(), request.fromNode);
|
||||
continue;
|
||||
}
|
||||
importNeeded = true;
|
||||
|
@ -149,15 +149,15 @@ public class LocalAllocateDangledIndices extends AbstractComponent {
|
|||
} catch (Exception ex) {
|
||||
// upgrade failed - adding index as closed
|
||||
logger.warn("found dangled index [{}] on node [{}]. This index cannot be upgraded to the latest version, adding as closed", ex,
|
||||
indexMetaData.index(), request.fromNode);
|
||||
upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.version() + 1).build();
|
||||
indexMetaData.getIndex(), request.fromNode);
|
||||
upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE).version(indexMetaData.getVersion() + 1).build();
|
||||
}
|
||||
metaData.put(upgradedIndexMetaData, false);
|
||||
blocks.addBlocks(upgradedIndexMetaData);
|
||||
if (upgradedIndexMetaData.getState() == IndexMetaData.State.OPEN) {
|
||||
routingTableBuilder.addAsFromDangling(upgradedIndexMetaData);
|
||||
}
|
||||
sb.append("[").append(upgradedIndexMetaData.index()).append("/").append(upgradedIndexMetaData.state()).append("]");
|
||||
sb.append("[").append(upgradedIndexMetaData.getIndex()).append("/").append(upgradedIndexMetaData.getState()).append("]");
|
||||
}
|
||||
if (!importNeeded) {
|
||||
return currentState;
|
||||
|
|
|
@ -130,13 +130,13 @@ public class MetaStateService extends AbstractComponent {
|
|||
* Writes the index state.
|
||||
*/
|
||||
void writeIndex(String reason, IndexMetaData indexMetaData, @Nullable IndexMetaData previousIndexMetaData) throws Exception {
|
||||
logger.trace("[{}] writing state, reason [{}]", indexMetaData.index(), reason);
|
||||
logger.trace("[{}] writing state, reason [{}]", indexMetaData.getIndex(), reason);
|
||||
try {
|
||||
indexStateFormat.write(indexMetaData, indexMetaData.version(),
|
||||
nodeEnv.indexPaths(new Index(indexMetaData.index())));
|
||||
indexStateFormat.write(indexMetaData, indexMetaData.getVersion(),
|
||||
nodeEnv.indexPaths(new Index(indexMetaData.getIndex())));
|
||||
} catch (Throwable ex) {
|
||||
logger.warn("[{}]: failed to write index state", ex, indexMetaData.index());
|
||||
throw new IOException("failed to write state for [" + indexMetaData.index() + "]", ex);
|
||||
logger.warn("[{}]: failed to write index state", ex, indexMetaData.getIndex());
|
||||
throw new IOException("failed to write state for [" + indexMetaData.getIndex() + "]", ex);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -78,7 +78,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
|
|||
|
||||
IndexMetaData indexMetaData = metaData.index(shard.getIndex());
|
||||
|
||||
NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexMetaData.settings()), allocation.getIgnoreNodes(shard.shardId()), shardState);
|
||||
NodesAndVersions nodesAndVersions = buildNodesAndVersions(shard, recoverOnAnyNode(indexMetaData.getSettings()), allocation.getIgnoreNodes(shard.shardId()), shardState);
|
||||
logger.debug("[{}][{}] found {} allocations of {}, highest version: [{}]", shard.index(), shard.id(), nodesAndVersions.allocationsFound, shard, nodesAndVersions.highestVersion);
|
||||
|
||||
if (isEnoughAllocationsFound(shard, indexMetaData, nodesAndVersions) == false) {
|
||||
|
@ -135,22 +135,22 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
|
|||
// if we restore from a repository one copy is more then enough
|
||||
if (shard.restoreSource() == null) {
|
||||
try {
|
||||
String initialShards = indexMetaData.settings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
|
||||
String initialShards = indexMetaData.getSettings().get(INDEX_RECOVERY_INITIAL_SHARDS, settings.get(INDEX_RECOVERY_INITIAL_SHARDS, this.initialShards));
|
||||
if ("quorum".equals(initialShards)) {
|
||||
if (indexMetaData.numberOfReplicas() > 1) {
|
||||
requiredAllocation = ((1 + indexMetaData.numberOfReplicas()) / 2) + 1;
|
||||
if (indexMetaData.getNumberOfReplicas() > 1) {
|
||||
requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2) + 1;
|
||||
}
|
||||
} else if ("quorum-1".equals(initialShards) || "half".equals(initialShards)) {
|
||||
if (indexMetaData.numberOfReplicas() > 2) {
|
||||
requiredAllocation = ((1 + indexMetaData.numberOfReplicas()) / 2);
|
||||
if (indexMetaData.getNumberOfReplicas() > 2) {
|
||||
requiredAllocation = ((1 + indexMetaData.getNumberOfReplicas()) / 2);
|
||||
}
|
||||
} else if ("one".equals(initialShards)) {
|
||||
requiredAllocation = 1;
|
||||
} else if ("full".equals(initialShards) || "all".equals(initialShards)) {
|
||||
requiredAllocation = indexMetaData.numberOfReplicas() + 1;
|
||||
requiredAllocation = indexMetaData.getNumberOfReplicas() + 1;
|
||||
} else if ("full-1".equals(initialShards) || "all-1".equals(initialShards)) {
|
||||
if (indexMetaData.numberOfReplicas() > 1) {
|
||||
requiredAllocation = indexMetaData.numberOfReplicas();
|
||||
if (indexMetaData.getNumberOfReplicas() > 1) {
|
||||
requiredAllocation = indexMetaData.getNumberOfReplicas();
|
||||
}
|
||||
} else {
|
||||
requiredAllocation = Integer.parseInt(initialShards);
|
||||
|
|
|
@ -128,7 +128,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
|
|||
if (metaData != null) {
|
||||
ShardPath shardPath = null;
|
||||
try {
|
||||
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.settings());
|
||||
shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.getSettings());
|
||||
if (shardPath == null) {
|
||||
throw new IllegalStateException(shardId + " no shard path found");
|
||||
}
|
||||
|
|
|
@ -472,7 +472,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
}
|
||||
deleteIndexStore(reason, metaData, clusterState, true);
|
||||
} catch (IOException e) {
|
||||
logger.warn("[{}] failed to delete closed index", e, metaData.index());
|
||||
logger.warn("[{}] failed to delete closed index", e, metaData.getIndex());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -484,7 +484,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
public void deleteIndexStore(String reason, IndexMetaData metaData, ClusterState clusterState, boolean closed) throws IOException {
|
||||
if (nodeEnv.hasNodeFile()) {
|
||||
synchronized (this) {
|
||||
String indexName = metaData.index();
|
||||
String indexName = metaData.getIndex();
|
||||
if (indices.containsKey(indexName)) {
|
||||
String localUUid = indices.get(indexName).getIndexService().indexUUID();
|
||||
throw new IllegalStateException("Can't delete index store for [" + indexName + "] - it's still part of the indices service [" + localUUid + "] [" + metaData.getIndexUUID() + "]");
|
||||
|
@ -496,7 +496,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
|
|||
throw new IllegalStateException("Can't delete closed index store for [" + indexName + "] - it's still part of the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
|
||||
}
|
||||
}
|
||||
Index index = new Index(metaData.index());
|
||||
Index index = new Index(metaData.getIndex());
|
||||
final Settings indexSettings = buildIndexSettings(metaData);
|
||||
deleteIndexStore(reason, index, indexSettings, closed);
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ public final class IndicesWarmer extends AbstractComponent {
|
|||
if (indexMetaData == null) {
|
||||
return;
|
||||
}
|
||||
if (!indexMetaData.settings().getAsBoolean(INDEX_WARMER_ENABLED, settings.getAsBoolean(INDEX_WARMER_ENABLED, true))) {
|
||||
if (!indexMetaData.getSettings().getAsBoolean(INDEX_WARMER_ENABLED, settings.getAsBoolean(INDEX_WARMER_ENABLED, true))) {
|
||||
return;
|
||||
}
|
||||
IndexService indexService = indicesService.indexService(context.shardId().index().name());
|
||||
|
|
|
@ -46,11 +46,9 @@ import org.elasticsearch.search.query.QueryPhase;
|
|||
import org.elasticsearch.search.query.QuerySearchResult;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.*;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
|
||||
import static org.elasticsearch.common.Strings.hasLength;
|
||||
|
||||
|
@ -212,7 +210,7 @@ public class IndicesRequestCache extends AbstractComponent implements RemovalLis
|
|||
}
|
||||
// if not explicitly set in the request, use the index setting, if not, use the request
|
||||
if (request.requestCache() == null) {
|
||||
if (!isCacheEnabled(index.settings(), Boolean.FALSE)) {
|
||||
if (!isCacheEnabled(index.getSettings(), Boolean.FALSE)) {
|
||||
return false;
|
||||
}
|
||||
} else if (!request.requestCache()) {
|
||||
|
|
|
@ -184,7 +184,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
for (IndexService indexService : indicesService) {
|
||||
String index = indexService.index().getName();
|
||||
IndexMetaData indexMetaData = event.state().metaData().index(index);
|
||||
if (indexMetaData != null && indexMetaData.state() == IndexMetaData.State.CLOSE) {
|
||||
if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
for (Integer shardId : indexService.shardIds()) {
|
||||
logger.debug("[{}][{}] removing shard (index is closed)", index, shardId);
|
||||
try {
|
||||
|
@ -216,8 +216,8 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
IndexMetaData indexMetaData = event.state().metaData().index(indexService.index().name());
|
||||
if (indexMetaData != null) {
|
||||
if (!indexMetaData.isSameUUID(indexService.indexUUID())) {
|
||||
logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.index());
|
||||
deleteIndex(indexMetaData.index(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
|
||||
logger.debug("[{}] mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated", indexMetaData.getIndex());
|
||||
deleteIndex(indexMetaData.getIndex(), "mismatch on index UUIDs between cluster state and local state, cleaning the index so it will be recreated");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -234,7 +234,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
} else {
|
||||
final IndexMetaData metaData = previousState.metaData().index(index);
|
||||
assert metaData != null;
|
||||
indexSettings = metaData.settings();
|
||||
indexSettings = metaData.getSettings();
|
||||
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
|
||||
}
|
||||
try {
|
||||
|
@ -268,7 +268,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
}
|
||||
for (Integer existingShardId : indexService.shardIds()) {
|
||||
if (!newShardIds.contains(existingShardId)) {
|
||||
if (indexMetaData.state() == IndexMetaData.State.CLOSE) {
|
||||
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}][{}] removing shard (index is closed)", index, existingShardId);
|
||||
}
|
||||
|
@ -296,7 +296,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
if (!indicesService.hasIndex(shard.index())) {
|
||||
final IndexMetaData indexMetaData = event.state().metaData().index(shard.index());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] creating index", indexMetaData.index());
|
||||
logger.debug("[{}] creating index", indexMetaData.getIndex());
|
||||
}
|
||||
try {
|
||||
indicesService.createIndex(indexMetaData);
|
||||
|
@ -312,7 +312,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
return;
|
||||
}
|
||||
for (IndexMetaData indexMetaData : event.state().metaData()) {
|
||||
if (!indicesService.hasIndex(indexMetaData.index())) {
|
||||
if (!indicesService.hasIndex(indexMetaData.getIndex())) {
|
||||
// we only create / update here
|
||||
continue;
|
||||
}
|
||||
|
@ -320,14 +320,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
if (!event.indexMetaDataChanged(indexMetaData)) {
|
||||
continue;
|
||||
}
|
||||
String index = indexMetaData.index();
|
||||
String index = indexMetaData.getIndex();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
// already deleted on us, ignore it
|
||||
continue;
|
||||
}
|
||||
IndexSettingsService indexSettingsService = indexService.settingsService();
|
||||
indexSettingsService.refreshSettings(indexMetaData.settings());
|
||||
indexSettingsService.refreshSettings(indexMetaData.getSettings());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -335,12 +335,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
private void applyMappings(ClusterChangedEvent event) {
|
||||
// go over and update mappings
|
||||
for (IndexMetaData indexMetaData : event.state().metaData()) {
|
||||
if (!indicesService.hasIndex(indexMetaData.index())) {
|
||||
if (!indicesService.hasIndex(indexMetaData.getIndex())) {
|
||||
// we only create / update here
|
||||
continue;
|
||||
}
|
||||
List<String> typesToRefresh = new ArrayList<>();
|
||||
String index = indexMetaData.index();
|
||||
String index = indexMetaData.getIndex();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
// got deleted on us, ignore (closing the node)
|
||||
|
@ -349,7 +349,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
try {
|
||||
MapperService mapperService = indexService.mapperService();
|
||||
// first, go over and update the _default_ mapping (if exists)
|
||||
if (indexMetaData.mappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
if (indexMetaData.getMappings().containsKey(MapperService.DEFAULT_MAPPING)) {
|
||||
boolean requireRefresh = processMapping(index, mapperService, MapperService.DEFAULT_MAPPING, indexMetaData.mapping(MapperService.DEFAULT_MAPPING).source());
|
||||
if (requireRefresh) {
|
||||
typesToRefresh.add(MapperService.DEFAULT_MAPPING);
|
||||
|
@ -357,7 +357,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
}
|
||||
|
||||
// go over and add the relevant mappings (or update them)
|
||||
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.mappings().values()) {
|
||||
for (ObjectCursor<MappingMetaData> cursor : indexMetaData.getMappings().values()) {
|
||||
MappingMetaData mappingMd = cursor.value;
|
||||
String mappingType = mappingMd.type();
|
||||
CompressedXContent mappingSource = mappingMd.source();
|
||||
|
@ -371,7 +371,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
}
|
||||
if (!typesToRefresh.isEmpty() && sendRefreshMapping) {
|
||||
nodeMappingRefreshAction.nodeMappingRefresh(event.state(),
|
||||
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.indexUUID(),
|
||||
new NodeMappingRefreshAction.NodeMappingRefreshRequest(index, indexMetaData.getIndexUUID(),
|
||||
typesToRefresh.toArray(new String[typesToRefresh.size()]), event.state().nodes().localNodeId())
|
||||
);
|
||||
}
|
||||
|
@ -451,7 +451,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
if (aliasesChanged(event)) {
|
||||
// go over and update aliases
|
||||
for (IndexMetaData indexMetaData : event.state().metaData()) {
|
||||
String index = indexMetaData.index();
|
||||
String index = indexMetaData.getIndex();
|
||||
IndexService indexService = indicesService.indexService(index);
|
||||
if (indexService == null) {
|
||||
// we only create / update here
|
||||
|
|
|
@ -118,7 +118,7 @@ public class SyncedFlushService extends AbstractComponent {
|
|||
int numberOfShards = 0;
|
||||
for (String index : concreteIndices) {
|
||||
final IndexMetaData indexMetaData = state.metaData().index(index);
|
||||
totalNumberOfShards += indexMetaData.totalNumberOfShards();
|
||||
totalNumberOfShards += indexMetaData.getTotalNumberOfShards();
|
||||
numberOfShards += indexMetaData.getNumberOfShards();
|
||||
results.put(index, Collections.synchronizedList(new ArrayList<ShardsSyncedFlushResult>()));
|
||||
|
||||
|
@ -241,7 +241,7 @@ public class SyncedFlushService extends AbstractComponent {
|
|||
final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.index().name());
|
||||
if (indexRoutingTable == null) {
|
||||
IndexMetaData index = state.getMetaData().index(shardId.index().getName());
|
||||
if (index != null && index.state() == IndexMetaData.State.CLOSE) {
|
||||
if (index != null && index.getState() == IndexMetaData.State.CLOSE) {
|
||||
throw new IndexClosedException(shardId.index());
|
||||
}
|
||||
throw new IndexNotFoundException(shardId.index().getName());
|
||||
|
|
|
@ -169,11 +169,11 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
|
|||
if (metaData == null) {
|
||||
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
|
||||
}
|
||||
String storeType = metaData.settings().get(IndexStoreModule.STORE_TYPE, "fs");
|
||||
String storeType = metaData.getSettings().get(IndexStoreModule.STORE_TYPE, "fs");
|
||||
if (!storeType.contains("fs")) {
|
||||
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
|
||||
}
|
||||
final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.settings());
|
||||
final ShardPath shardPath = ShardPath.loadShardPath(logger, nodeEnv, shardId, metaData.getSettings());
|
||||
if (shardPath == null) {
|
||||
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
|
||||
}
|
||||
|
|
|
@ -164,7 +164,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
|
|||
if (indexMetaData == null) {
|
||||
continue;
|
||||
}
|
||||
boolean disablePurge = indexMetaData.settings().getAsBoolean(INDEX_TTL_DISABLE_PURGE, false);
|
||||
boolean disablePurge = indexMetaData.getSettings().getAsBoolean(INDEX_TTL_DISABLE_PURGE, false);
|
||||
if (disablePurge) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -70,10 +70,10 @@ public class RestGetIndicesAliasesAction extends BaseRestHandler {
|
|||
|
||||
final boolean isAllAliasesRequested = isAllOrWildcard(aliases);
|
||||
for (IndexMetaData indexMetaData : metaData) {
|
||||
builder.startObject(indexMetaData.index(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.startObject(indexMetaData.getIndex(), XContentBuilder.FieldCaseConversion.NONE);
|
||||
builder.startObject("aliases");
|
||||
|
||||
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.aliases().values()) {
|
||||
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
|
||||
if (isAllAliasesRequested || Regex.simpleMatch(aliases, cursor.value.alias())) {
|
||||
AliasMetaData.Builder.toXContent(cursor.value, builder, ToXContent.EMPTY_PARAMS);
|
||||
}
|
||||
|
|
|
@ -328,8 +328,8 @@ public class RestIndicesAction extends AbstractCatAction {
|
|||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getDocs().getCount());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getDocs().getDeleted());
|
||||
|
||||
table.addCell(indexMetaData.creationDate());
|
||||
table.addCell(new DateTime(indexMetaData.creationDate(), DateTimeZone.UTC));
|
||||
table.addCell(indexMetaData.getCreationDate());
|
||||
table.addCell(new DateTime(indexMetaData.getCreationDate(), DateTimeZone.UTC));
|
||||
|
||||
table.addCell(indexStats == null ? null : indexStats.getTotal().getStore().size());
|
||||
table.addCell(indexStats == null ? null : indexStats.getPrimaries().getStore().size());
|
||||
|
|
|
@ -180,7 +180,7 @@ public class RestShardsAction extends AbstractCatAction {
|
|||
IndexMetaData indexMeta = state.getState().getMetaData().index(shard.index());
|
||||
boolean usesShadowReplicas = false;
|
||||
if (indexMeta != null) {
|
||||
usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.settings());
|
||||
usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.getSettings());
|
||||
}
|
||||
if (shard.primary()) {
|
||||
table.addCell("p");
|
||||
|
|
|
@ -179,7 +179,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
|||
// once an index is closed we can just clean up all the pending search context information
|
||||
// to release memory and let references to the filesystem go etc.
|
||||
IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index.getName());
|
||||
if (idxMeta != null && idxMeta.state() == IndexMetaData.State.CLOSE) {
|
||||
if (idxMeta != null && idxMeta.getState() == IndexMetaData.State.CLOSE) {
|
||||
// we need to check if it's really closed
|
||||
// since sometimes due to a relocation we already closed the shard and that causes the index to be closed
|
||||
// if we then close all the contexts we can get some search failures along the way which are not expected.
|
||||
|
@ -846,7 +846,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
|||
|
||||
@Override
|
||||
public TerminationHandle warmNewReaders(final IndexShard indexShard, IndexMetaData indexMetaData, final WarmerContext context, ThreadPool threadPool) {
|
||||
final Loading defaultLoading = Loading.parse(indexMetaData.settings().get(NORMS_LOADING_KEY), Loading.LAZY);
|
||||
final Loading defaultLoading = Loading.parse(indexMetaData.getSettings().get(NORMS_LOADING_KEY), Loading.LAZY);
|
||||
final MapperService mapperService = indexShard.mapperService();
|
||||
final ObjectSet<String> warmUp = new ObjectHashSet<>();
|
||||
for (DocumentMapper docMapper : mapperService.docMappers(false)) {
|
||||
|
@ -1064,7 +1064,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> {
|
|||
SearchContext context = null;
|
||||
try {
|
||||
long now = System.nanoTime();
|
||||
ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.numberOfShards(),
|
||||
ShardSearchRequest request = new ShardSearchLocalRequest(indexShard.shardId(), indexMetaData.getNumberOfShards(),
|
||||
SearchType.QUERY_THEN_FETCH, entry.source(), entry.types(), entry.requestCache());
|
||||
context = createContext(request, warmerContext.searcher());
|
||||
// if we use sort, we need to do query to sort on it and load relevant field data
|
||||
|
|
|
@ -251,14 +251,14 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
|
|||
// Index doesn't exist - create it and start recovery
|
||||
// Make sure that the index we are about to create has a validate name
|
||||
createIndexService.validateIndexName(renamedIndex, currentState);
|
||||
createIndexService.validateIndexSettings(renamedIndex, snapshotIndexMetaData.settings());
|
||||
createIndexService.validateIndexSettings(renamedIndex, snapshotIndexMetaData.getSettings());
|
||||
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN).index(renamedIndex);
|
||||
indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
|
||||
if (!request.includeAliases() && !snapshotIndexMetaData.aliases().isEmpty()) {
|
||||
indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
|
||||
if (!request.includeAliases() && !snapshotIndexMetaData.getAliases().isEmpty()) {
|
||||
// Remove all aliases - they shouldn't be restored
|
||||
indexMdBuilder.removeAllAliases();
|
||||
} else {
|
||||
for (ObjectCursor<String> alias : snapshotIndexMetaData.aliases().keys()) {
|
||||
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
|
||||
aliases.add(alias.value);
|
||||
}
|
||||
}
|
||||
|
@ -273,22 +273,22 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
|
|||
validateExistingIndex(currentIndexMetaData, snapshotIndexMetaData, renamedIndex, partial);
|
||||
// Index exists and it's closed - open it in metadata and start recovery
|
||||
IndexMetaData.Builder indexMdBuilder = IndexMetaData.builder(snapshotIndexMetaData).state(IndexMetaData.State.OPEN);
|
||||
indexMdBuilder.version(Math.max(snapshotIndexMetaData.version(), currentIndexMetaData.version() + 1));
|
||||
indexMdBuilder.version(Math.max(snapshotIndexMetaData.getVersion(), currentIndexMetaData.getVersion() + 1));
|
||||
if (!request.includeAliases()) {
|
||||
// Remove all snapshot aliases
|
||||
if (!snapshotIndexMetaData.aliases().isEmpty()) {
|
||||
if (!snapshotIndexMetaData.getAliases().isEmpty()) {
|
||||
indexMdBuilder.removeAllAliases();
|
||||
}
|
||||
/// Add existing aliases
|
||||
for (ObjectCursor<AliasMetaData> alias : currentIndexMetaData.aliases().values()) {
|
||||
for (ObjectCursor<AliasMetaData> alias : currentIndexMetaData.getAliases().values()) {
|
||||
indexMdBuilder.putAlias(alias.value);
|
||||
}
|
||||
} else {
|
||||
for (ObjectCursor<String> alias : snapshotIndexMetaData.aliases().keys()) {
|
||||
for (ObjectCursor<String> alias : snapshotIndexMetaData.getAliases().keys()) {
|
||||
aliases.add(alias.value);
|
||||
}
|
||||
}
|
||||
indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.settings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.indexUUID()));
|
||||
indexMdBuilder.settings(Settings.settingsBuilder().put(snapshotIndexMetaData.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, currentIndexMetaData.getIndexUUID()));
|
||||
IndexMetaData updatedIndexMetaData = indexMdBuilder.index(renamedIndex).build();
|
||||
rtBuilder.addAsRestore(updatedIndexMetaData, restoreSource);
|
||||
blocks.updateBlocks(updatedIndexMetaData);
|
||||
|
@ -359,7 +359,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
|
|||
|
||||
private void validateExistingIndex(IndexMetaData currentIndexMetaData, IndexMetaData snapshotIndexMetaData, String renamedIndex, boolean partial) {
|
||||
// Index exist - checking that it's closed
|
||||
if (currentIndexMetaData.state() != IndexMetaData.State.CLOSE) {
|
||||
if (currentIndexMetaData.getState() != IndexMetaData.State.CLOSE) {
|
||||
// TODO: Enable restore for open indices
|
||||
throw new SnapshotRestoreException(snapshotId, "cannot restore index [" + renamedIndex + "] because it's open");
|
||||
}
|
||||
|
@ -384,7 +384,7 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
|
|||
}
|
||||
Settings normalizedChangeSettings = Settings.settingsBuilder().put(changeSettings).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX).build();
|
||||
IndexMetaData.Builder builder = IndexMetaData.builder(indexMetaData);
|
||||
Map<String, String> settingsMap = new HashMap<>(indexMetaData.settings().getAsMap());
|
||||
Map<String, String> settingsMap = new HashMap<>(indexMetaData.getSettings().getAsMap());
|
||||
List<String> simpleMatchPatterns = new ArrayList<>();
|
||||
for (String ignoredSetting : ignoreSettings) {
|
||||
if (!Regex.isSimpleMatchPattern(ignoredSetting)) {
|
||||
|
|
|
@ -1023,13 +1023,13 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
|
|||
// The index was deleted before we managed to start the snapshot - mark it as missing.
|
||||
builder.put(new ShardId(index, 0), new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "missing index"));
|
||||
} else if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
|
||||
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
|
||||
ShardId shardId = new ShardId(index, i);
|
||||
builder.put(shardId, new SnapshotsInProgress.ShardSnapshotStatus(null, State.MISSING, "index is closed"));
|
||||
}
|
||||
} else {
|
||||
IndexRoutingTable indexRoutingTable = clusterState.getRoutingTable().index(index);
|
||||
for (int i = 0; i < indexMetaData.numberOfShards(); i++) {
|
||||
for (int i = 0; i < indexMetaData.getNumberOfShards(); i++) {
|
||||
ShardId shardId = new ShardId(index, i);
|
||||
if (indexRoutingTable != null) {
|
||||
ShardRouting primary = indexRoutingTable.shard(i).primaryShard();
|
||||
|
|
|
@ -261,17 +261,17 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
|
|||
RoutingTable.Builder routingTable = RoutingTable.builder(currentState.routingTable());
|
||||
// go over existing indices, and see if they need to be removed
|
||||
for (IndexMetaData index : currentState.metaData()) {
|
||||
String markedTribeName = index.settings().get(TRIBE_NAME);
|
||||
String markedTribeName = index.getSettings().get(TRIBE_NAME);
|
||||
if (markedTribeName != null && markedTribeName.equals(tribeName)) {
|
||||
IndexMetaData tribeIndex = tribeState.metaData().index(index.index());
|
||||
if (tribeIndex == null || tribeIndex.state() == IndexMetaData.State.CLOSE) {
|
||||
logger.info("[{}] removing index [{}]", tribeName, index.index());
|
||||
IndexMetaData tribeIndex = tribeState.metaData().index(index.getIndex());
|
||||
if (tribeIndex == null || tribeIndex.getState() == IndexMetaData.State.CLOSE) {
|
||||
logger.info("[{}] removing index [{}]", tribeName, index.getIndex());
|
||||
removeIndex(blocks, metaData, routingTable, index);
|
||||
} else {
|
||||
// always make sure to update the metadata and routing table, in case
|
||||
// there are changes in them (new mapping, shards moving from initializing to started)
|
||||
routingTable.add(tribeState.routingTable().index(index.index()));
|
||||
Settings tribeSettings = Settings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
|
||||
routingTable.add(tribeState.routingTable().index(index.getIndex()));
|
||||
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
|
||||
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
|
||||
}
|
||||
}
|
||||
|
@ -279,15 +279,15 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
|
|||
// go over tribe one, and see if they need to be added
|
||||
for (IndexMetaData tribeIndex : tribeState.metaData()) {
|
||||
// if there is no routing table yet, do nothing with it...
|
||||
IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.index());
|
||||
IndexRoutingTable table = tribeState.routingTable().index(tribeIndex.getIndex());
|
||||
if (table == null) {
|
||||
continue;
|
||||
}
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.index());
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
|
||||
if (indexMetaData == null) {
|
||||
if (!droppedIndices.contains(tribeIndex.index())) {
|
||||
if (!droppedIndices.contains(tribeIndex.getIndex())) {
|
||||
// a new index, add it, and add the tribe name as a setting
|
||||
logger.info("[{}] adding index [{}]", tribeName, tribeIndex.index());
|
||||
logger.info("[{}] adding index [{}]", tribeName, tribeIndex.getIndex());
|
||||
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
|
||||
}
|
||||
} else {
|
||||
|
@ -298,15 +298,15 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
|
|||
// we chose any tribe, carry on
|
||||
} else if (ON_CONFLICT_DROP.equals(onConflict)) {
|
||||
// drop the indices, there is a conflict
|
||||
logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.index(), existingFromTribe);
|
||||
logger.info("[{}] dropping index [{}] due to conflict with [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
|
||||
removeIndex(blocks, metaData, routingTable, tribeIndex);
|
||||
droppedIndices.add(tribeIndex.index());
|
||||
droppedIndices.add(tribeIndex.getIndex());
|
||||
} else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
|
||||
// on conflict, prefer a tribe...
|
||||
String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
|
||||
if (tribeName.equals(preferredTribeName)) {
|
||||
// the new one is hte preferred one, replace...
|
||||
logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.index(), existingFromTribe);
|
||||
logger.info("[{}] adding index [{}], preferred over [{}]", tribeName, tribeIndex.getIndex(), existingFromTribe);
|
||||
removeIndex(blocks, metaData, routingTable, tribeIndex);
|
||||
addNewIndex(tribeState, blocks, metaData, routingTable, tribeIndex);
|
||||
} // else: either the existing one is the preferred one, or we haven't seen one, carry on
|
||||
|
@ -319,23 +319,23 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
|
|||
}
|
||||
|
||||
private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData index) {
|
||||
metaData.remove(index.index());
|
||||
routingTable.remove(index.index());
|
||||
blocks.removeIndexBlocks(index.index());
|
||||
metaData.remove(index.getIndex());
|
||||
routingTable.remove(index.getIndex());
|
||||
blocks.removeIndexBlocks(index.getIndex());
|
||||
}
|
||||
|
||||
private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
|
||||
Settings tribeSettings = Settings.builder().put(tribeIndex.settings()).put(TRIBE_NAME, tribeName).build();
|
||||
Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME, tribeName).build();
|
||||
metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
|
||||
routingTable.add(tribeState.routingTable().index(tribeIndex.index()));
|
||||
if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.index())) {
|
||||
blocks.addIndexBlock(tribeIndex.index(), IndexMetaData.INDEX_METADATA_BLOCK);
|
||||
routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));
|
||||
if (Regex.simpleMatch(blockIndicesMetadata, tribeIndex.getIndex())) {
|
||||
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_METADATA_BLOCK);
|
||||
}
|
||||
if (Regex.simpleMatch(blockIndicesRead, tribeIndex.index())) {
|
||||
blocks.addIndexBlock(tribeIndex.index(), IndexMetaData.INDEX_READ_BLOCK);
|
||||
if (Regex.simpleMatch(blockIndicesRead, tribeIndex.getIndex())) {
|
||||
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_READ_BLOCK);
|
||||
}
|
||||
if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.index())) {
|
||||
blocks.addIndexBlock(tribeIndex.index(), IndexMetaData.INDEX_WRITE_BLOCK);
|
||||
if (Regex.simpleMatch(blockIndicesWrite, tribeIndex.getIndex())) {
|
||||
blocks.addIndexBlock(tribeIndex.getIndex(), IndexMetaData.INDEX_WRITE_BLOCK);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -158,9 +158,9 @@ public class ClusterHealthResponsesTests extends ESTestCase {
|
|||
}
|
||||
|
||||
IndexRoutingTable genIndexRoutingTable(IndexMetaData indexMetaData, ShardCounter counter) {
|
||||
IndexRoutingTable.Builder builder = IndexRoutingTable.builder(indexMetaData.index());
|
||||
for (int shard = 0; shard < indexMetaData.numberOfShards(); shard++) {
|
||||
builder.addIndexShard(genShardRoutingTable(indexMetaData.index(), shard, indexMetaData.getNumberOfReplicas(), counter));
|
||||
IndexRoutingTable.Builder builder = IndexRoutingTable.builder(indexMetaData.getIndex());
|
||||
for (int shard = 0; shard < indexMetaData.getNumberOfShards(); shard++) {
|
||||
builder.addIndexShard(genShardRoutingTable(indexMetaData.getIndex(), shard, indexMetaData.getNumberOfReplicas(), counter));
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
|
|
@ -54,7 +54,7 @@ public class CreateIndexIT extends ESIntegTestCase {
|
|||
assertThat(indices.size(), equalTo(1));
|
||||
IndexMetaData index = indices.get("test");
|
||||
assertThat(index, notNullValue());
|
||||
assertThat(index.creationDate(), equalTo(4l));
|
||||
assertThat(index.getCreationDate(), equalTo(4l));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
@ -72,7 +72,7 @@ public class CreateIndexIT extends ESIntegTestCase {
|
|||
assertThat(indices.size(), equalTo(1));
|
||||
IndexMetaData index = indices.get("test");
|
||||
assertThat(index, notNullValue());
|
||||
assertThat(index.creationDate(), allOf(lessThanOrEqualTo(timeAfterRequest), greaterThanOrEqualTo(timeBeforeRequest)));
|
||||
assertThat(index.getCreationDate(), allOf(lessThanOrEqualTo(timeAfterRequest), greaterThanOrEqualTo(timeBeforeRequest)));
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -326,7 +326,7 @@ public class ShardReplicationTests extends ESTestCase {
|
|||
|
||||
ClusterState state = stateWithStartedPrimary(index, true, randomInt(5));
|
||||
MetaData.Builder metaData = MetaData.builder(state.metaData());
|
||||
Settings.Builder settings = Settings.builder().put(metaData.get(index).settings());
|
||||
Settings.Builder settings = Settings.builder().put(metaData.get(index).getSettings());
|
||||
settings.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true);
|
||||
metaData.put(IndexMetaData.builder(metaData.get(index)).settings(settings));
|
||||
clusterService.setState(ClusterState.builder(state).metaData(metaData));
|
||||
|
|
|
@ -149,7 +149,7 @@ public class IndexAliasesIT extends ESIntegTestCase {
|
|||
logger.info("--> making sure that filter was stored with alias [alias1] and filter [user:kimchy]");
|
||||
ClusterState clusterState = admin().cluster().prepareState().get().getState();
|
||||
IndexMetaData indexMd = clusterState.metaData().index("test");
|
||||
assertThat(indexMd.aliases().get("alias1").filter().string(), equalTo("{\"term\":{\"user\":{\"value\":\"kimchy\",\"boost\":1.0}}}"));
|
||||
assertThat(indexMd.getAliases().get("alias1").filter().string(), equalTo("{\"term\":{\"user\":{\"value\":\"kimchy\",\"boost\":1.0}}}"));
|
||||
|
||||
}
|
||||
|
||||
|
|
|
@ -87,7 +87,7 @@ public class ClusterStateBackwardsCompatIT extends ESBackcompatTestCase {
|
|||
|
||||
IndexMetaData indexMetaData = response.getState().getMetaData().getIndices().get("test-blocks");
|
||||
assertNotNull(indexMetaData);
|
||||
assertTrue(indexMetaData.settings().getAsBoolean(block.getKey(), null));
|
||||
assertTrue(indexMetaData.getSettings().getAsBoolean(block.getKey(), null));
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
|
|
|
@ -510,17 +510,17 @@ public class ClusterStateDiffIT extends ESIntegTestCase {
|
|||
IndexMetaData.Builder builder = IndexMetaData.builder(part);
|
||||
switch (randomIntBetween(0, 3)) {
|
||||
case 0:
|
||||
builder.settings(Settings.builder().put(part.settings()).put(randomSettings(Settings.EMPTY)));
|
||||
builder.settings(Settings.builder().put(part.getSettings()).put(randomSettings(Settings.EMPTY)));
|
||||
break;
|
||||
case 1:
|
||||
if (randomBoolean() && part.aliases().isEmpty() == false) {
|
||||
builder.removeAlias(randomFrom(part.aliases().keys().toArray(String.class)));
|
||||
if (randomBoolean() && part.getAliases().isEmpty() == false) {
|
||||
builder.removeAlias(randomFrom(part.getAliases().keys().toArray(String.class)));
|
||||
} else {
|
||||
builder.putAlias(AliasMetaData.builder(randomAsciiOfLength(10)));
|
||||
}
|
||||
break;
|
||||
case 2:
|
||||
builder.settings(Settings.builder().put(part.settings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
|
||||
builder.settings(Settings.builder().put(part.getSettings()).put(IndexMetaData.SETTING_INDEX_UUID, Strings.randomBase64UUID()));
|
||||
break;
|
||||
case 3:
|
||||
builder.putCustom(IndexWarmersMetaData.TYPE, randomWarmers());
|
||||
|
|
|
@ -172,14 +172,14 @@ public class SimpleClusterStateIT extends ESIntegTestCase {
|
|||
client().admin().indices().close(Requests.closeIndexRequest("fuu")).get();
|
||||
clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*").get();
|
||||
assertThat(clusterStateResponse.getState().metaData().indices().size(), is(1));
|
||||
assertThat(clusterStateResponse.getState().metaData().index("foo").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(clusterStateResponse.getState().metaData().index("foo").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
|
||||
// expand_wildcards_closed should toggle return only closed index fuu
|
||||
IndicesOptions expandCloseOptions = IndicesOptions.fromOptions(false, true, false, true);
|
||||
clusterStateResponse = client().admin().cluster().prepareState().clear().setMetaData(true).setIndices("f*")
|
||||
.setIndicesOptions(expandCloseOptions).get();
|
||||
assertThat(clusterStateResponse.getState().metaData().indices().size(), is(1));
|
||||
assertThat(clusterStateResponse.getState().metaData().index("fuu").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(clusterStateResponse.getState().metaData().index("fuu").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
|
||||
// ignore_unavailable set to true should not raise exception on fzzbzz
|
||||
IndicesOptions ignoreUnavailabe = IndicesOptions.fromOptions(true, true, true, false);
|
||||
|
|
|
@ -74,7 +74,7 @@ public class AckIT extends ESIntegTestCase {
|
|||
.setSettings(Settings.builder().put("refresh_interval", 9999, TimeUnit.MILLISECONDS)));
|
||||
|
||||
for (Client client : clients()) {
|
||||
String refreshInterval = getLocalClusterState(client).metaData().index("test").settings().get("index.refresh_interval");
|
||||
String refreshInterval = getLocalClusterState(client).metaData().index("test").getSettings().get("index.refresh_interval");
|
||||
assertThat(refreshInterval, equalTo("9999ms"));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -151,142 +151,142 @@ public class ToAndFromJsonMetaDataTests extends ESTestCase {
|
|||
MetaData parsedMetaData = MetaData.Builder.fromXContent(XContentFactory.xContent(XContentType.JSON).createParser(metaDataSource));
|
||||
|
||||
IndexMetaData indexMetaData = parsedMetaData.index("test1");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(0));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test2");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(2));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(3));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(2));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(3));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(0));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test3");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(1));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(3));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test4");
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(0));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test5");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test6");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(0));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(0));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test7");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(4));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test8");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.aliases().size(), equalTo(2));
|
||||
assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getAliases().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2"));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test9");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.aliases().size(), equalTo(2));
|
||||
assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getAliases().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2"));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test10");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.aliases().size(), equalTo(2));
|
||||
assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getAliases().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2"));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test11");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.aliases().size(), equalTo(3));
|
||||
assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.aliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
|
||||
assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.aliases().get("alias2").filter(), nullValue());
|
||||
assertThat(indexMetaData.aliases().get("alias4").alias(), equalTo("alias4"));
|
||||
assertThat(indexMetaData.aliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(-1l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(5));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getAliases().size(), equalTo(3));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").filter(), nullValue());
|
||||
assertThat(indexMetaData.getAliases().get("alias4").alias(), equalTo("alias4"));
|
||||
assertThat(indexMetaData.getAliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
|
||||
|
||||
indexMetaData = parsedMetaData.index("test12");
|
||||
assertThat(indexMetaData.numberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.numberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.creationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.settings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.settings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.settings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.mappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.mappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.mappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.aliases().size(), equalTo(3));
|
||||
assertThat(indexMetaData.aliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.aliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
|
||||
assertThat(indexMetaData.aliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.aliases().get("alias2").filter(), nullValue());
|
||||
assertThat(indexMetaData.aliases().get("alias4").alias(), equalTo("alias4"));
|
||||
assertThat(indexMetaData.aliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
|
||||
assertThat(indexMetaData.getNumberOfShards(), equalTo(1));
|
||||
assertThat(indexMetaData.getNumberOfReplicas(), equalTo(2));
|
||||
assertThat(indexMetaData.getCreationDate(), equalTo(2l));
|
||||
assertThat(indexMetaData.getSettings().getAsMap().size(), equalTo(6));
|
||||
assertThat(indexMetaData.getSettings().get("setting1"), equalTo("value1"));
|
||||
assertThat(indexMetaData.getSettings().get("setting2"), equalTo("value2"));
|
||||
assertThat(indexMetaData.getMappings().size(), equalTo(2));
|
||||
assertThat(indexMetaData.getMappings().get("mapping1").source().string(), equalTo(MAPPING_SOURCE1));
|
||||
assertThat(indexMetaData.getMappings().get("mapping2").source().string(), equalTo(MAPPING_SOURCE2));
|
||||
assertThat(indexMetaData.getAliases().size(), equalTo(3));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").alias(), equalTo("alias1"));
|
||||
assertThat(indexMetaData.getAliases().get("alias1").filter().string(), equalTo(ALIAS_FILTER1));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").alias(), equalTo("alias2"));
|
||||
assertThat(indexMetaData.getAliases().get("alias2").filter(), nullValue());
|
||||
assertThat(indexMetaData.getAliases().get("alias4").alias(), equalTo("alias4"));
|
||||
assertThat(indexMetaData.getAliases().get("alias4").filter().string(), equalTo(ALIAS_FILTER2));
|
||||
|
||||
// templates
|
||||
assertThat(parsedMetaData.templates().get("foo").name(), is("foo"));
|
||||
|
|
|
@ -106,7 +106,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase {
|
|||
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(2).build();
|
||||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
|
||||
|
||||
assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(2));
|
||||
assertThat(clusterState.metaData().index("test").getNumberOfReplicas(), equalTo(2));
|
||||
|
||||
assertThat(prevRoutingTable != routingTable, equalTo(true));
|
||||
assertThat(routingTable.index("test").shards().size(), equalTo(1));
|
||||
|
@ -157,7 +157,7 @@ public class UpdateNumberOfReplicasTests extends ESAllocationTestCase {
|
|||
metaData = MetaData.builder(clusterState.metaData()).updateNumberOfReplicas(1).build();
|
||||
clusterState = ClusterState.builder(clusterState).routingTable(routingTable).metaData(metaData).build();
|
||||
|
||||
assertThat(clusterState.metaData().index("test").numberOfReplicas(), equalTo(1));
|
||||
assertThat(clusterState.metaData().index("test").getNumberOfReplicas(), equalTo(1));
|
||||
|
||||
assertThat(prevRoutingTable != routingTable, equalTo(true));
|
||||
assertThat(routingTable.index("test").shards().size(), equalTo(1));
|
||||
|
|
|
@ -726,7 +726,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
private void assertProperMetaDataForVersion(MetaData metaData, long version) {
|
||||
for (long i = 1; i <= version; i++) {
|
||||
assertThat(metaData.index("test" + i), notNullValue());
|
||||
assertThat(metaData.index("test" + i).numberOfShards(), equalTo((int) i));
|
||||
assertThat(metaData.index("test" + i).getNumberOfShards(), equalTo((int) i));
|
||||
}
|
||||
assertThat(metaData.index("test" + (version + 1)), nullValue());
|
||||
assertThat(metaData.transientSettings().get("test"), equalTo(Long.toString(version)));
|
||||
|
|
|
@ -94,7 +94,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
ensureGreen();
|
||||
|
||||
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
|
||||
|
||||
|
@ -105,7 +105,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
client().admin().indices().prepareClose("test").get();
|
||||
|
||||
stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
|
||||
|
||||
logger.info("--> verifying that the state is green");
|
||||
|
@ -131,7 +131,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
ensureGreen();
|
||||
|
||||
stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
|
||||
|
||||
|
@ -142,7 +142,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
logger.info("--> closing test index...");
|
||||
client().admin().indices().prepareClose("test").execute().actionGet();
|
||||
stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
|
||||
|
||||
logger.info("--> restarting nodes...");
|
||||
|
@ -151,7 +151,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
ensureGreen();
|
||||
|
||||
stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
|
||||
|
||||
logger.info("--> trying to index into a closed index ...");
|
||||
|
@ -169,7 +169,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
ensureGreen();
|
||||
|
||||
stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(test.numPrimaries));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(test.totalNumShards));
|
||||
|
||||
|
@ -246,7 +246,7 @@ public class GatewayIndexStateIT extends ESIntegTestCase {
|
|||
client().admin().indices().prepareClose("test").execute().actionGet();
|
||||
|
||||
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().execute().actionGet();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
|
||||
|
||||
logger.info("--> opening the index...");
|
||||
|
|
|
@ -184,7 +184,7 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
|
|||
|
||||
if (expectMetaData) {
|
||||
assertThat(indices.hasNext(), equalTo(true));
|
||||
assertThat(indices.next().getNewMetaData().index(), equalTo("test"));
|
||||
assertThat(indices.next().getNewMetaData().getIndex(), equalTo("test"));
|
||||
assertThat(indices.hasNext(), equalTo(false));
|
||||
} else {
|
||||
assertThat(indices.hasNext(), equalTo(false));
|
||||
|
|
|
@ -31,7 +31,6 @@ import org.elasticsearch.Version;
|
|||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.collect.ImmutableOpenMap;
|
||||
import org.elasticsearch.common.collect.Iterators;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.xcontent.ToXContent;
|
||||
|
@ -365,9 +364,9 @@ public class MetaDataStateFormatTests extends ESTestCase {
|
|||
for (IndexMetaData original : latestMetaData) {
|
||||
IndexMetaData deserialized = indices.get(original.getIndex());
|
||||
assertThat(deserialized, notNullValue());
|
||||
assertThat(deserialized.version(), equalTo(original.version()));
|
||||
assertThat(deserialized.numberOfReplicas(), equalTo(original.numberOfReplicas()));
|
||||
assertThat(deserialized.numberOfShards(), equalTo(original.numberOfShards()));
|
||||
assertThat(deserialized.getVersion(), equalTo(original.getVersion()));
|
||||
assertThat(deserialized.getNumberOfReplicas(), equalTo(original.getNumberOfReplicas()));
|
||||
assertThat(deserialized.getNumberOfShards(), equalTo(original.getNumberOfShards()));
|
||||
}
|
||||
|
||||
// now corrupt all the latest ones and make sure we fail to load the state
|
||||
|
|
|
@ -33,7 +33,6 @@ import org.elasticsearch.test.InternalTestCluster;
|
|||
import org.junit.Test;
|
||||
|
||||
import java.util.LinkedHashMap;
|
||||
import java.util.concurrent.Future;
|
||||
|
||||
import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder;
|
||||
import static org.elasticsearch.test.ESIntegTestCase.Scope;
|
||||
|
@ -116,7 +115,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
|
|||
// make sure it was also written on red node although index is closed
|
||||
ImmutableOpenMap<String, IndexMetaData> indicesMetaData = getIndicesMetaDataOnNode(dataNode);
|
||||
assertNotNull(((LinkedHashMap) (indicesMetaData.get(index).getMappings().get("doc").getSourceAsMap().get("properties"))).get("integer_field"));
|
||||
assertThat(indicesMetaData.get(index).state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
|
||||
/* Try the same and see if this also works if node was just restarted.
|
||||
* Each node holds an array of indices it knows of and checks if it should
|
||||
|
@ -141,12 +140,12 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
|
|||
// make sure it was also written on red node although index is closed
|
||||
indicesMetaData = getIndicesMetaDataOnNode(dataNode);
|
||||
assertNotNull(((LinkedHashMap) (indicesMetaData.get(index).getMappings().get("doc").getSourceAsMap().get("properties"))).get("float_field"));
|
||||
assertThat(indicesMetaData.get(index).state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
|
||||
// finally check that meta data is also written of index opened again
|
||||
assertAcked(client().admin().indices().prepareOpen(index).get());
|
||||
indicesMetaData = getIndicesMetaDataOnNode(dataNode);
|
||||
assertThat(indicesMetaData.get(index).state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
}
|
||||
|
||||
protected void assertIndexNotInMetaState(String nodeName, String indexName) throws Exception {
|
||||
|
|
|
@ -325,8 +325,8 @@ public class RecoveryFromGatewayIT extends ESIntegTestCase {
|
|||
ClusterState state = client().admin().cluster().prepareState().execute().actionGet().getState();
|
||||
assertThat(state.metaData().index("test").mapping("type2"), notNullValue());
|
||||
assertThat(state.metaData().templates().get("template_1").template(), equalTo("te*"));
|
||||
assertThat(state.metaData().index("test").aliases().get("test_alias"), notNullValue());
|
||||
assertThat(state.metaData().index("test").aliases().get("test_alias").filter(), notNullValue());
|
||||
assertThat(state.metaData().index("test").getAliases().get("test_alias"), notNullValue());
|
||||
assertThat(state.metaData().index("test").getAliases().get("test_alias").filter(), notNullValue());
|
||||
}
|
||||
|
||||
@Test
|
||||
|
|
|
@ -63,8 +63,8 @@ public class UpdateSettingsIT extends ESIntegTestCase {
|
|||
}
|
||||
|
||||
IndexMetaData indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
|
||||
assertThat(indexMetaData.settings().get("index.refresh_interval"), nullValue());
|
||||
assertThat(indexMetaData.settings().get("index.cache.filter.type"), nullValue());
|
||||
assertThat(indexMetaData.getSettings().get("index.refresh_interval"), nullValue());
|
||||
assertThat(indexMetaData.getSettings().get("index.cache.filter.type"), nullValue());
|
||||
|
||||
// Now verify via dedicated get settings api:
|
||||
GetSettingsResponse getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
|
||||
|
@ -78,7 +78,7 @@ public class UpdateSettingsIT extends ESIntegTestCase {
|
|||
.execute().actionGet();
|
||||
|
||||
indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
|
||||
assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("-1"));
|
||||
assertThat(indexMetaData.getSettings().get("index.refresh_interval"), equalTo("-1"));
|
||||
// Now verify via dedicated get settings api:
|
||||
getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
|
||||
assertThat(getSettingsResponse.getSetting("test", "index.refresh_interval"), equalTo("-1"));
|
||||
|
@ -110,8 +110,8 @@ public class UpdateSettingsIT extends ESIntegTestCase {
|
|||
.execute().actionGet();
|
||||
|
||||
indexMetaData = client().admin().cluster().prepareState().execute().actionGet().getState().metaData().index("test");
|
||||
assertThat(indexMetaData.settings().get("index.refresh_interval"), equalTo("1s"));
|
||||
assertThat(indexMetaData.settings().get("index.cache.filter.type"), equalTo("none"));
|
||||
assertThat(indexMetaData.getSettings().get("index.refresh_interval"), equalTo("1s"));
|
||||
assertThat(indexMetaData.getSettings().get("index.cache.filter.type"), equalTo("none"));
|
||||
|
||||
// Now verify via dedicated get settings api:
|
||||
getSettingsResponse = client().admin().indices().prepareGetSettings("test").get();
|
||||
|
|
|
@ -60,7 +60,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
|
|||
NumShards numShards = getNumShards("test");
|
||||
|
||||
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
|
||||
|
||||
|
@ -72,7 +72,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
|
|||
assertThat(closeIndexResponse.isAcknowledged(), equalTo(true));
|
||||
|
||||
stateResponse = client().admin().cluster().prepareState().get();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().routingTable().index("test"), nullValue());
|
||||
|
||||
logger.info("--> trying to index into a closed index ...");
|
||||
|
@ -91,7 +91,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
|
|||
ensureGreen();
|
||||
|
||||
stateResponse = client().admin().cluster().prepareState().get();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
|
||||
|
@ -127,7 +127,7 @@ public class SimpleIndexStateIT extends ESIntegTestCase {
|
|||
NumShards numShards = getNumShards("test");
|
||||
|
||||
ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
|
||||
assertThat(stateResponse.getState().metaData().index("test").state(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().metaData().index("test").getState(), equalTo(IndexMetaData.State.OPEN));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shards().size(), equalTo(numShards.numPrimaries));
|
||||
assertThat(stateResponse.getState().routingTable().index("test").shardsWithState(ShardRoutingState.STARTED).size(), equalTo(numShards.totalNumShards));
|
||||
|
||||
|
|
|
@ -961,7 +961,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
|
|||
logger.info("--> closing index test-idx-closed");
|
||||
assertAcked(client.admin().indices().prepareClose("test-idx-closed"));
|
||||
ClusterStateResponse stateResponse = client.admin().cluster().prepareState().get();
|
||||
assertThat(stateResponse.getState().metaData().index("test-idx-closed").state(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().metaData().index("test-idx-closed").getState(), equalTo(IndexMetaData.State.CLOSE));
|
||||
assertThat(stateResponse.getState().routingTable().index("test-idx-closed"), nullValue());
|
||||
|
||||
logger.info("--> snapshot");
|
||||
|
|
|
@ -109,7 +109,6 @@ import org.elasticsearch.index.translog.TranslogConfig;
|
|||
import org.elasticsearch.index.translog.TranslogWriter;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.cache.request.IndicesRequestCache;
|
||||
import org.elasticsearch.indices.flush.IndicesSyncedFlushResult;
|
||||
import org.elasticsearch.indices.flush.SyncedFlushService;
|
||||
import org.elasticsearch.indices.store.IndicesStore;
|
||||
import org.elasticsearch.node.Node;
|
||||
|
@ -1892,8 +1891,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
|
|||
protected NumShards getNumShards(String index) {
|
||||
MetaData metaData = client().admin().cluster().prepareState().get().getState().metaData();
|
||||
assertThat(metaData.hasIndex(index), equalTo(true));
|
||||
int numShards = Integer.valueOf(metaData.index(index).settings().get(SETTING_NUMBER_OF_SHARDS));
|
||||
int numReplicas = Integer.valueOf(metaData.index(index).settings().get(SETTING_NUMBER_OF_REPLICAS));
|
||||
int numShards = Integer.valueOf(metaData.index(index).getSettings().get(SETTING_NUMBER_OF_SHARDS));
|
||||
int numReplicas = Integer.valueOf(metaData.index(index).getSettings().get(SETTING_NUMBER_OF_REPLICAS));
|
||||
return new NumShards(numShards, numReplicas);
|
||||
}
|
||||
|
||||
|
|
Loading…
Reference in New Issue