Resolve index names to Index instances early
Today index names are often resolved lazily, only when they are really needed. This is problematic especially for mapping updates: a node sends a mapping update to the master, but if the index changes for whatever reason while the request is in flight, we would still apply the update, because the name of the index is used to identify it in the cluster state. The problem is that index names can be reused, which happens in practice and sometimes even in an automated way, so this failure mode is realistic. With this change we resolve the index, including its UUID, as early as possible in places where changes to the cluster state are possible. For instance, mapping updates on a node use a concrete index rather than its name, and the master will fail the mapping update if the index can't be found by its <name, uuid> tuple. Closes #17048
This commit is contained in:
parent c90b4f3bae
commit 31740e279f
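To make the idea concrete before the diff: the sketch below is illustrative only (it is not part of the commit) and shows how a node-side caller can pin a mapping update to a concrete index up front, using the IndexNameExpressionResolver and PutMappingRequest#setConcreteIndex APIs touched in the hunks below. The helper name and the surrounding plumbing are assumptions for the sake of the example.

// Illustrative sketch only: resolve the target to a concrete Index (<name, uuid>)
// against the current cluster state and attach it to the request, instead of letting
// the master re-resolve a reusable index name later.
static PutMappingRequest pinToConcreteIndex(IndexNameExpressionResolver resolver,
                                            ClusterState state,
                                            PutMappingRequest request) {
    Index concreteIndex = resolver.concreteSingleIndex(state, request); // resolve early, once
    return request.setConcreteIndex(concreteIndex);                     // carry <name, uuid>, not just the name
}

On the master, TransportPutMappingAction then uses request.getConcreteIndex() directly instead of re-resolving the name, as the corresponding hunk below shows.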
@ -213,7 +213,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||
}
|
||||
if (request.indices() != null && request.indices().length > 0) {
|
||||
try {
|
||||
indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices());
|
||||
indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), request.indices());
|
||||
waitForCounter++;
|
||||
} catch (IndexNotFoundException e) {
|
||||
response.setStatus(ClusterHealthStatus.RED); // no indices, make sure its RED
|
||||
|
@ -280,7 +280,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
|
|||
|
||||
String[] concreteIndices;
|
||||
try {
|
||||
concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
|
||||
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
} catch (IndexNotFoundException e) {
|
||||
// one of the specified indices is not there - treat it as RED.
|
||||
ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,
|
||||
|
|
|
@ -235,7 +235,7 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
|
|||
breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
|
||||
scriptStats = in.readOptionalStreamable(ScriptStats::new);
|
||||
discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
|
||||
ingestStats = in.readOptionalWritable(IngestStats.PROTO);
|
||||
ingestStats = in.readOptionalWritable(IngestStats.PROTO::readFrom);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -67,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
|
|||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
index = Index.readIndex(in);
|
||||
index = new Index(in);
|
||||
shardId = in.readVInt();
|
||||
shards = new ShardRouting[in.readVInt()];
|
||||
for (int i = 0; i < shards.length; i++) {
|
||||
|
|
|
@ -59,7 +59,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -70,7 +70,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
|
|||
@Override
|
||||
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices());
|
||||
Set<String> nodeIds = new HashSet<>();
|
||||
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
|
||||
|
|
|
@ -66,7 +66,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
|
|||
if (clusterBlockException != null) {
|
||||
return clusterBlockException;
|
||||
}
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -106,7 +106,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
|
|||
}
|
||||
|
||||
if (request.indices().length > 0) {
|
||||
String[] indices = indexNameExpressionResolver.concreteIndices(currentState, request);
|
||||
String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request);
|
||||
for (String filteredIndex : indices) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex);
|
||||
if (indexMetaData != null) {
|
||||
|
|
|
@ -90,11 +90,11 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
|
|||
Set<String> aliases = new HashSet<>();
|
||||
for (AliasActions action : actions) {
|
||||
//expand indices
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), action.indices());
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices());
|
||||
//collect the aliases
|
||||
Collections.addAll(aliases, action.aliases());
|
||||
for (String index : concreteIndices) {
|
||||
for (String alias : action.concreteAliases(state.metaData(), index)) {
|
||||
for (String alias : action.concreteAliases(state.metaData(), index)) {
|
||||
AliasAction finalAction = new AliasAction(action.aliasAction());
|
||||
finalAction.index(index);
|
||||
finalAction.alias(alias);
|
||||
|
|
|
@ -50,7 +50,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction<G
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -60,7 +60,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction<G
|
|||
|
||||
@Override
|
||||
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) {
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
|
||||
boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices);
|
||||
listener.onResponse(new AliasesExistResponse(result));
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -63,7 +63,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
|
|||
|
||||
@Override
|
||||
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
|
||||
@SuppressWarnings("unchecked")
|
||||
ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
|
||||
listener.onResponse(new GetAliasesResponse(result));
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.elasticsearch.common.settings.ClusterSettings;
|
|||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -88,12 +89,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
|
||||
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
|
||||
.indices(concreteIndices);
|
||||
|
|
|
@ -31,10 +31,15 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.Set;
|
||||
|
||||
/**
|
||||
* Delete index action.
|
||||
*/
|
||||
|
@ -70,13 +75,13 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
if (concreteIndices.length == 0) {
|
||||
final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request)));
|
||||
if (concreteIndices.isEmpty()) {
|
||||
listener.onResponse(new DeleteIndexResponse(true));
|
||||
return;
|
||||
}
|
||||
|
|
|
@ -60,7 +60,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction<
|
|||
protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) {
|
||||
//make sure through indices options that the concrete indices call never throws IndexMissingException
|
||||
IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed());
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, indicesOptions, request.indices()));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices()));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -68,7 +68,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction<
|
|||
boolean exists;
|
||||
try {
|
||||
// Similar as the previous behaviour, but now also aliases and wildcards are supported.
|
||||
indexNameExpressionResolver.concreteIndices(state, request);
|
||||
indexNameExpressionResolver.concreteIndexNames(state, request);
|
||||
exists = true;
|
||||
} catch (IndexNotFoundException e) {
|
||||
exists = false;
|
||||
|
|
|
@ -57,12 +57,12 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<Ty
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) {
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices());
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices());
|
||||
if (concreteIndices.length == 0) {
|
||||
listener.onResponse(new TypesExistsResponse(false));
|
||||
return;
|
||||
|
|
|
@ -46,10 +46,9 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
|
|||
@Inject
|
||||
public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
|
||||
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
|
||||
actionFilters, indexNameExpressionResolver, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
|
||||
actionFilters, indexNameExpressionResolver, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -60,7 +60,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -56,7 +56,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
|
|||
@Override
|
||||
protected void doExecute(GetFieldMappingsRequest request, final ActionListener<GetFieldMappingsResponse> listener) {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
final AtomicInteger indexCounter = new AtomicInteger();
|
||||
final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
|
||||
final AtomicReferenceArray<Object> indexResponses = new AtomicReferenceArray<>(concreteIndices.length);
|
||||
|
|
|
@ -52,7 +52,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -32,9 +32,11 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
import static org.elasticsearch.action.ValidateActions.addValidationError;
|
||||
|
||||
|
@ -65,6 +67,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
|
|||
private String source;
|
||||
|
||||
private boolean updateAllTypes = false;
|
||||
private Index concreteIndex;
|
||||
|
||||
public PutMappingRequest() {
|
||||
}
|
||||
|
@ -90,6 +93,9 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
|
|||
} else if (source.isEmpty()) {
|
||||
validationException = addValidationError("mapping source is empty", validationException);
|
||||
}
|
||||
if (concreteIndex != null && (indices != null && indices.length > 0)) {
|
||||
validationException = addValidationError("either concreteIndices or unresolved indices can be set", validationException);
|
||||
}
|
||||
return validationException;
|
||||
}
|
||||
|
||||
|
@ -102,6 +108,22 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
|
|||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets a concrete index for this put mapping request.
|
||||
*/
|
||||
public PutMappingRequest setConcreteIndex(Index index) {
|
||||
Objects.requireNonNull(indices, "index must not be null");
|
||||
this.concreteIndex = index;
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns a concrete index for this mapping or <code>null</code> if no concrete index is defined
|
||||
*/
|
||||
public Index getConcreteIndex() {
|
||||
return concreteIndex;
|
||||
}
|
||||
|
||||
/**
|
||||
* The indices the mappings will be put.
|
||||
*/
|
||||
|
@ -259,6 +281,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
|
|||
source = in.readString();
|
||||
updateAllTypes = in.readBoolean();
|
||||
readTimeout(in);
|
||||
concreteIndex = in.readOptionalWritable(Index::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -270,5 +293,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
|
|||
out.writeString(source);
|
||||
out.writeBoolean(updateAllTypes);
|
||||
writeTimeout(out);
|
||||
out.writeOptionalWriteable(concreteIndex);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions;
|
|||
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
|
||||
import org.elasticsearch.client.ElasticsearchClient;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.util.Map;
|
||||
|
||||
|
@ -40,6 +41,11 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
|
|||
return this;
|
||||
}
|
||||
|
||||
public PutMappingRequestBuilder setConcreteIndex(Index index) {
|
||||
request.setConcreteIndex(index);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Specifies what type of requested indices to ignore and wildcard indices expressions.
|
||||
* <p>
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -63,13 +64,19 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
String[] indices;
|
||||
if (request.getConcreteIndex() == null) {
|
||||
indices = indexNameExpressionResolver.concreteIndexNames(state, request);
|
||||
} else {
|
||||
indices = new String[] {request.getConcreteIndex().getName()};
|
||||
}
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) {
|
||||
try {
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()};
|
||||
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
|
||||
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
|
||||
.indices(concreteIndices).type(request.type())
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -73,12 +74,12 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(OpenIndexRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
|
||||
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
|
||||
.indices(concreteIndices);
|
||||
|
|
|
@ -48,10 +48,9 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
|
|||
@Inject
|
||||
public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
|
||||
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
|
||||
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
|
||||
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.elasticsearch.common.regex.Regex;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsFilter;
|
||||
import org.elasticsearch.common.util.CollectionUtils;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
|
@ -61,7 +62,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(GetSettingsRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
|
||||
|
@ -72,9 +73,9 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
|
|||
|
||||
@Override
|
||||
protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) {
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
ImmutableOpenMap.Builder<String, Settings> indexToSettingsBuilder = ImmutableOpenMap.builder();
|
||||
for (String concreteIndex : concreteIndices) {
|
||||
for (Index concreteIndex : concreteIndices) {
|
||||
IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex);
|
||||
if (indexMetaData == null) {
|
||||
continue;
|
||||
|
@ -93,7 +94,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
|
|||
}
|
||||
settings = settingsBuilder.build();
|
||||
}
|
||||
indexToSettingsBuilder.put(concreteIndex, settings);
|
||||
indexToSettingsBuilder.put(concreteIndex.getName(), settings);
|
||||
}
|
||||
listener.onResponse(new GetSettingsResponse(indexToSettingsBuilder.build()));
|
||||
}
|
||||
|
|
|
@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
|
@ -65,7 +66,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
|
|||
if (request.settings().getAsMap().size() == 1 && IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())) {
|
||||
return null;
|
||||
}
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -75,7 +76,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
|
|||
|
||||
@Override
|
||||
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) {
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
|
||||
.indices(concreteIndices)
|
||||
.settings(request.settings())
|
||||
|
|
|
@ -87,7 +87,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
|
|||
protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener<IndicesShardStoresResponse> listener) {
|
||||
final RoutingTable routingTables = state.routingTable();
|
||||
final RoutingNodes routingNodes = state.getRoutingNodes();
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
|
||||
final Set<ShardId> shardIdsToFetch = new HashSet<>();
|
||||
|
||||
logger.trace("using cluster state version [{}] to determine shards", state.version());
|
||||
|
@ -115,7 +115,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
|
|||
|
||||
@Override
|
||||
protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
|
||||
}
|
||||
|
||||
private class AsyncShardStoresInfoFetches {
|
||||
|
|
|
@ -47,6 +47,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.indices.IndexAlreadyExistsException;
|
||||
|
@ -245,17 +246,18 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
|
||||
continue;
|
||||
}
|
||||
String concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
|
||||
Index concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
MappingMetaData mappingMd = null;
|
||||
if (metaData.hasIndex(concreteIndex)) {
|
||||
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
|
||||
final IndexMetaData indexMetaData = metaData.index(concreteIndex);
|
||||
if (indexMetaData != null) {
|
||||
mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
|
||||
}
|
||||
try {
|
||||
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
|
||||
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex.getName());
|
||||
} catch (ElasticsearchParseException | RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), indexRequest.type(), indexRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
|
@ -263,9 +265,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
}
|
||||
} else if (request instanceof DeleteRequest) {
|
||||
try {
|
||||
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex, (DeleteRequest)request);
|
||||
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest)request);
|
||||
} catch(RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
|
@ -274,9 +276,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
|
||||
} else if (request instanceof UpdateRequest) {
|
||||
try {
|
||||
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex, (UpdateRequest)request);
|
||||
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest)request);
|
||||
} catch(RoutingMissingException e) {
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
|
||||
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure);
|
||||
responses.set(i, bulkItemResponse);
|
||||
// make sure the request gets never processed again
|
||||
|
@ -294,7 +296,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
ActionRequest request = bulkRequest.requests.get(i);
|
||||
if (request instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index());
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName();
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.type(), indexRequest.id(), indexRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
|
@ -304,7 +306,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
list.add(new BulkItemRequest(i, request));
|
||||
} else if (request instanceof DeleteRequest) {
|
||||
DeleteRequest deleteRequest = (DeleteRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName();
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
|
@ -314,7 +316,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
list.add(new BulkItemRequest(i, request));
|
||||
} else if (request instanceof UpdateRequest) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) request;
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName();
|
||||
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
|
||||
List<BulkItemRequest> list = requestsByShard.get(shardId);
|
||||
if (list == null) {
|
||||
|
@ -356,18 +358,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
public void onFailure(Throwable e) {
|
||||
// create failures for all relevant requests
|
||||
for (BulkItemRequest request : requests) {
|
||||
final String indexName = concreteIndices.getConcreteIndex(request.index()).getName();
|
||||
if (request.request() instanceof IndexRequest) {
|
||||
IndexRequest indexRequest = (IndexRequest) request.request();
|
||||
responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
|
||||
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(indexRequest.index()), indexRequest.type(), indexRequest.id(), e)));
|
||||
new BulkItemResponse.Failure(indexName, indexRequest.type(), indexRequest.id(), e)));
|
||||
} else if (request.request() instanceof DeleteRequest) {
|
||||
DeleteRequest deleteRequest = (DeleteRequest) request.request();
|
||||
responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
|
||||
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(deleteRequest.index()), deleteRequest.type(), deleteRequest.id(), e)));
|
||||
new BulkItemResponse.Failure(indexName, deleteRequest.type(), deleteRequest.id(), e)));
|
||||
} else if (request.request() instanceof UpdateRequest) {
|
||||
UpdateRequest updateRequest = (UpdateRequest) request.request();
|
||||
responses.set(request.id(), new BulkItemResponse(request.id(), "update",
|
||||
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(updateRequest.index()), updateRequest.type(), updateRequest.id(), e)));
|
||||
new BulkItemResponse.Failure(indexName, updateRequest.type(), updateRequest.id(), e)));
|
||||
}
|
||||
}
|
||||
if (counter.decrementAndGet() == 0) {
|
||||
|
@ -385,7 +388,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
private boolean addFailureIfIndexIsUnavailable(DocumentRequest request, BulkRequest bulkRequest, AtomicArray<BulkItemResponse> responses, int idx,
|
||||
final ConcreteIndices concreteIndices,
|
||||
final MetaData metaData) {
|
||||
String concreteIndex = concreteIndices.getConcreteIndex(request.index());
|
||||
Index concreteIndex = concreteIndices.getConcreteIndex(request.index());
|
||||
Exception unavailableException = null;
|
||||
if (concreteIndex == null) {
|
||||
try {
|
||||
|
@ -397,9 +400,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
}
|
||||
}
|
||||
if (unavailableException == null) {
|
||||
IndexMetaData indexMetaData = metaData.index(concreteIndex);
|
||||
IndexMetaData indexMetaData = metaData.getIndexSafe(concreteIndex);
|
||||
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
unavailableException = new IndexClosedException(metaData.index(request.index()).getIndex());
|
||||
unavailableException = new IndexClosedException(concreteIndex);
|
||||
}
|
||||
}
|
||||
if (unavailableException != null) {
|
||||
|
@ -425,19 +428,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
|
|||
private static class ConcreteIndices {
|
||||
private final ClusterState state;
|
||||
private final IndexNameExpressionResolver indexNameExpressionResolver;
|
||||
private final Map<String, String> indices = new HashMap<>();
|
||||
private final Map<String, Index> indices = new HashMap<>();
|
||||
|
||||
ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
this.state = state;
|
||||
this.indexNameExpressionResolver = indexNameExpressionResolver;
|
||||
}
|
||||
|
||||
String getConcreteIndex(String indexOrAlias) {
|
||||
Index getConcreteIndex(String indexOrAlias) {
|
||||
return indices.get(indexOrAlias);
|
||||
}
|
||||
|
||||
String resolveIfAbsent(DocumentRequest request) {
|
||||
String concreteIndex = indices.get(request.index());
|
||||
Index resolveIfAbsent(DocumentRequest request) {
|
||||
Index concreteIndex = indices.get(request.index());
|
||||
if (concreteIndex == null) {
|
||||
concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request);
|
||||
indices.put(request.index(), concreteIndex);
|
||||
|
|
|
@ -47,7 +47,6 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
|
@ -75,17 +74,19 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
|
|||
|
||||
private final UpdateHelper updateHelper;
|
||||
private final boolean allowIdGeneration;
|
||||
private final MappingUpdatedAction mappingUpdatedAction;
|
||||
|
||||
@Inject
|
||||
public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
|
||||
MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
|
||||
actionFilters, indexNameExpressionResolver,
|
||||
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
|
||||
actionFilters, indexNameExpressionResolver,
|
||||
BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK);
|
||||
this.updateHelper = updateHelper;
|
||||
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
|
||||
this.mappingUpdatedAction = mappingUpdatedAction;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -60,10 +60,10 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
|
|||
public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
|
||||
TransportCreateIndexAction createIndexAction, ActionFilters actionFilters,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver, MappingUpdatedAction mappingUpdatedAction,
|
||||
IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex) {
|
||||
super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
|
||||
mappingUpdatedAction, actionFilters, indexNameExpressionResolver,
|
||||
actionFilters, indexNameExpressionResolver,
|
||||
DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX);
|
||||
this.createIndexAction = createIndexAction;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
|
|
|
@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
|
|||
continue;
|
||||
}
|
||||
item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), item.index()));
|
||||
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item);
|
||||
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
|
||||
if (item.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type())) {
|
||||
responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(concreteSingleIndex, item.type(), item.id(),
|
||||
new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]"))));
|
||||
|
|
|
@ -42,6 +42,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
|
|||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.common.xcontent.XContentHelper;
|
||||
import org.elasticsearch.common.xcontent.XContentType;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.VersionType;
|
||||
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
|
||||
|
@ -584,14 +585,6 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
return this.versionType;
|
||||
}
|
||||
|
||||
private Version getVersion(MetaData metaData, String concreteIndex) {
|
||||
// this can go away in 3.0 but is here now for easy backporting - since in 2.x we need the version on the timestamp stuff
|
||||
final IndexMetaData indexMetaData = metaData.getIndices().get(concreteIndex);
|
||||
if (indexMetaData == null) {
|
||||
throw new IndexNotFoundException(concreteIndex);
|
||||
}
|
||||
return Version.indexCreated(indexMetaData.getSettings());
|
||||
}
|
||||
|
||||
public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) {
|
||||
// resolve the routing if needed
|
||||
|
@ -600,8 +593,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
// resolve timestamp if provided externally
|
||||
if (timestamp != null) {
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp,
|
||||
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER,
|
||||
getVersion(metaData, concreteIndex));
|
||||
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
|
||||
}
|
||||
if (mappingMd != null) {
|
||||
// might as well check for routing here
|
||||
|
@ -645,7 +637,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
|
|||
// assigned again because mappingMd and
|
||||
// mappingMd#timestamp() are not null
|
||||
assert mappingMd != null;
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex));
|
||||
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -69,6 +69,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
|
|||
private final TransportCreateIndexAction createIndexAction;
|
||||
|
||||
private final ClusterService clusterService;
|
||||
private final MappingUpdatedAction mappingUpdatedAction;
|
||||
|
||||
@Inject
|
||||
public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
|
||||
|
@ -76,8 +77,9 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
|
|||
TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
|
||||
AutoCreateIndex autoCreateIndex) {
|
||||
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
|
||||
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
|
||||
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
|
||||
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
|
||||
this.mappingUpdatedAction = mappingUpdatedAction;
|
||||
this.createIndexAction = createIndexAction;
|
||||
this.autoCreateIndex = autoCreateIndex;
|
||||
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
|
||||
|
@ -143,7 +145,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
|
|||
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
|
||||
|
||||
// validate, if routing is required, that we got routing
|
||||
IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
|
||||
IndexMetaData indexMetaData = metaData.getIndexSafe(request.shardId().getIndex());
|
||||
MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
|
||||
if (mappingMd != null && mappingMd.routing().required()) {
|
||||
if (request.routing() == null) {
|
||||
|
@ -205,8 +207,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
|
|||
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
final ShardId shardId = indexShard.shardId();
|
||||
if (update != null) {
|
||||
final String indexName = shardId.getIndexName();
|
||||
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
|
||||
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
|
||||
operation = prepareIndexOperationOnPrimary(request, indexShard);
|
||||
update = operation.parsedDoc().dynamicMappingsUpdate();
|
||||
if (update != null) {
|
||||
|
|
|
@ -173,7 +173,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
|
|||
PercolateRequest percolateRequest = (PercolateRequest) element;
|
||||
String[] concreteIndices;
|
||||
try {
|
||||
concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, percolateRequest);
|
||||
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, percolateRequest);
|
||||
} catch (IndexNotFoundException e) {
|
||||
reducedResponses.set(slot, e);
|
||||
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
|
||||
|
|
|
@ -96,7 +96,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
|
|||
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
|
||||
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
|
||||
// of just for the _search api
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(),
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(),
|
||||
startTime(), request.indices());
|
||||
|
||||
for (String index : concreteIndices) {
|
||||
|
|
|
@ -64,7 +64,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
// optimize search type for cases where there is only one shard group to search on
|
||||
try {
|
||||
ClusterState clusterState = clusterService.state();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest);
|
||||
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
|
||||
searchRequest.routing(), searchRequest.indices());
|
||||
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
|
||||
|
|
|
@ -125,7 +125,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
|
|||
throw blockException;
|
||||
}
|
||||
// update to concrete indices
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
blockException = checkRequestBlock(clusterState, request, concreteIndices);
|
||||
if (blockException != null) {
|
||||
throw blockException;
|
||||
|
|
|
@ -51,7 +51,6 @@ import org.elasticsearch.transport.BaseTransportResponseHandler;
|
|||
import org.elasticsearch.transport.NodeShouldNotConnectException;
|
||||
import org.elasticsearch.transport.TransportChannel;
|
||||
import org.elasticsearch.transport.TransportException;
|
||||
import org.elasticsearch.transport.TransportRequest;
|
||||
import org.elasticsearch.transport.TransportRequestHandler;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -241,7 +240,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
|
|||
throw globalBlockException;
|
||||
}
|
||||
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
ClusterBlockException requestBlockException = checkRequestBlock(clusterState, request, concreteIndices);
|
||||
if (requestBlockException != null) {
|
||||
throw requestBlockException;
|
||||
|
|
|
@ -50,7 +50,7 @@ public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequ
|
|||
|
||||
@Override
|
||||
protected final void masterOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) {
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
|
||||
doMasterOperation(request, concreteIndices, state, listener);
|
||||
}
|
||||
|
||||
|
|
|
@ -97,7 +97,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
|
|||
@Override
|
||||
public void onFailure(Throwable e) {
|
||||
logger.trace("{}: got failure from {}", actionName, shardId);
|
||||
int totalNumCopies = clusterState.getMetaData().index(shardId.getIndexName()).getNumberOfReplicas() + 1;
|
||||
int totalNumCopies = clusterState.getMetaData().getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1;
|
||||
ShardResponse shardResponse = newShardResponse();
|
||||
ReplicationResponse.ShardInfo.Failure[] failures;
|
||||
if (TransportActions.isShardNotAvailableException(e)) {
|
||||
|
@ -130,7 +130,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
|
|||
*/
|
||||
protected List<ShardId> shards(Request request, ClusterState clusterState) {
|
||||
List<ShardId> shardIds = new ArrayList<>();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
|
||||
for (String index : concreteIndices) {
|
||||
IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index);
|
||||
if (indexMetaData != null) {
|
||||
|
|
|
@ -103,7 +103,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected final ShardStateAction shardStateAction;
protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
protected final TransportRequestOptions transportOptions;
protected final MappingUpdatedAction mappingUpdatedAction;

final String transportReplicaAction;
final String transportPrimaryAction;
@ -113,7 +112,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
Supplier<ReplicaRequest> replicaRequest, String executor) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
@ -121,7 +120,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.clusterService = clusterService;
this.indicesService = indicesService;
this.shardStateAction = shardStateAction;
this.mappingUpdatedAction = mappingUpdatedAction;

this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]";
@ -525,7 +523,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}

private String concreteIndex(ClusterState state) {
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request).getName() : request.index();
}

private ShardRouting primary(ClusterState state) {
@ -138,7 +138,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
throw blockException;
}
}
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request).getName());
resolveRequest(observer.observedState(), request);
blockException = checkRequestBlock(observer.observedState(), request);
if (blockException != null) {
@ -141,7 +141,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ

String concreteSingleIndex;
if (resolveIndex(request)) {
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, request);
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, request).getName();
} else {
concreteSingleIndex = request.index();
}
@ -72,7 +72,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index()))));
continue;
}
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, (DocumentRequest) termVectorsRequest);
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));
@ -135,8 +135,8 @@ public class ClusterChangedEvent {
List<Index> deleted = null;
for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
IndexMetaData index = cursor.value;
IndexMetaData current = state.metaData().index(index.getIndex().getName());
if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
IndexMetaData current = state.metaData().index(index.getIndex());
if (current == null) {
if (deleted == null) {
deleted = new ArrayList<>();
}
@ -18,17 +18,19 @@
*/
package org.elasticsearch.cluster.ack;

import org.elasticsearch.index.Index;

/**
* Base cluster state update request that allows to execute update against multiple indices
*/
public abstract class IndicesClusterStateUpdateRequest<T extends IndicesClusterStateUpdateRequest<T>> extends ClusterStateUpdateRequest<T> {

private String[] indices;
private Index[] indices;

/**
* Returns the indices the operation needs to be executed on
*/
public String[] indices() {
public Index[] indices() {
return indices;
}

@ -36,7 +38,7 @@ public abstract class IndicesClusterStateUpdateRequest<T extends IndicesClusterS
* Sets the indices the operation needs to be executed on
*/
@SuppressWarnings("unchecked")
public T indices(String[] indices) {
public T indices(Index[] indices) {
this.indices = indices;
return (T)this;
}
@ -19,9 +19,7 @@

package org.elasticsearch.cluster.action.index;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.component.AbstractComponent;
@ -31,6 +29,7 @@ import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;

@ -65,48 +64,20 @@ public class MappingUpdatedAction extends AbstractComponent {
this.client = client.admin().indices();
}

private PutMappingRequestBuilder updateMappingRequest(String index, String type, Mapping mappingUpdate, final TimeValue timeout) {
private PutMappingRequestBuilder updateMappingRequest(Index index, String type, Mapping mappingUpdate, final TimeValue timeout) {
if (type.equals(MapperService.DEFAULT_MAPPING)) {
throw new IllegalArgumentException("_default_ mapping should not be updated");
}
return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString())
return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString())
.setMasterNodeTimeout(timeout).setTimeout(timeout);
}

public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) {
final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout);
if (listener == null) {
request.execute();
} else {
final ActionListener<PutMappingResponse> actionListener = new ActionListener<PutMappingResponse>() {
@Override
public void onResponse(PutMappingResponse response) {
if (response.isAcknowledged()) {
listener.onMappingUpdate();
} else {
listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]"));
}
}

@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
};
request.execute(actionListener);
}
}

public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
}

/**
* Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
* Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
}

/**
@ -114,19 +85,9 @@ public class MappingUpdatedAction extends AbstractComponent {
* {@code timeout}. When this method returns successfully mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
}
}

/**
* A listener to be notified when the mappings were updated
*/
public static interface MappingUpdateListener {

void onMappingUpdate();

void onFailure(Throwable t);
}
}
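Editorial note, not part of the diff above: the mapping-update path now carries a concrete Index, i.e. the <name, uuid> tuple, instead of a bare index name. A hedged sketch of how a data-node caller might drive the new signature, assuming a MappingUpdatedAction instance, a ShardId for the shard that produced the dynamic mapping update, a parsed Mapping and a timeout are in scope; the variable names are illustrative only:

// Illustrative only: take the concrete Index from the shard rather than re-resolving it by name,
// so the master can reject the update if the index was deleted and re-created in the meantime.
Index concreteIndex = shardId.getIndex();
mappingUpdatedAction.updateMappingOnMaster(concreteIndex, type, mappingUpdate, timeout);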
@ -76,7 +76,7 @@ public class NodeIndexDeletedAction extends AbstractComponent {
listeners.remove(listener);
}

public void nodeIndexDeleted(final ClusterState clusterState, final String index, final IndexSettings indexSettings, final String nodeId) {
public void nodeIndexDeleted(final ClusterState clusterState, final Index index, final IndexSettings indexSettings, final String nodeId) {
final DiscoveryNodes nodes = clusterState.nodes();
transportService.sendRequest(clusterState.nodes().masterNode(),
INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
@ -97,7 +97,7 @@ public class NodeIndexDeletedAction extends AbstractComponent {
});
}

private void lockIndexAndAck(String index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException {
private void lockIndexAndAck(Index index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException {
try {
// we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store to the
// master. If we can't acquire the locks here immediately there might be a shard of this index still holding on to the lock
@ -114,9 +114,9 @@ public class NodeIndexDeletedAction extends AbstractComponent {
}

public interface Listener {
void onNodeIndexDeleted(String index, String nodeId);
void onNodeIndexDeleted(Index index, String nodeId);

void onNodeIndexStoreDeleted(String index, String nodeId);
void onNodeIndexStoreDeleted(Index index, String nodeId);
}

private class NodeIndexDeletedTransportHandler implements TransportRequestHandler<NodeIndexDeletedMessage> {
@ -143,13 +143,13 @@ public class NodeIndexDeletedAction extends AbstractComponent {

public static class NodeIndexDeletedMessage extends TransportRequest {

String index;
Index index;
String nodeId;

public NodeIndexDeletedMessage() {
}

NodeIndexDeletedMessage(String index, String nodeId) {
NodeIndexDeletedMessage(Index index, String nodeId) {
this.index = index;
this.nodeId = nodeId;
}
@ -157,27 +157,27 @@ public class NodeIndexDeletedAction extends AbstractComponent {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
index.writeTo(out);
out.writeString(nodeId);
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
index = new Index(in);
nodeId = in.readString();
}
}

public static class NodeIndexStoreDeletedMessage extends TransportRequest {

String index;
Index index;
String nodeId;

public NodeIndexStoreDeletedMessage() {
}

NodeIndexStoreDeletedMessage(String index, String nodeId) {
NodeIndexStoreDeletedMessage(Index index, String nodeId) {
this.index = index;
this.nodeId = nodeId;
}
@ -185,14 +185,14 @@ public class NodeIndexDeletedAction extends AbstractComponent {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
index.writeTo(out);
out.writeString(nodeId);
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
index = new Index(in);
nodeId = in.readString();
}
}
@ -32,6 +32,7 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.joda.time.DateTimeZone;
@ -65,11 +66,20 @@ public class IndexNameExpressionResolver extends AbstractComponent {
);
}

/**
* Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options
* are encapsulated in the specified request.
*/
public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
Context context = new Context(state, request.indicesOptions());
return concreteIndexNames(context, request.indices());
}

/**
* Same as {@link #concreteIndices(ClusterState, IndicesOptions, String...)}, but the index expressions and options
* are encapsulated in the specified request.
*/
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
public Index[] concreteIndices(ClusterState state, IndicesRequest request) {
Context context = new Context(state, request.indicesOptions());
return concreteIndices(context, request.indices());
}
@ -87,7 +97,25 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided
* indices options in the context don't allow such a case.
*/
public String[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) {
public String[] concreteIndexNames(ClusterState state, IndicesOptions options, String... indexExpressions) {
Context context = new Context(state, options);
return concreteIndexNames(context, indexExpressions);
}

/**
* Translates the provided index expression into actual concrete indices, properly deduplicated.
*
* @param state the cluster state containing all the data to resolve to expressions to concrete indices
* @param options defines how the aliases or indices need to be resolved to concrete indices
* @param indexExpressions expressions that can be resolved to alias or index names.
* @return the resolved concrete indices based on the cluster state, indices options and index expressions
* @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the
* provided indices options in the context don't allow such a case, or if the final result of the indices resolution
* contains no indices and the indices options in the context don't allow such a case.
* @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided
* indices options in the context don't allow such a case.
*/
public Index[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) {
Context context = new Context(state, options);
return concreteIndices(context, indexExpressions);
}
@ -105,12 +133,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* @throws IllegalArgumentException if one of the aliases resolve to multiple indices and the provided
* indices options in the context don't allow such a case.
*/
public String[] concreteIndices(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) {
public String[] concreteIndexNames(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) {
Context context = new Context(state, options, startTime);
return concreteIndices(context, indexExpressions);
return concreteIndexNames(context, indexExpressions);
}

String[] concreteIndices(Context context, String... indexExpressions) {
String[] concreteIndexNames(Context context, String... indexExpressions) {
Index[] indexes = concreteIndices(context, indexExpressions);
String[] names = new String[indexes.length];
for (int i = 0; i < indexes.length; i++) {
names[i] = indexes[i].getName();
}
return names;
}

Index[] concreteIndices(Context context, String... indexExpressions) {
if (indexExpressions == null || indexExpressions.length == 0) {
indexExpressions = new String[]{MetaData.ALL};
}
@ -136,11 +173,11 @@ public class IndexNameExpressionResolver extends AbstractComponent {
infe.setResources("index_expression", indexExpressions);
throw infe;
} else {
return Strings.EMPTY_ARRAY;
return Index.EMPTY_ARRAY;
}
}

final Set<String> concreteIndices = new HashSet<>(expressions.size());
final Set<Index> concreteIndices = new HashSet<>(expressions.size());
for (String expression : expressions) {
AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression);
if (aliasOrIndex == null) {
@ -169,11 +206,11 @@ public class IndexNameExpressionResolver extends AbstractComponent {
throw new IndexClosedException(index.getIndex());
} else {
if (options.forbidClosedIndices() == false) {
concreteIndices.add(index.getIndex().getName());
concreteIndices.add(index.getIndex());
}
}
} else if (index.getState() == IndexMetaData.State.OPEN) {
concreteIndices.add(index.getIndex().getName());
concreteIndices.add(index.getIndex());
} else {
throw new IllegalStateException("index state [" + index.getState() + "] not supported");
}
@ -185,7 +222,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
infe.setResources("index_expression", indexExpressions);
throw infe;
}
return concreteIndices.toArray(new String[concreteIndices.size()]);
return concreteIndices.toArray(new Index[concreteIndices.size()]);
}

/**
@ -200,9 +237,9 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* @throws IllegalArgumentException if the index resolution lead to more than one index
* @return the concrete index obtained as a result of the index resolution
*/
public String concreteSingleIndex(ClusterState state, IndicesRequest request) {
public Index concreteSingleIndex(ClusterState state, IndicesRequest request) {
String indexExpression = request.indices() != null && request.indices().length > 0 ? request.indices()[0] : null;
String[] indices = concreteIndices(state, request.indicesOptions(), indexExpression);
Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression);
if (indices.length != 1) {
throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices");
}
@ -867,7 +904,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* Returns <code>true</code> iff the given expression resolves to the given index name otherwise <code>false</code>
*/
public final boolean matchesIndex(String indexName, String expression, ClusterState state) {
final String[] concreteIndices = concreteIndices(state, IndicesOptions.lenientExpandOpen(), expression);
final String[] concreteIndices = concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), expression);
for (String index : concreteIndices) {
if (Regex.simpleMatch(index, indexName)) {
return true;
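Editorial note, not part of the diff above: after this change the resolver offers both name-based and UUID-aware resolution. A hedged usage sketch, assuming a ClusterState state, an IndicesRequest request and an IndexNameExpressionResolver resolver are in scope:

// Name-only resolution, as before: aliases and wildcards are expanded to index names.
String[] names = resolver.concreteIndexNames(state, request);

// UUID-aware resolution: each Index records the UUID that was current at resolution time,
// so a later lookup can detect that the name now points to a different, re-created index.
Index[] indices = resolver.concreteIndices(state, request);
for (Index index : indices) {
    IndexMetaData indexMetaData = state.metaData().getIndexSafe(index); // throws IndexNotFoundException on a UUID mismatch
}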
@ -84,20 +84,10 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {

private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis");

public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter,
Version version) throws TimestampParsingException {
public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException {
try {
// no need for unix timestamp parsing in 2.x
FormatDateTimeFormatter formatter = version.onOrAfter(Version.V_2_0_0_beta1) ? dateTimeFormatter : EPOCH_MILLIS_PARSER;
return Long.toString(formatter.parser().parseMillis(timestampAsString));
return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));
} catch (RuntimeException e) {
if (version.before(Version.V_2_0_0_beta1)) {
try {
return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));
} catch (RuntimeException e1) {
throw new TimestampParsingException(timestampAsString, e1);
}
}
throw new TimestampParsingException(timestampAsString, e);
}
}
@ -232,7 +232,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
public boolean equalsAliases(MetaData other) {
for (ObjectCursor<IndexMetaData> cursor : other.indices().values()) {
IndexMetaData otherIndex = cursor.value;
IndexMetaData thisIndex= index(otherIndex.getIndex());
IndexMetaData thisIndex = index(otherIndex.getIndex());
if (thisIndex == null) {
return false;
}
@ -457,7 +457,28 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
}

public IndexMetaData index(Index index) {
return index(index.getName());
IndexMetaData metaData = index(index.getName());
if (metaData != null && metaData.getIndexUUID().equals(index.getUUID())) {
return metaData;
}
return null;
}

/**
* Returns the {@link IndexMetaData} for this index.
* @throws IndexNotFoundException if no metadata for this index is found
*/
public IndexMetaData getIndexSafe(Index index) {
IndexMetaData metaData = index(index.getName());
if (metaData != null) {
if(metaData.getIndexUUID().equals(index.getUUID())) {
return metaData;
}
throw new IndexNotFoundException(index,
new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID()
+ "] but got: [" + metaData.getIndexUUID() +"]"));
}
throw new IndexNotFoundException(index);
}

public ImmutableOpenMap<String, IndexMetaData> indices() {
@ -488,20 +509,13 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return (T) customs.get(type);
}

public int totalNumberOfShards() {

public int getTotalNumberOfShards() {
return this.totalNumberOfShards;
}

public int getTotalNumberOfShards() {
return totalNumberOfShards();
}

public int numberOfShards() {
return this.numberOfShards;
}

public int getNumberOfShards() {
return numberOfShards();
return this.numberOfShards;
}

/**
@ -844,6 +858,19 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return indices.get(index);
}

public IndexMetaData getSafe(Index index) {
IndexMetaData indexMetaData = get(index.getName());
if (indexMetaData != null) {
if(indexMetaData.getIndexUUID().equals(index.getUUID())) {
return indexMetaData;
}
throw new IndexNotFoundException(index,
new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID()
+ "] but got: [" + indexMetaData.getIndexUUID() +"]"));
}
throw new IndexNotFoundException(index);
}

public Builder remove(String index) {
indices.remove(index);
return this;
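Editorial note, not part of the diff above: MetaData#getIndexSafe is the piece that enforces the <name, uuid> tuple from the commit message. A minimal self-contained sketch of the same check, using hypothetical stand-in types rather than the real IndexMetaData:

import java.util.HashMap;
import java.util.Map;

class UuidCheckedLookup {
    static final class IndexMeta {
        final String name;
        final String uuid;
        IndexMeta(String name, String uuid) { this.name = name; this.uuid = uuid; }
    }

    // A name-only lookup succeeds only if the stored UUID still matches the UUID the caller
    // resolved earlier; otherwise the index was deleted and re-created under the same name.
    static IndexMeta getSafe(Map<String, IndexMeta> byName, String name, String expectedUuid) {
        IndexMeta meta = byName.get(name);
        if (meta == null) {
            throw new IllegalStateException("no such index [" + name + "]");
        }
        if (!meta.uuid.equals(expectedUuid)) {
            throw new IllegalStateException("index uuid doesn't match expected: ["
                    + expectedUuid + "] but got: [" + meta.uuid + "]");
        }
        return meta;
    }

    public static void main(String[] args) {
        Map<String, IndexMeta> byName = new HashMap<>();
        byName.put("logs", new IndexMeta("logs", "uuid-1"));
        System.out.println(getSafe(byName, "logs", "uuid-1").uuid); // ok
        // getSafe(byName, "logs", "uuid-0") would throw: the name was reused by a different index
    }
}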
@ -35,14 +35,17 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

/**
*
@ -68,10 +71,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
}

public void deleteIndices(final Request request, final Listener userListener) {
Set<String> indices = Sets.newHashSet(request.indices);
final DeleteIndexListener listener = new DeleteIndexListener(userListener);

clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
clusterService.submitStateUpdateTask("delete-index " + request.indices, new ClusterStateUpdateTask(Priority.URGENT) {

@Override
public TimeValue timeout() {
@ -85,23 +87,21 @@ public class MetaDataDeleteIndexService extends AbstractComponent {

@Override
public ClusterState execute(final ClusterState currentState) {
final MetaData meta = currentState.metaData();
final Set<IndexMetaData> metaDatas = request.indices.stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet());
// Check if index deletion conflicts with any running snapshots
SnapshotsService.checkIndexDeletion(currentState, indices);

SnapshotsService.checkIndexDeletion(currentState, metaDatas);
final Set<Index> indices = request.indices;
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
MetaData.Builder metaDataBuilder = MetaData.builder(meta);
ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());

for (final String index: indices) {
if (!currentState.metaData().hasConcreteIndex(index)) {
throw new IndexNotFoundException(index);
}

for (final Index index : indices) {
String indexName = index.getName();
logger.debug("[{}] deleting index", index);

routingTableBuilder.remove(index);
clusterBlocksBuilder.removeIndexBlocks(index);
metaDataBuilder.remove(index);
routingTableBuilder.remove(indexName);
clusterBlocksBuilder.removeIndexBlocks(indexName);
metaDataBuilder.remove(indexName);
}
// wait for events from all nodes that it has been removed from their respective metadata...
int count = currentState.nodes().size();
@ -112,7 +112,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
// this listener will be notified once we get back a notification based on the cluster state change below.
final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() {
@Override
public void onNodeIndexDeleted(String deleted, String nodeId) {
public void onNodeIndexDeleted(Index deleted, String nodeId) {
if (indices.contains(deleted)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(new Response(true));
@ -122,7 +122,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
}

@Override
public void onNodeIndexStoreDeleted(String deleted, String nodeId) {
public void onNodeIndexStoreDeleted(Index deleted, String nodeId) {
if (indices.contains(deleted)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(new Response(true));
@ -187,12 +187,12 @@ public class MetaDataDeleteIndexService extends AbstractComponent {

public static class Request {

final String[] indices;
final Set<Index> indices;

TimeValue timeout = TimeValue.timeValueSeconds(10);
TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT;

public Request(String[] indices) {
public Request(Set<Index> indices) {
this.indices = indices;
}
@ -36,6 +36,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.RestoreService;
@ -82,15 +83,11 @@ public class MetaDataIndexStateService extends AbstractComponent {

@Override
public ClusterState execute(ClusterState currentState) {
Set<String> indicesToClose = new HashSet<>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}

Set<IndexMetaData> indicesToClose = new HashSet<>();
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
indicesToClose.add(index);
indicesToClose.add(indexMetaData);
}
}

@ -102,22 +99,22 @@ public class MetaDataIndexStateService extends AbstractComponent {
RestoreService.checkIndexClosing(currentState, indicesToClose);
// Check if index closing conflicts with any running snapshots
SnapshotsService.checkIndexClosing(currentState, indicesToClose);

logger.info("closing indices [{}]", indicesAsString);

MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
.blocks(currentState.blocks());
for (String index : indicesToClose) {
mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE));
blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK);
for (IndexMetaData openIndexMetadata : indicesToClose) {
final String indexName = openIndexMetadata.getIndex().getName();
mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE));
blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK);
}

ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();

RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
for (String index : indicesToClose) {
rtBuilder.remove(index);
for (IndexMetaData index : indicesToClose) {
rtBuilder.remove(index.getIndex().getName());
}

RoutingAllocation.Result routingResult = allocationService.reroute(
@ -143,14 +140,11 @@ public class MetaDataIndexStateService extends AbstractComponent {

@Override
public ClusterState execute(ClusterState currentState) {
List<String> indicesToOpen = new ArrayList<>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
List<IndexMetaData> indicesToOpen = new ArrayList<>();
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indexMetaData.getState() != IndexMetaData.State.OPEN) {
indicesToOpen.add(index);
indicesToOpen.add(indexMetaData);
}
}

@ -163,20 +157,21 @@ public class MetaDataIndexStateService extends AbstractComponent {
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
.blocks(currentState.blocks());
for (String index : indicesToOpen) {
IndexMetaData indexMetaData = IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build();
for (IndexMetaData closedMetaData : indicesToOpen) {
final String indexName = closedMetaData.getIndex().getName();
IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build();
// The index might be closed because we couldn't import it due to old incompatible version
// We need to check that this index can be upgraded to the current version
indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
mdBuilder.put(indexMetaData, true);
blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK);
blocksBuilder.removeIndexBlock(indexName, INDEX_CLOSED_BLOCK);
}

ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();

RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable());
for (String index : indicesToOpen) {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().index(index));
for (IndexMetaData index : indicesToOpen) {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex()));
}

RoutingAllocation.Result routingResult = allocationService.reroute(
@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
@ -216,31 +217,23 @@ public class MetaDataMappingService extends AbstractComponent {
try {
// precreate incoming indices;
for (PutMappingClusterStateUpdateRequest request : tasks) {
final List<Index> indices = new ArrayList<>(request.indices().length);
try {
for (String index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData != null) {
if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
// if the index does not exists we create it once, add all types to the mapper service and
// close it later once we are done with mapping update
indicesToClose.add(indexMetaData.getIndex());
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
// if the index does not exists we create it once, add all types to the mapper service and
// close it later once we are done with mapping update
indicesToClose.add(indexMetaData.getIndex());
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
indices.add(indexMetaData.getIndex());
} else {
// we didn't find the index in the clusterstate - maybe it was deleted
// NOTE: this doesn't fail the entire batch only the current PutMapping request we are processing
throw new IndexNotFoundException(index);
}
}
currentState = applyRequest(currentState, request, indices);
currentState = applyRequest(currentState, request);
builder.success(request);
} catch (Throwable t) {
builder.failure(request, t);
@ -254,13 +247,20 @@ public class MetaDataMappingService extends AbstractComponent {
}
}

private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
List<Index> indices) throws IOException {
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
String mappingType = request.type();
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
final MetaData metaData = currentState.metaData();
for (Index index : indices) {
final List<Tuple<IndexService, IndexMetaData>> updateList = new ArrayList<>();
for (Index index : request.indices()) {
IndexService indexService = indicesService.indexServiceSafe(index);
// IMPORTANT: always get the metadata from the state since it get's batched
// and if we pull it from the indexService we might miss an update etc.
final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index);

// this is paranoia... just to be sure we use the exact same indexService and metadata tuple on the update that
// we used for the validation, it makes this mechanism little less scary (a little)
updateList.add(new Tuple<>(indexService, indexMetaData));
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
@ -281,7 +281,6 @@ public class MetaDataMappingService extends AbstractComponent {
// and a put mapping api call, so we don't which type did exist before.
// Also the order of the mappings may be backwards.
if (newMapper.parentFieldMapper().active()) {
IndexMetaData indexMetaData = metaData.index(index);
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
@ -302,13 +301,12 @@ public class MetaDataMappingService extends AbstractComponent {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
MetaData.Builder builder = MetaData.builder(metaData);
for (Index index : indices) {
for (Tuple<IndexService, IndexMetaData> toUpdate : updateList) {
// do the actual merge here on the master, and update the mapping source
IndexService indexService = indicesService.indexService(index);
if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above
continue;
}

// we use the exact same indexService and metadata we used to validate above here to actually apply the update
final IndexService indexService = toUpdate.v1();
final IndexMetaData indexMetaData = toUpdate.v2();
final Index index = indexMetaData.getIndex();
CompressedXContent existingSource = null;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
if (existingMapper != null) {
@ -323,24 +321,20 @@ public class MetaDataMappingService extends AbstractComponent {
} else {
// use the merged mapping source
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
logger.info("{} update_mapping [{}]", index, mergedMapper.type());
}

}
} else {
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, mappingType);
logger.info("{} create_mapping [{}]", index, mappingType);
}
}

IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
// Mapping updates on a single type may have side-effects on other types so we need to
// update mapping metadata on all types
@ -23,7 +23,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
@ -43,7 +42,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.Index;

import java.util.ArrayList;
import java.util.HashMap;
@ -86,7 +85,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// we will want to know this for translating "all" to a number
final int dataNodeCount = event.state().nodes().dataNodes().size();

Map<Integer, List<String>> nrReplicasChanged = new HashMap<>();
Map<Integer, List<Index>> nrReplicasChanged = new HashMap<>();
// we need to do this each time in case it was changed by update settings
for (final IndexMetaData indexMetaData : event.state().metaData()) {
AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings());
@ -117,7 +116,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
nrReplicasChanged.put(numberOfReplicas, new ArrayList<>());
}

nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex().getName());
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
}
}
}
@ -126,25 +125,25 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// update settings and kick of a reroute (implicit) for them to take effect
for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
final List<Index> indices = nrReplicasChanged.get(fNumberOfReplicas);

UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(indices.toArray(new String[indices.size()])).settings(settings)
.indices(indices.toArray(new Index[indices.size()])).settings(settings)
.ackTimeout(TimeValue.timeValueMillis(0)) //no need to wait for ack here
.masterNodeTimeout(TimeValue.timeValueMinutes(10));

updateSettings(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
for (String index : indices) {
logger.info("[{}] auto expanded replicas to [{}]", index, fNumberOfReplicas);
for (Index index : indices) {
logger.info("{} auto expanded replicas to [{}]", index, fNumberOfReplicas);
}
}

@Override
public void onFailure(Throwable t) {
for (String index : indices) {
logger.warn("[{}] fail to auto expand replicas to [{}]", index, fNumberOfReplicas);
for (Index index : indices) {
logger.warn("{} fail to auto expand replicas to [{}]", index, fNumberOfReplicas);
}
}
});
@ -188,16 +187,19 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements

@Override
public ClusterState execute(ClusterState currentState) {
String[] actualIndices = indexNameExpressionResolver.concreteIndices(currentState, IndicesOptions.strictExpand(), request.indices());
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());

// allow to change any settings to a close index, and only allow dynamic settings to be changed
// on an open index
Set<String> openIndices = new HashSet<>();
Set<String> closeIndices = new HashSet<>();
for (String index : actualIndices) {
if (currentState.metaData().index(index).getState() == IndexMetaData.State.OPEN) {
Set<Index> openIndices = new HashSet<>();
Set<Index> closeIndices = new HashSet<>();
final String[] actualIndices = new String[request.indices().length];
for (int i = 0; i < request.indices().length; i++) {
Index index = request.indices()[i];
actualIndices[i] = index.getName();
final IndexMetaData metaData = currentState.metaData().getIndexSafe(index);
if (metaData.getState() == IndexMetaData.State.OPEN) {
openIndices.add(index);
} else {
closeIndices.add(index);
@ -206,13 +208,13 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements

if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update [%s] on closed indices [%s] - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
"Can't update [%s] on closed indices %s - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
closeIndices
));
}
if (!skippedSettigns.getAsMap().isEmpty() && !openIndices.isEmpty()) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update non dynamic settings[%s] for open indices [%s]",
"Can't update non dynamic settings [%s] for open indices %s",
skippedSettigns.getAsMap().keySet(),
openIndices
));
@ -232,28 +234,22 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);

if (!openIndices.isEmpty()) {
for (String index : openIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
for (Index index : openIndices) {
IndexMetaData indexMetaData = metaDataBuilder.getSafe(index);
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) {
if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index.getName())) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
}

if (!closeIndices.isEmpty()) {
for (String index : closeIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
for (Index index : closeIndices) {
IndexMetaData indexMetaData = metaDataBuilder.getSafe(index);
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) {
if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index.getName())) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
@ -265,11 +261,11 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
for (String index : openIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
for (Index index : openIndices) {
indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings());
}
for (String index : closeIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
for (Index index : closeIndices) {
indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings());
}
return updatedState;
}
@ -313,7 +313,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple

@Override
public IndexRoutingTable readFrom(StreamInput in) throws IOException {
Index index = Index.readIndex(in);
Index index = new Index(in);
Builder builder = new Builder(index);

int size = in.readVInt();
@ -584,7 +584,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}

public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException {
Index index = Index.readIndex(in);
Index index = new Index(in);
return readFromThin(in, index);
}
@ -328,7 +328,7 @@ public final class ShardRouting implements Streamable, ToXContent {

@Override
public void readFrom(StreamInput in) throws IOException {
readFrom(in, Index.readIndex(in), in.readVInt());
readFrom(in, new Index(in), in.readVInt());
}

/**
@ -320,7 +320,7 @@ public class AllocationService extends AbstractComponent {
public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) {
for (ShardRouting shardRouting : allocation.routingNodes().unassigned()) {
final MetaData metaData = allocation.metaData();
final IndexMetaData indexMetaData = metaData.index(shardRouting.index());
final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index());
shardRouting.unassignedInfo().updateDelay(allocation.getCurrentNanoTime(), settings, indexMetaData.getSettings());
}
}
@ -340,7 +340,6 @@ public class AllocationService extends AbstractComponent {
changed |= failReplicasForUnassignedPrimary(allocation, shardEntry);
ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
if (candidate != null) {
IndexMetaData index = allocation.metaData().index(candidate.index());
routingNodes.swapPrimaryFlag(shardEntry, candidate);
if (candidate.relocatingNodeId() != null) {
changed = true;
@ -355,6 +354,7 @@ public class AllocationService extends AbstractComponent {
}
}
}
IndexMetaData index = allocation.metaData().getIndexSafe(candidate.index());
if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
routingNodes.reinitShadowPrimary(candidate);
changed = true;
@ -218,7 +218,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
this.threshold = threshold;
this.routingNodes = allocation.routingNodes();
metaData = routingNodes.metaData();
avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / routingNodes.size();
avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
buildModelFromAssigned();
}
@ -112,7 +112,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
"allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}

final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active");
@ -153,7 +153,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
|
|||
return allocation.decision(Decision.YES, NAME, "no allocation awareness enabled");
|
||||
}
|
||||
|
||||
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.index());
|
||||
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
||||
int shardCount = indexMetaData.getNumberOfReplicas() + 1; // 1 for primary
|
||||
for (String awarenessAttribute : awarenessAttributes) {
|
||||
// the node the shard exists on must be associated with an awareness attribute
|
||||
|
|
|
@ -342,7 +342,7 @@ public class DiskThresholdDecider extends AllocationDecider {
|
|||
}
|
||||
|
||||
// a flag for whether the primary shard has been previously allocated
|
||||
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
|
||||
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
||||
boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
|
||||
|
||||
// checks for exact byte comparisons
|
||||
|
|
|
@ -101,7 +101,7 @@ public class EnableAllocationDecider extends AllocationDecider {
|
|||
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
|
||||
}
|
||||
|
||||
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
|
||||
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
|
||||
final Allocation enable;
|
||||
if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) {
|
||||
enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings());
|
||||
|
@ -136,7 +136,7 @@ public class EnableAllocationDecider extends AllocationDecider {
|
|||
return allocation.decision(Decision.YES, NAME, "rebalance disabling is ignored");
|
||||
}
|
||||
|
||||
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
|
||||
Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
|
||||
final Rebalance enable;
|
||||
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
|
||||
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);
|
||||
|
|
|
@ -102,7 +102,7 @@ public class FilterAllocationDecider extends AllocationDecider {
|
|||
Decision decision = shouldClusterFilter(node, allocation);
|
||||
if (decision != null) return decision;
|
||||
|
||||
decision = shouldIndexFilter(allocation.routingNodes().metaData().index(shardRouting.index()), node, allocation);
|
||||
decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation);
|
||||
if (decision != null) return decision;
|
||||
|
||||
return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");
|
||||
|
|
|
@ -86,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
|
|||
|
||||
@Override
|
||||
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
|
||||
IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
|
||||
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
|
||||
// Capture the limit here in case it changes during this method's
|
||||
// execution
|
||||
|
@ -123,7 +123,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
|
|||
|
||||
@Override
|
||||
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
|
||||
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
|
||||
IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
|
||||
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
|
||||
// Capture the limit here in case it changes during this method's
|
||||
// execution
|
||||
|
|
|
@ -553,9 +553,9 @@ public abstract class StreamInput extends InputStream {
|
|||
}
|
||||
}
|
||||
|
||||
public <T extends Writeable> T readOptionalWritable(T prototype) throws IOException {
|
||||
public <T extends Writeable> T readOptionalWritable(Writeable.IOFunction<StreamInput, T> provider) throws IOException {
|
||||
if (readBoolean()) {
|
||||
return (T) prototype.readFrom(this);
|
||||
return provider.apply(this);
|
||||
} else {
|
||||
return null;
|
||||
}
|
||||
|
|
|
@ -37,4 +37,15 @@ public interface Writeable<T> extends StreamableReader<T> {
|
|||
* Write this into the {@linkplain StreamOutput}.
|
||||
*/
|
||||
void writeTo(StreamOutput out) throws IOException;
|
||||
|
||||
@FunctionalInterface
|
||||
interface IOFunction<T, R> {
|
||||
/**
|
||||
* Applies this function to the given argument.
|
||||
*
|
||||
* @param t the function argument
|
||||
* @return the function result
|
||||
*/
|
||||
R apply(T t) throws IOException;
|
||||
}
|
||||
}
|
||||
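The StreamInput and Writeable hunks above swap the prototype-based readOptionalWritable(T prototype) for a variant that accepts a reader function, and add Writeable.IOFunction so that the reader is allowed to throw IOException (plain java.util.function.Function cannot). A self-contained sketch of the same shape; StreamInputSketch and Payload are invented names, and the reader passed in would typically be a readFrom or constructor reference:

import java.io.DataInputStream;
import java.io.IOException;
import java.io.InputStream;

final class StreamInputSketch {
    // Function-like interface whose apply may throw IOException, mirroring Writeable.IOFunction.
    @FunctionalInterface
    interface IOFunction<T, R> {
        R apply(T t) throws IOException;
    }

    private final DataInputStream in;
    StreamInputSketch(InputStream in) { this.in = new DataInputStream(in); }

    boolean readBoolean() throws IOException { return in.readBoolean(); }
    String readString() throws IOException { return in.readUTF(); }

    // Optional read: a leading boolean says whether a value follows; the caller supplies
    // how to read one instead of handing over a prototype instance.
    <T> T readOptional(IOFunction<StreamInputSketch, T> reader) throws IOException {
        return readBoolean() ? reader.apply(this) : null;
    }
}

final class Payload {
    final String value;
    Payload(StreamInputSketch in) throws IOException { this.value = in.readString(); }
}

A call site would then read Payload p = in.readOptional(Payload::new); with no shared prototype object involved.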
|
|
|
@ -269,7 +269,7 @@ public abstract class AsyncShardFetch<T extends BaseNodeResponse> implements Rel
|
|||
*/
|
||||
// visible for testing
|
||||
void asyncFetch(final ShardId shardId, final String[] nodesIds, final MetaData metaData) {
|
||||
IndexMetaData indexMetaData = metaData.index(shardId.getIndex());
|
||||
IndexMetaData indexMetaData = metaData.getIndexSafe(shardId.getIndex());
|
||||
logger.trace("{} fetching [{}] from {}", shardId, type, nodesIds);
|
||||
action.list(shardId, indexMetaData, nodesIds, new ActionListener<BaseNodesResponse<T>>() {
|
||||
@Override
|
||||
|
|
|
@ -34,6 +34,7 @@ import org.elasticsearch.common.component.AbstractComponent;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.nio.file.Path;
|
||||
import java.util.function.Supplier;
|
||||
|
@ -79,7 +80,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
}
|
||||
}
|
||||
|
||||
ObjectFloatHashMap<String> indices = new ObjectFloatHashMap<>();
|
||||
ObjectFloatHashMap<Index> indices = new ObjectFloatHashMap<>();
|
||||
MetaData electedGlobalState = null;
|
||||
int found = 0;
|
||||
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) {
|
||||
|
@ -93,7 +94,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
electedGlobalState = nodeState.metaData();
|
||||
}
|
||||
for (ObjectCursor<IndexMetaData> cursor : nodeState.metaData().indices().values()) {
|
||||
indices.addTo(cursor.value.getIndex().getName(), 1);
|
||||
indices.addTo(cursor.value.getIndex(), 1);
|
||||
}
|
||||
}
|
||||
if (found < requiredAllocation) {
|
||||
|
@ -107,7 +108,7 @@ public class Gateway extends AbstractComponent implements ClusterStateListener {
|
|||
final Object[] keys = indices.keys;
|
||||
for (int i = 0; i < keys.length; i++) {
|
||||
if (keys[i] != null) {
|
||||
String index = (String) keys[i];
|
||||
Index index = (Index) keys[i];
|
||||
IndexMetaData electedIndexMetaData = null;
|
||||
int indexMetaDataCount = 0;
|
||||
for (TransportNodesListGatewayMetaState.NodeGatewayMetaState nodeState : nodesState) {
|
||||
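Gateway now keys its vote counting on Index objects instead of index names, so Index has to behave as a value type with equals/hashCode over both name and uuid; two generations of a reused name then count separately. A small sketch with plain JDK collections and a hypothetical IndexKey class in place of the HPPC ObjectFloatHashMap and the real Index:

import java.util.HashMap;
import java.util.Map;
import java.util.Objects;

final class GatewayVoteSketch {
    // Hypothetical value type standing in for org.elasticsearch.index.Index.
    static final class IndexKey {
        final String name, uuid;
        IndexKey(String name, String uuid) { this.name = name; this.uuid = uuid; }
        @Override public boolean equals(Object o) {
            if (!(o instanceof IndexKey)) return false;
            IndexKey other = (IndexKey) o;
            return name.equals(other.name) && uuid.equals(other.uuid);
        }
        @Override public int hashCode() { return Objects.hash(name, uuid); }
        @Override public String toString() { return "[" + name + "/" + uuid + "]"; }
    }

    // Count how many node states reported each index; two generations of an index that
    // reused the same name map to different keys because their uuids differ.
    static Map<IndexKey, Integer> countIndices(Iterable<Iterable<IndexKey>> nodeStates) {
        Map<IndexKey, Integer> counts = new HashMap<>();
        for (Iterable<IndexKey> node : nodeStates) {
            for (IndexKey index : node) {
                counts.merge(index, 1, Integer::sum);
            }
        }
        return counts;
    }
}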
|
|
|
@ -35,6 +35,7 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.nio.file.DirectoryStream;
|
||||
import java.nio.file.Files;
|
||||
|
@ -61,7 +62,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
@Nullable
|
||||
private volatile MetaData previousMetaData;
|
||||
|
||||
private volatile Set<String> previouslyWrittenIndices = emptySet();
|
||||
private volatile Set<Index> previouslyWrittenIndices = emptySet();
|
||||
|
||||
@Inject
|
||||
public GatewayMetaState(Settings settings, NodeEnvironment nodeEnv, MetaStateService metaStateService,
|
||||
|
@ -102,7 +103,6 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
@Override
|
||||
public void clusterChanged(ClusterChangedEvent event) {
|
||||
|
||||
Set<String> relevantIndices = new HashSet<>();
|
||||
final ClusterState state = event.state();
|
||||
if (state.blocks().disableStatePersistence()) {
|
||||
// reset the current metadata, we need to start fresh...
|
||||
|
@ -113,7 +113,7 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
|
||||
MetaData newMetaData = state.metaData();
|
||||
// we don't check if metaData changed, since we might be called several times and we need to check dangling...
|
||||
|
||||
Set<Index> relevantIndices = Collections.emptySet();
|
||||
boolean success = true;
|
||||
// write the state if this node is a master eligible node or if it is a data node and has shards allocated on it
|
||||
if (state.nodes().localNode().masterNode() || state.nodes().localNode().dataNode()) {
|
||||
|
@ -126,14 +126,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
// persistence was disabled or the node was restarted), see getRelevantIndicesOnDataOnlyNode().
|
||||
// we therefore have to check here if we have shards on disk and add their indices to the previouslyWrittenIndices list
|
||||
if (isDataOnlyNode(state)) {
|
||||
Set<String> newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size());
|
||||
Set<Index> newPreviouslyWrittenIndices = new HashSet<>(previouslyWrittenIndices.size());
|
||||
for (IndexMetaData indexMetaData : newMetaData) {
|
||||
IndexMetaData indexMetaDataOnDisk = null;
|
||||
if (indexMetaData.getState().equals(IndexMetaData.State.CLOSE)) {
|
||||
indexMetaDataOnDisk = metaStateService.loadIndexState(indexMetaData.getIndex().getName());
|
||||
}
|
||||
if (indexMetaDataOnDisk != null) {
|
||||
newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex().getName());
|
||||
newPreviouslyWrittenIndices.add(indexMetaDataOnDisk.getIndex());
|
||||
}
|
||||
}
|
||||
newPreviouslyWrittenIndices.addAll(previouslyWrittenIndices);
|
||||
|
@ -152,9 +152,9 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
}
|
||||
|
||||
Iterable<IndexMetaWriteInfo> writeInfo;
|
||||
|
||||
relevantIndices = getRelevantIndices(event.state(), event.previousState(), previouslyWrittenIndices);
|
||||
writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData());
|
||||
final Iterable<IndexMetaWriteInfo> writeInfo = resolveStatesToBeWritten(previouslyWrittenIndices, relevantIndices, previousMetaData, event.state().metaData());
|
||||
// check and write changes in indices
|
||||
for (IndexMetaWriteInfo indexMetaWrite : writeInfo) {
|
||||
try {
|
||||
|
@ -173,8 +173,8 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
}
|
||||
}
|
||||
|
||||
public static Set<String> getRelevantIndices(ClusterState state, ClusterState previousState, Set<String> previouslyWrittenIndices) {
|
||||
Set<String> relevantIndices;
|
||||
public static Set<Index> getRelevantIndices(ClusterState state, ClusterState previousState, Set<Index> previouslyWrittenIndices) {
|
||||
Set<Index> relevantIndices;
|
||||
if (isDataOnlyNode(state)) {
|
||||
relevantIndices = getRelevantIndicesOnDataOnlyNode(state, previousState, previouslyWrittenIndices);
|
||||
} else if (state.nodes().localNode().masterNode() == true) {
|
||||
|
@ -264,10 +264,10 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
* @param newMetaData The new metadata
|
||||
* @return iterable over all indices states that should be written to disk
|
||||
*/
|
||||
public static Iterable<GatewayMetaState.IndexMetaWriteInfo> resolveStatesToBeWritten(Set<String> previouslyWrittenIndices, Set<String> potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) {
|
||||
public static Iterable<GatewayMetaState.IndexMetaWriteInfo> resolveStatesToBeWritten(Set<Index> previouslyWrittenIndices, Set<Index> potentiallyUnwrittenIndices, MetaData previousMetaData, MetaData newMetaData) {
|
||||
List<GatewayMetaState.IndexMetaWriteInfo> indicesToWrite = new ArrayList<>();
|
||||
for (String index : potentiallyUnwrittenIndices) {
|
||||
IndexMetaData newIndexMetaData = newMetaData.index(index);
|
||||
for (Index index : potentiallyUnwrittenIndices) {
|
||||
IndexMetaData newIndexMetaData = newMetaData.getIndexSafe(index);
|
||||
IndexMetaData previousIndexMetaData = previousMetaData == null ? null : previousMetaData.index(index);
|
||||
String writeReason = null;
|
||||
if (previouslyWrittenIndices.contains(index) == false || previousIndexMetaData == null) {
|
||||
|
@ -282,14 +282,14 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
return indicesToWrite;
|
||||
}
|
||||
|
||||
public static Set<String> getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set<String> previouslyWrittenIndices) {
|
||||
public static Set<Index> getRelevantIndicesOnDataOnlyNode(ClusterState state, ClusterState previousState, Set<Index> previouslyWrittenIndices) {
|
||||
RoutingNode newRoutingNode = state.getRoutingNodes().node(state.nodes().localNodeId());
|
||||
if (newRoutingNode == null) {
|
||||
throw new IllegalStateException("cluster state does not contain this node - cannot write index meta state");
|
||||
}
|
||||
Set<String> indices = new HashSet<>();
|
||||
Set<Index> indices = new HashSet<>();
|
||||
for (ShardRouting routing : newRoutingNode) {
|
||||
indices.add(routing.index().getName());
|
||||
indices.add(routing.index());
|
||||
}
|
||||
// we have to check the meta data also: closed indices will not appear in the routing table, but we must still write the state if we have it written on disk previously
|
||||
for (IndexMetaData indexMetaData : state.metaData()) {
|
||||
|
@ -300,19 +300,19 @@ public class GatewayMetaState extends AbstractComponent implements ClusterStateL
|
|||
if (previousMetaData != null) {
|
||||
isOrWasClosed = isOrWasClosed || previousMetaData.getState().equals(IndexMetaData.State.CLOSE);
|
||||
}
|
||||
if (previouslyWrittenIndices.contains(indexMetaData.getIndex().getName()) && isOrWasClosed) {
|
||||
indices.add(indexMetaData.getIndex().getName());
|
||||
if (previouslyWrittenIndices.contains(indexMetaData.getIndex()) && isOrWasClosed) {
|
||||
indices.add(indexMetaData.getIndex());
|
||||
}
|
||||
}
|
||||
return indices;
|
||||
}
|
||||
|
||||
public static Set<String> getRelevantIndicesForMasterEligibleNode(ClusterState state) {
|
||||
Set<String> relevantIndices;
|
||||
public static Set<Index> getRelevantIndicesForMasterEligibleNode(ClusterState state) {
|
||||
Set<Index> relevantIndices;
|
||||
relevantIndices = new HashSet<>();
|
||||
// we have to iterate over the metadata to make sure we also capture closed indices
|
||||
for (IndexMetaData indexMetaData : state.metaData()) {
|
||||
relevantIndices.add(indexMetaData.getIndex().getName());
|
||||
relevantIndices.add(indexMetaData.getIndex());
|
||||
}
|
||||
return relevantIndices;
|
||||
}
|
||||
|
|
|
@ -94,7 +94,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
|
|||
continue;
|
||||
}
|
||||
|
||||
final IndexMetaData indexMetaData = metaData.index(shard.getIndexName());
|
||||
final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
|
||||
// don't go wild here and create a new IndexSettings object for every shard; this could cause a lot of garbage
|
||||
// on cluster restart if we allocate a boat load of shards
|
||||
if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
|
||||
|
|
|
@ -23,6 +23,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
|
|||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.index.Index;
|
||||
|
||||
import java.util.Comparator;
|
||||
|
||||
|
@ -42,8 +43,8 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> {
|
|||
final String o2Index = o2.getIndexName();
|
||||
int cmp = 0;
|
||||
if (o1Index.equals(o2Index) == false) {
|
||||
final Settings settingsO1 = getIndexSettings(o1Index);
|
||||
final Settings settingsO2 = getIndexSettings(o2Index);
|
||||
final Settings settingsO1 = getIndexSettings(o1.index());
|
||||
final Settings settingsO2 = getIndexSettings(o2.index());
|
||||
cmp = Long.compare(priority(settingsO2), priority(settingsO1));
|
||||
if (cmp == 0) {
|
||||
cmp = Long.compare(timeCreated(settingsO2), timeCreated(settingsO1));
|
||||
|
@ -63,7 +64,7 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> {
|
|||
return settings.getAsLong(IndexMetaData.SETTING_CREATION_DATE, -1L);
|
||||
}
|
||||
|
||||
protected abstract Settings getIndexSettings(String index);
|
||||
protected abstract Settings getIndexSettings(Index index);
|
||||
|
||||
/**
|
||||
* Returns a PriorityComparator that uses the RoutingAllocation index metadata to access the index setting per index.
|
||||
|
@ -71,8 +72,8 @@ public abstract class PriorityComparator implements Comparator<ShardRouting> {
|
|||
public static PriorityComparator getAllocationComparator(final RoutingAllocation allocation) {
|
||||
return new PriorityComparator() {
|
||||
@Override
|
||||
protected Settings getIndexSettings(String index) {
|
||||
IndexMetaData indexMetaData = allocation.metaData().index(index);
|
||||
protected Settings getIndexSettings(Index index) {
|
||||
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(index);
|
||||
return indexMetaData.getSettings();
|
||||
}
|
||||
};
|
||||
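PriorityComparator's lookup hook now takes the Index handle rather than its name, and the allocation-backed implementation resolves settings through getIndexSafe. A stripped-down sketch of the comparator shape, with hypothetical names; the ordering follows the original (higher priority first, then newer creation date) and only applies when the two shards belong to different indices:

import java.util.Comparator;

// Sketch of the same shape: an abstract comparator that resolves per-index settings through
// a single lookup method taking the index handle instead of its name.
abstract class IndexPriorityComparatorSketch<I> implements Comparator<ShardRefSketch<I>> {

    @Override
    public int compare(ShardRefSketch<I> o1, ShardRefSketch<I> o2) {
        if (o1.index.equals(o2.index)) {
            return 0;                                   // same index: leave relative order alone
        }
        Object s1 = getIndexSettings(o1.index);
        Object s2 = getIndexSettings(o2.index);
        int cmp = Long.compare(priority(s2), priority(s1));      // higher priority first
        if (cmp == 0) {
            cmp = Long.compare(created(s2), created(s1));         // then newer indices first
        }
        return cmp;
    }

    protected abstract Object getIndexSettings(I index);          // keyed by the index handle

    protected abstract long priority(Object settings);
    protected abstract long created(Object settings);
}

final class ShardRefSketch<I> {
    final I index;
    ShardRefSketch(I index) { this.index = index; }
}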
|
|
|
@ -74,7 +74,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
|
|||
}
|
||||
|
||||
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
|
||||
IndexMetaData indexMetaData = metaData.index(shard.getIndexName());
|
||||
IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
|
||||
if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
|
||||
continue;
|
||||
}
|
||||
|
@ -129,7 +129,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
|
|||
}
|
||||
|
||||
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
|
||||
IndexMetaData indexMetaData = metaData.index(shard.getIndexName());
|
||||
IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
|
||||
if (shard.allocatedPostIndexCreate(indexMetaData) == false) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -125,7 +125,7 @@ public class TransportNodesListGatewayStartedShards extends TransportNodesAction
|
|||
logger.trace("{} loading local shard state info", shardId);
|
||||
ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, nodeEnv.availableShardPaths(request.shardId));
|
||||
if (shardStateMetaData != null) {
|
||||
final IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndexName()); // it's a mystery why this is sometimes null
|
||||
final IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex()); // it's a mystery why this is sometimes null
|
||||
if (metaData != null) {
|
||||
ShardPath shardPath = null;
|
||||
try {
|
||||
|
|
|
@ -31,7 +31,7 @@ import java.io.IOException;
|
|||
*/
|
||||
public class Index implements Writeable<Index> {
|
||||
|
||||
private final static Index PROTO = new Index("", "");
|
||||
public static final Index[] EMPTY_ARRAY = new Index[0];
|
||||
|
||||
private final String name;
|
||||
private final String uuid;
|
||||
|
@ -41,6 +41,12 @@ public class Index implements Writeable<Index> {
|
|||
this.uuid = uuid.intern();
|
||||
}
|
||||
|
||||
public Index(StreamInput in) throws IOException {
|
||||
this.name = in.readString();
|
||||
this.uuid = in.readString();
|
||||
}
|
||||
|
||||
|
||||
public String getName() {
|
||||
return this.name;
|
||||
}
|
||||
|
@ -80,13 +86,9 @@ public class Index implements Writeable<Index> {
|
|||
return result;
|
||||
}
|
||||
|
||||
public static Index readIndex(StreamInput in) throws IOException {
|
||||
return PROTO.readFrom(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
public Index readFrom(StreamInput in) throws IOException {
|
||||
return new Index(in.readString(), in.readString());
|
||||
return new Index(in);
|
||||
}
|
||||
|
||||
@Override
|
||||
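Index loses the PROTO/readIndex indirection in favor of a constructor that reads directly from the stream, which is what the ShardId, ShardRouting and routing-table hunks now call via new Index(in). A toy round-trip of that read-constructor pattern; IndexRef and the Data*Stream types stand in for the real Index and Stream classes:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Toy value class showing the writeTo()/read-constructor symmetry the diff moves to.
final class IndexRef {
    final String name;
    final String uuid;

    IndexRef(String name, String uuid) { this.name = name; this.uuid = uuid; }

    // Read side: a constructor taking the stream replaces a shared PROTO.readFrom(in).
    IndexRef(DataInputStream in) throws IOException {
        this.name = in.readUTF();
        this.uuid = in.readUTF();
    }

    void writeTo(DataOutputStream out) throws IOException {
        out.writeUTF(name);
        out.writeUTF(uuid);
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        new IndexRef("logs-2016.03.14", "f7aBcD12Qq6JxWmY0NqZbA").writeTo(new DataOutputStream(bytes));
        IndexRef copy = new IndexRef(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
        System.out.println(copy.name + "/" + copy.uuid);   // round-trips both fields
    }
}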
|
|
|
@ -98,7 +98,7 @@ public class ShardId implements Streamable, Comparable<ShardId> {
|
|||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
index = Index.readIndex(in);
|
||||
index = new Index(in);
|
||||
shardId = in.readVInt();
|
||||
hashCode = computeHashCode();
|
||||
}
|
||||
|
|
|
@ -172,7 +172,7 @@ public class NodeIndicesStats implements Streamable, ToXContent {
|
|||
int entries = in.readVInt();
|
||||
statsByShard = new HashMap<>();
|
||||
for (int i = 0; i < entries; i++) {
|
||||
Index index = Index.readIndex(in);
|
||||
Index index = new Index(in);
|
||||
int indexShardListSize = in.readVInt();
|
||||
List<IndexShardStats> indexShardStats = new ArrayList<>(indexShardListSize);
|
||||
for (int j = 0; j < indexShardListSize; j++) {
|
||||
|
|
|
@ -188,15 +188,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
// handle closed indices, since they are not allocated on a node once they are closed
|
||||
// so applyDeletedIndices might not take them into account
|
||||
for (IndexService indexService : indicesService) {
|
||||
String index = indexService.index().getName();
|
||||
Index index = indexService.index();
|
||||
IndexMetaData indexMetaData = event.state().metaData().index(index);
|
||||
if (indexMetaData != null && indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
for (Integer shardId : indexService.shardIds()) {
|
||||
logger.debug("[{}][{}] removing shard (index is closed)", index, shardId);
|
||||
logger.debug("{}[{}] removing shard (index is closed)", index, shardId);
|
||||
try {
|
||||
indexService.removeShard(shardId, "removing shard (index is closed)");
|
||||
} catch (Throwable e) {
|
||||
logger.warn("[{}] failed to remove shard (index is closed)", e, index);
|
||||
logger.warn("{} failed to remove shard (index is closed)", e, index);
|
||||
}
|
||||
}
|
||||
}
|
||||
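The log statements above switch from a bracketed "[{}][{}]" template plus the index name to a bare "{}[{}]" template plus the Index object, letting the index's own toString supply the brackets. The output below assumes Index prints as [name/uuid]; that format and the tiny formatter are illustrative guesses, not taken from this diff:

// Minimal sketch: letting the index handle render itself keeps the uuid in the log line.
final class LogFormatSketch {
    static String format(String template, Object... args) {
        for (Object arg : args) {
            template = template.replaceFirst("\\{}",
                    java.util.regex.Matcher.quoteReplacement(String.valueOf(arg)));
        }
        return template;
    }

    public static void main(String[] args) {
        String index = "[twitter/jKvL0qQnSmyRkGzXcV19uw]";    // assumed Index#toString form
        System.out.println(format("{}[{}] removing shard (index is closed)", index, 3));
        // -> [twitter/jKvL0qQnSmyRkGzXcV19uw][3] removing shard (index is closed)
    }
}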
|
@ -238,13 +238,12 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
indexSettings = idxService.getIndexSettings();
|
||||
deleteIndex(index, "index no longer part of the metadata");
|
||||
} else {
|
||||
final IndexMetaData metaData = previousState.metaData().index(index);
|
||||
assert metaData != null;
|
||||
final IndexMetaData metaData = previousState.metaData().getIndexSafe(index);
|
||||
indexSettings = new IndexSettings(metaData, settings);
|
||||
indicesService.deleteClosedIndex("closed index no longer part of the metadata", metaData, event.state());
|
||||
}
|
||||
try {
|
||||
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index.getName(), indexSettings, localNodeId);
|
||||
nodeIndexDeletedAction.nodeIndexDeleted(event.state(), index, indexSettings, localNodeId);
|
||||
} catch (Throwable e) {
|
||||
logger.debug("failed to send to master index {} deleted event", e, index);
|
||||
}
|
||||
|
@ -260,15 +259,15 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
}
|
||||
IntHashSet newShardIds = new IntHashSet();
|
||||
for (IndexService indexService : indicesService) {
|
||||
String indexName = indexService.index().getName();
|
||||
IndexMetaData indexMetaData = event.state().metaData().index(indexName);
|
||||
Index index = indexService.index();
|
||||
IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(index);
|
||||
if (indexMetaData == null) {
|
||||
continue;
|
||||
}
|
||||
// now, go over and delete shards that needs to get deleted
|
||||
newShardIds.clear();
|
||||
for (ShardRouting shard : routingNode) {
|
||||
if (shard.index().getName().equals(indexName)) {
|
||||
if (shard.index().equals(index)) {
|
||||
newShardIds.add(shard.id());
|
||||
}
|
||||
}
|
||||
|
@ -276,14 +275,14 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
if (!newShardIds.contains(existingShardId)) {
|
||||
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}][{}] removing shard (index is closed)", indexName, existingShardId);
|
||||
logger.debug("{}[{}] removing shard (index is closed)", index, existingShardId);
|
||||
}
|
||||
indexService.removeShard(existingShardId, "removing shard (index is closed)");
|
||||
} else {
|
||||
// we can just remove the shard, without cleaning it locally, since we will clean it
|
||||
// when all shards are allocated in the IndicesStore
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}][{}] removing shard (not allocated)", indexName, existingShardId);
|
||||
logger.debug("{}[{}] removing shard (not allocated)", index, existingShardId);
|
||||
}
|
||||
indexService.removeShard(existingShardId, "removing shard (not allocated)");
|
||||
}
|
||||
|
@ -300,7 +299,7 @@ public class IndicesClusterStateService extends AbstractLifecycleComponent<Indic
|
|||
}
|
||||
for (ShardRouting shard : routingNode) {
|
||||
if (!indicesService.hasIndex(shard.index())) {
|
||||
final IndexMetaData indexMetaData = event.state().metaData().index(shard.index());
|
||||
final IndexMetaData indexMetaData = event.state().metaData().getIndexSafe(shard.index());
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("[{}] creating index", indexMetaData.getIndex());
|
||||
}
|
||||
|
|
|
@ -39,6 +39,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
|
@ -111,15 +112,15 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
|
|||
*/
|
||||
public void attemptSyncedFlush(final String[] aliasesOrIndices, IndicesOptions indicesOptions, final ActionListener<SyncedFlushResponse> listener) {
|
||||
final ClusterState state = clusterService.state();
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
|
||||
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, indicesOptions, aliasesOrIndices);
|
||||
final Map<String, List<ShardsSyncedFlushResult>> results = ConcurrentCollections.newConcurrentMap();
|
||||
int totalNumberOfShards = 0;
|
||||
int numberOfShards = 0;
|
||||
for (String index : concreteIndices) {
|
||||
final IndexMetaData indexMetaData = state.metaData().index(index);
|
||||
for (Index index : concreteIndices) {
|
||||
final IndexMetaData indexMetaData = state.metaData().getIndexSafe(index);
|
||||
totalNumberOfShards += indexMetaData.getTotalNumberOfShards();
|
||||
numberOfShards += indexMetaData.getNumberOfShards();
|
||||
results.put(index, Collections.synchronizedList(new ArrayList<>()));
|
||||
results.put(index.getName(), Collections.synchronizedList(new ArrayList<>()));
|
||||
|
||||
}
|
||||
if (numberOfShards == 0) {
|
||||
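attemptSyncedFlush now asks the resolver for concrete Index instances and only drops to getName() where a string key is actually needed (the per-index results map), while name-only callers elsewhere move to concreteIndexNames. A hypothetical sketch of that two-method split; ResolverSketch is not the real IndexNameExpressionResolver and does no wildcard, alias, or options handling:

import java.util.ArrayList;
import java.util.List;

final class ResolverSketch {
    static final class IndexRef {
        final String name, uuid;
        IndexRef(String name, String uuid) { this.name = name; this.uuid = uuid; }
    }

    private final List<IndexRef> clusterIndices;
    ResolverSketch(List<IndexRef> clusterIndices) { this.clusterIndices = clusterIndices; }

    // Resolve expressions to concrete indices; callers that go on to touch cluster state
    // keep the uuid-bearing handle.
    IndexRef[] concreteIndices(String... expressions) {
        List<IndexRef> matches = new ArrayList<>();
        for (IndexRef index : clusterIndices) {
            for (String expression : expressions) {
                if (index.name.equals(expression)) {
                    matches.add(index);
                }
            }
        }
        return matches.toArray(new IndexRef[0]);
    }

    // Name-only convenience for callers that really just need strings (e.g. building requests).
    String[] concreteIndexNames(String... expressions) {
        IndexRef[] resolved = concreteIndices(expressions);
        String[] names = new String[resolved.length];
        for (int i = 0; i < resolved.length; i++) {
            names[i] = resolved[i].name;
        }
        return names;
    }
}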
|
@ -129,8 +130,9 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
|
|||
final int finalTotalNumberOfShards = totalNumberOfShards;
|
||||
final CountDown countDown = new CountDown(numberOfShards);
|
||||
|
||||
for (final String index : concreteIndices) {
|
||||
final IndexMetaData indexMetaData = state.metaData().index(index);
|
||||
for (final Index concreteIndex : concreteIndices) {
|
||||
final String index = concreteIndex.getName();
|
||||
final IndexMetaData indexMetaData = state.metaData().getIndexSafe(concreteIndex);
|
||||
final int indexNumberOfShards = indexMetaData.getNumberOfShards();
|
||||
for (int shard = 0; shard < indexNumberOfShards; shard++) {
|
||||
final ShardId shardId = new ShardId(indexMetaData.getIndex(), shard);
|
||||
|
@ -240,7 +242,7 @@ public class SyncedFlushService extends AbstractComponent implements IndexEventL
|
|||
final IndexShardRoutingTable getShardRoutingTable(ShardId shardId, ClusterState state) {
|
||||
final IndexRoutingTable indexRoutingTable = state.routingTable().index(shardId.getIndexName());
|
||||
if (indexRoutingTable == null) {
|
||||
IndexMetaData index = state.getMetaData().index(shardId.getIndexName());
|
||||
IndexMetaData index = state.getMetaData().index(shardId.getIndex());
|
||||
if (index != null && index.getState() == IndexMetaData.State.CLOSE) {
|
||||
throw new IndexClosedException(shardId.getIndex());
|
||||
}
|
||||
|
|
|
@ -26,6 +26,7 @@ import org.elasticsearch.cluster.ClusterState;
|
|||
import org.elasticsearch.cluster.ClusterStateListener;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.ClusterStateUpdateTask;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.IndexRoutingTable;
|
||||
import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
|
||||
|
@ -115,7 +116,13 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
|
|||
if (shardCanBeDeleted(event.state(), indexShardRoutingTable)) {
|
||||
ShardId shardId = indexShardRoutingTable.shardId();
|
||||
IndexService indexService = indicesService.indexService(indexRoutingTable.getIndex());
|
||||
IndexSettings indexSettings = indexService != null ? indexService.getIndexSettings() : new IndexSettings(event.state().getMetaData().index(indexRoutingTable.getIndex()), settings);
|
||||
final IndexSettings indexSettings;
|
||||
if (indexService == null) {
|
||||
IndexMetaData indexMetaData = event.state().getMetaData().getIndexSafe(indexRoutingTable.getIndex());
|
||||
indexSettings = new IndexSettings(indexMetaData, settings);
|
||||
} else {
|
||||
indexSettings = indexService.getIndexSettings();
|
||||
}
|
||||
if (indicesService.canDeleteShardContent(shardId, indexSettings)) {
|
||||
deleteShardIfExistElseWhere(event.state(), indexShardRoutingTable);
|
||||
}
|
||||
|
@ -164,7 +171,7 @@ public class IndicesStore extends AbstractComponent implements ClusterStateListe
|
|||
|
||||
private void deleteShardIfExistElseWhere(ClusterState state, IndexShardRoutingTable indexShardRoutingTable) {
|
||||
List<Tuple<DiscoveryNode, ShardActiveRequest>> requests = new ArrayList<>(indexShardRoutingTable.size());
|
||||
String indexUUID = state.getMetaData().index(indexShardRoutingTable.shardId().getIndex()).getIndexUUID();
|
||||
String indexUUID = indexShardRoutingTable.shardId().getIndex().getUUID();
|
||||
ClusterName clusterName = state.getClusterName();
|
||||
for (ShardRouting shardRouting : indexShardRoutingTable) {
|
||||
// Node can't be null, because otherwise shardCanBeDeleted() would have returned false
|
||||
|
|
|
@ -134,7 +134,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
|
|||
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
|
||||
}
|
||||
}
|
||||
IndexMetaData metaData = clusterService.state().metaData().index(request.shardId.getIndexName());
|
||||
IndexMetaData metaData = clusterService.state().metaData().index(request.shardId.getIndex());
|
||||
if (metaData == null) {
|
||||
return new NodeStoreFilesMetaData(clusterService.localNode(), null);
|
||||
}
|
||||
|
@ -165,7 +165,7 @@ public class TransportNodesListShardStoreMetaData extends TransportNodesAction<T
|
|||
}
|
||||
}
|
||||
// try and see if we can list unallocated
|
||||
IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndexName());
|
||||
IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex());
|
||||
if (metaData == null) {
|
||||
return new StoreFilesMetaData(false, shardId, Store.MetadataSnapshot.EMPTY);
|
||||
}
|
||||
|
|
|
@ -162,7 +162,7 @@ public class IndicesTTLService extends AbstractLifecycleComponent<IndicesTTLServ
|
|||
MetaData metaData = clusterService.state().metaData();
|
||||
for (IndexService indexService : indicesService) {
|
||||
// check the value of disable_purge for this index
|
||||
IndexMetaData indexMetaData = metaData.index(indexService.index().getName());
|
||||
IndexMetaData indexMetaData = metaData.index(indexService.index());
|
||||
if (indexMetaData == null) {
|
||||
continue;
|
||||
}
|
||||
|
|
|
@ -83,8 +83,8 @@ public class RestIndicesAction extends AbstractCatAction {
|
|||
public void processResponse(final ClusterStateResponse clusterStateResponse) {
|
||||
ClusterState state = clusterStateResponse.getState();
|
||||
final IndicesOptions concreteIndicesOptions = IndicesOptions.fromOptions(false, true, true, true);
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, concreteIndicesOptions, indices);
|
||||
final String[] openIndices = indexNameExpressionResolver.concreteIndices(state, IndicesOptions.lenientExpandOpen(), indices);
|
||||
final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, concreteIndicesOptions, indices);
|
||||
final String[] openIndices = indexNameExpressionResolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), indices);
|
||||
ClusterHealthRequest clusterHealthRequest = Requests.clusterHealthRequest(openIndices);
|
||||
clusterHealthRequest.local(request.paramAsBoolean("local", clusterHealthRequest.local()));
|
||||
client.admin().cluster().health(clusterHealthRequest, new RestActionListener<ClusterHealthResponse>(channel) {
|
||||
|
|
|
@ -191,7 +191,7 @@ public class RestShardsAction extends AbstractCatAction {
|
|||
table.addCell(shard.getIndexName());
|
||||
table.addCell(shard.id());
|
||||
|
||||
IndexMetaData indexMeta = state.getState().getMetaData().index(shard.index());
|
||||
IndexMetaData indexMeta = state.getState().getMetaData().getIndexSafe(shard.index());
|
||||
boolean usesShadowReplicas = false;
|
||||
if (indexMeta != null) {
|
||||
usesShadowReplicas = IndexMetaData.isIndexUsingShadowReplicas(indexMeta.getSettings());
|
||||
|
|
|
@ -195,7 +195,7 @@ public class SearchService extends AbstractLifecycleComponent<SearchService> imp
|
|||
public void afterIndexClosed(Index index, Settings indexSettings) {
|
||||
// once an index is closed we can just clean up all the pending search context information
|
||||
// to release memory and let references to the filesystem go etc.
|
||||
IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index.getName());
|
||||
IndexMetaData idxMeta = SearchService.this.clusterService.state().metaData().index(index);
|
||||
if (idxMeta != null && idxMeta.getState() == IndexMetaData.State.CLOSE) {
|
||||
// we need to check if it's really closed
|
||||
// since sometimes due to a relocation we already closed the shard and that causes the index to be closed
|
||||
|
|
|
@ -778,18 +778,19 @@ public class RestoreService extends AbstractComponent implements ClusterStateLis
|
|||
* Check if any of the indices to be closed are currently being restored from a snapshot and fail closing if such an index
|
||||
* is found as closing an index that is being restored makes the index unusable (it cannot be recovered).
|
||||
*/
|
||||
public static void checkIndexClosing(ClusterState currentState, Set<String> indices) {
|
||||
public static void checkIndexClosing(ClusterState currentState, Set<IndexMetaData> indices) {
|
||||
RestoreInProgress restore = currentState.custom(RestoreInProgress.TYPE);
|
||||
if (restore != null) {
|
||||
Set<String> indicesToFail = null;
|
||||
Set<Index> indicesToFail = null;
|
||||
for (RestoreInProgress.Entry entry : restore.entries()) {
|
||||
for (ObjectObjectCursor<ShardId, RestoreInProgress.ShardRestoreStatus> shard : entry.shards()) {
|
||||
if (!shard.value.state().completed()) {
|
||||
if (indices.contains(shard.key.getIndexName())) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(shard.key.getIndex());
|
||||
if (indexMetaData != null && indices.contains(indexMetaData)) {
|
||||
if (indicesToFail == null) {
|
||||
indicesToFail = new HashSet<>();
|
||||
}
|
||||
indicesToFail.add(shard.key.getIndexName());
|
||||
indicesToFail.add(shard.key.getIndex());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -51,6 +51,7 @@ import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.snapshots.IndexShardRepository;
|
||||
import org.elasticsearch.index.snapshots.IndexShardSnapshotStatus;
|
||||
|
@ -204,7 +205,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
|
|||
SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
|
||||
if (snapshots == null || snapshots.entries().isEmpty()) {
|
||||
// Store newSnapshot here to be processed in clusterStateProcessed
|
||||
List<String> indices = Arrays.asList(indexNameExpressionResolver.concreteIndices(currentState, request.indicesOptions(), request.indices()));
|
||||
List<String> indices = Arrays.asList(indexNameExpressionResolver.concreteIndexNames(currentState, request.indicesOptions(), request.indices()));
|
||||
logger.trace("[{}][{}] creating snapshot for indices [{}]", request.repository(), request.name(), indices);
|
||||
newSnapshot = new SnapshotsInProgress.Entry(snapshotId, request.includeGlobalState(), request.partial(), State.INIT, indices, System.currentTimeMillis(), null);
|
||||
snapshots = new SnapshotsInProgress(newSnapshot);
|
||||
|
@ -751,7 +752,7 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
|
|||
Set<String> closed = new HashSet<>();
|
||||
for (ObjectObjectCursor<ShardId, SnapshotsInProgress.ShardSnapshotStatus> entry : shards) {
|
||||
if (entry.value.state() == State.MISSING) {
|
||||
if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.index(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) {
|
||||
if (metaData.hasIndex(entry.key.getIndex().getName()) && metaData.getIndexSafe(entry.key.getIndex()).getState() == IndexMetaData.State.CLOSE) {
|
||||
closed.add(entry.key.getIndex().getName());
|
||||
} else {
|
||||
missing.add(entry.key.getIndex().getName());
|
||||
|
@ -1065,8 +1066,8 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
|
|||
* Check if any of the indices to be deleted are currently being snapshotted. Fail as deleting an index that is being
|
||||
* snapshotted (with partial == false) makes the snapshot fail.
|
||||
*/
|
||||
public static void checkIndexDeletion(ClusterState currentState, Set<String> indices) {
|
||||
Set<String> indicesToFail = indicesToFailForCloseOrDeletion(currentState, indices);
|
||||
public static void checkIndexDeletion(ClusterState currentState, Set<IndexMetaData> indices) {
|
||||
Set<Index> indicesToFail = indicesToFailForCloseOrDeletion(currentState, indices);
|
||||
if (indicesToFail != null) {
|
||||
throw new IllegalArgumentException("Cannot delete indices that are being snapshotted: " + indicesToFail +
|
||||
". Try again after snapshot finishes or cancel the currently running snapshot.");
|
||||
|
@ -1077,37 +1078,39 @@ public class SnapshotsService extends AbstractLifecycleComponent<SnapshotsServic
|
|||
* Check if any of the indices to be closed are currently being snapshotted. Fail as closing an index that is being
|
||||
* snapshotted (with partial == false) makes the snapshot fail.
|
||||
*/
|
||||
public static void checkIndexClosing(ClusterState currentState, Set<String> indices) {
|
||||
Set<String> indicesToFail = indicesToFailForCloseOrDeletion(currentState, indices);
|
||||
public static void checkIndexClosing(ClusterState currentState, Set<IndexMetaData> indices) {
|
||||
Set<Index> indicesToFail = indicesToFailForCloseOrDeletion(currentState, indices);
|
||||
if (indicesToFail != null) {
|
||||
throw new IllegalArgumentException("Cannot close indices that are being snapshotted: " + indicesToFail +
|
||||
". Try again after snapshot finishes or cancel the currently running snapshot.");
|
||||
}
|
||||
}
|
||||
|
||||
private static Set<String> indicesToFailForCloseOrDeletion(ClusterState currentState, Set<String> indices) {
|
||||
private static Set<Index> indicesToFailForCloseOrDeletion(ClusterState currentState, Set<IndexMetaData> indices) {
|
||||
SnapshotsInProgress snapshots = currentState.custom(SnapshotsInProgress.TYPE);
|
||||
Set<String> indicesToFail = null;
|
||||
Set<Index> indicesToFail = null;
|
||||
if (snapshots != null) {
|
||||
for (final SnapshotsInProgress.Entry entry : snapshots.entries()) {
|
||||
if (entry.partial() == false) {
|
||||
if (entry.state() == State.INIT) {
|
||||
for (String index : entry.indices()) {
|
||||
if (indices.contains(index)) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(index);
|
||||
if (indexMetaData != null && indices.contains(indexMetaData)) {
|
||||
if (indicesToFail == null) {
|
||||
indicesToFail = new HashSet<>();
|
||||
}
|
||||
indicesToFail.add(index);
|
||||
indicesToFail.add(indexMetaData.getIndex());
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for (ObjectObjectCursor<ShardId, SnapshotsInProgress.ShardSnapshotStatus> shard : entry.shards()) {
|
||||
if (!shard.value.state().completed()) {
|
||||
if (indices.contains(shard.key.getIndexName())) {
|
||||
IndexMetaData indexMetaData = currentState.metaData().index(shard.key.getIndex());
|
||||
if (indexMetaData != null && indices.contains(indexMetaData)) {
|
||||
if (indicesToFail == null) {
|
||||
indicesToFail = new HashSet<>();
|
||||
}
|
||||
indicesToFail.add(shard.key.getIndexName());
|
||||
indicesToFail.add(shard.key.getIndex());
|
||||
}
|
||||
}
|
||||
}
|
||||
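checkIndexDeletion and checkIndexClosing now receive the resolved IndexMetaData set and test snapshot-in-progress entries against it via a lookup in the current cluster state, rather than comparing names, so an entry that refers to an index which has since been deleted or recreated under the same name simply no longer matches. A compact sketch of that membership test with invented types:

import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical stand-ins for IndexMetaData and the in-progress snapshot entries.
final class SnapshotGuardSketch {
    static final class IndexMeta {
        final String name, uuid;
        IndexMeta(String name, String uuid) { this.name = name; this.uuid = uuid; }
        // identity comparison suffices here: the caller passes the exact metadata objects
        // it resolved from the current cluster state
    }

    // Collect indices that are both being snapshotted and about to be closed or deleted.
    static Set<IndexMeta> indicesToFail(List<IndexMeta> beingSnapshotted, Set<IndexMeta> requested) {
        Set<IndexMeta> toFail = new HashSet<>();
        for (IndexMeta snapshotted : beingSnapshotted) {
            if (requested.contains(snapshotted)) {   // same object from the same cluster state
                toFail.add(snapshotted);
            }
        }
        return toFail;
    }
}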
|
|
|
@ -405,9 +405,11 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
|
|||
if (table == null) {
|
||||
continue;
|
||||
}
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(tribeIndex.getIndex());
|
||||
//NOTE: we have to use the index name here since UUIDs are different even if the name is the same
|
||||
final String indexName = tribeIndex.getIndex().getName();
|
||||
final IndexMetaData indexMetaData = currentState.metaData().index(indexName);
|
||||
if (indexMetaData == null) {
|
||||
if (!droppedIndices.contains(tribeIndex.getIndex().getName())) {
|
||||
if (!droppedIndices.contains(indexName)) {
|
||||
// a new index, add it, and add the tribe name as a setting
|
||||
clusterStateChanged = true;
|
||||
logger.info("[{}] adding index {}", tribeName, tribeIndex.getIndex());
|
||||
|
@ -425,7 +427,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
|
|||
logger.info("[{}] dropping index {} due to conflict with [{}]", tribeName, tribeIndex.getIndex(),
|
||||
existingFromTribe);
|
||||
removeIndex(blocks, metaData, routingTable, tribeIndex);
|
||||
droppedIndices.add(tribeIndex.getIndex().getName());
|
||||
droppedIndices.add(indexName);
|
||||
} else if (onConflict.startsWith(ON_CONFLICT_PREFER)) {
|
||||
// on conflict, prefer a tribe...
|
||||
String preferredTribeName = onConflict.substring(ON_CONFLICT_PREFER.length());
|
||||
|
|
|
@ -191,7 +191,7 @@ public class TransportBulkActionTookTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
|
||||
public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
|
||||
return request.indices();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -168,7 +168,7 @@ public class TransportBroadcastByNodeActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
|
||||
public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
|
||||
return request.indices();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1065,8 +1065,8 @@ public class TransportReplicationActionTests extends ESTestCase {
|
|||
ClusterService clusterService,
|
||||
ThreadPool threadPool) {
|
||||
super(settings, actionName, transportService, clusterService, null, threadPool,
|
||||
new ShardStateAction(settings, clusterService, transportService, null, null, threadPool), null,
|
||||
new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME);
|
||||
new ShardStateAction(settings, clusterService, transportService, null, null, threadPool),
|
||||
new ActionFilters(new HashSet<ActionFilter>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new, Request::new, ThreadPool.Names.SAME);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -123,7 +123,7 @@ public class TransportInstanceSingleOperationActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
@Override
|
||||
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
|
||||
public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
|
||||
return request.indices();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -64,7 +64,7 @@ public class ClusterStateHealthTests extends ESTestCase {
|
|||
routingTable.add(indexRoutingTable);
|
||||
}
|
||||
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable.build()).build();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), (String[]) null);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), (String[]) null);
|
||||
ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices);
|
||||
logger.info("cluster status: {}, expected {}", clusterStateHealth.getStatus(), counter.status());
|
||||
clusterStateHealth = maybeSerialize(clusterStateHealth);
|
||||
|
@ -91,7 +91,7 @@ public class ClusterStateHealthTests extends ESTestCase {
|
|||
metaData.put(indexMetaData, true);
|
||||
routingTable.add(indexRoutingTable);
|
||||
ClusterState clusterState = ClusterState.builder(ClusterName.DEFAULT).metaData(metaData).routingTable(routingTable.build()).build();
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), (String[]) null);
|
||||
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), (String[]) null);
|
||||
ClusterStateHealth clusterStateHealth = new ClusterStateHealth(clusterState, concreteIndices);
|
||||
clusterStateHealth = maybeSerialize(clusterStateHealth);
|
||||
// currently we have no cluster level validation failures as index validation issues are reported per index.
|
||||
|
|
|
@ -61,79 +61,79 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
|
|||
IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.strictExpandOpen(), IndicesOptions.strictExpand()};
|
||||
for (IndicesOptions options : indicesOptions) {
|
||||
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
|
||||
String[] results = indexNameExpressionResolver.concreteIndices(context, "foo");
|
||||
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo");
|
||||
assertEquals(1, results.length);
|
||||
assertEquals("foo", results[0]);
|
||||
|
||||
try {
|
||||
indexNameExpressionResolver.concreteIndices(context, "bar");
|
||||
indexNameExpressionResolver.concreteIndexNames(context, "bar");
|
||||
fail();
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getIndex().getName(), equalTo("bar"));
|
||||
}
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "foofoo", "foobar");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo", "foobar");
|
||||
assertEquals(2, results.length);
|
||||
assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "foofoobar");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
|
||||
assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")),
|
||||
new HashSet<>(Arrays.asList(results)));
|
||||
|
||||
try {
|
||||
indexNameExpressionResolver.concreteIndices(context, "bar");
|
||||
indexNameExpressionResolver.concreteIndexNames(context, "bar");
|
||||
fail();
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getIndex().getName(), equalTo("bar"));
|
||||
}
|
||||
|
||||
try {
|
||||
indexNameExpressionResolver.concreteIndices(context, "foo", "bar");
|
||||
indexNameExpressionResolver.concreteIndexNames(context, "foo", "bar");
|
||||
fail();
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getIndex().getName(), equalTo("bar"));
|
||||
}
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "foobar");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "foobar");
|
||||
assertEquals(2, results.length);
|
||||
assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));
|
||||
|
||||
try {
|
||||
indexNameExpressionResolver.concreteIndices(context, "barbaz", "bar");
|
||||
indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "bar");
|
||||
fail();
|
||||
} catch (IndexNotFoundException e) {
|
||||
assertThat(e.getIndex().getName(), equalTo("bar"));
|
||||
}
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "baz*");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "baz*");
|
||||
assertThat(results, emptyArray());
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
|
||||
assertEquals(1, results.length);
|
||||
assertEquals("foo", results[0]);
|
||||
}
|
||||
|
||||
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen());
|
||||
String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
|
||||
String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
|
||||
assertEquals(3, results.length);
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, (String[])null);
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, (String[])null);
|
||||
assertEquals(3, results.length);
|
||||
|
||||
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpand());
|
||||
results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
|
||||
assertEquals(4, results.length);
|
||||
|
||||
results = indexNameExpressionResolver.concreteIndices(context, (String[])null);
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, (String[])null);
|
||||
assertEquals(4, results.length);
|
||||
|
||||
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen());
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "foofoo*");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*");
|
||||
assertEquals(3, results.length);
|
||||
assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo"));
|
||||
|
||||
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpand());
|
||||
results = indexNameExpressionResolver.concreteIndices(context, "foofoo*");
|
||||
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*");
|
assertEquals(4, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed"));
}

@@ -150,57 +150,57 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
IndicesOptions[] indicesOptions = new IndicesOptions[]{ IndicesOptions.lenientExpandOpen(), lenientExpand};
for (IndicesOptions options : indicesOptions) {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
String[] results = indexNameExpressionResolver.concreteIndices(context, "foo");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo");
assertEquals(1, results.length);
assertEquals("foo", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "bar");
assertThat(results, emptyArray());

results = indexNameExpressionResolver.concreteIndices(context, "foofoo", "foobar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo", "foobar");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));

results = indexNameExpressionResolver.concreteIndices(context, "foofoobar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
assertEquals(2, results.length);
assertEquals(new HashSet<>(Arrays.asList("foo", "foobar")),
new HashSet<>(Arrays.asList(results)));

results = indexNameExpressionResolver.concreteIndices(context, "foo", "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "bar");
assertEquals(1, results.length);
assertThat(results, arrayContainingInAnyOrder("foo"));

results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "foobar");
results = indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "foobar");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foofoo", "foobar"));

results = indexNameExpressionResolver.concreteIndices(context, "barbaz", "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "barbaz", "bar");
assertEquals(1, results.length);
assertThat(results, arrayContainingInAnyOrder("foofoo"));

results = indexNameExpressionResolver.concreteIndices(context, "baz*");
results = indexNameExpressionResolver.concreteIndexNames(context, "baz*");
assertThat(results, emptyArray());

results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
assertEquals(1, results.length);
assertEquals("foo", results[0]);
}

IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(3, results.length);

context = new IndexNameExpressionResolver.Context(state, lenientExpand);
results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(Arrays.toString(results), 4, results.length);

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
results = indexNameExpressionResolver.concreteIndices(context, "foofoo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*");
assertEquals(3, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo"));

context = new IndexNameExpressionResolver.Context(state, lenientExpand);
results = indexNameExpressionResolver.concreteIndices(context, "foofoo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoo*");
assertEquals(4, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar", "foofoo", "foofoo-closed"));
}

@@ -219,26 +219,26 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

for (IndicesOptions options : indicesOptions) {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
String[] results = indexNameExpressionResolver.concreteIndices(context, "foo");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo");
assertEquals(1, results.length);
assertEquals("foo", results[0]);

try {
indexNameExpressionResolver.concreteIndices(context, "bar");
indexNameExpressionResolver.concreteIndexNames(context, "bar");
fail();
} catch(IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("bar"));
}

try {
indexNameExpressionResolver.concreteIndices(context, "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));
}

try {
indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));

@@ -246,11 +246,11 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
}

IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, expandOpen);
String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(3, results.length);

context = new IndexNameExpressionResolver.Context(state, expand);
results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(4, results.length);
}

@@ -264,60 +264,60 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
// Only closed
IndicesOptions options = IndicesOptions.fromOptions(false, true, false, true);
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(1, results.length);
assertEquals("foo", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "foo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo*");
assertEquals(1, results.length);
assertEquals("foo", results[0]);

// no wildcards, so wildcard expansion don't apply
results = indexNameExpressionResolver.concreteIndices(context, "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "bar");
assertEquals(1, results.length);
assertEquals("bar", results[0]);

// Only open
options = IndicesOptions.fromOptions(false, true, true, false);
context = new IndexNameExpressionResolver.Context(state, options);
results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("bar", "foobar"));

results = indexNameExpressionResolver.concreteIndices(context, "foo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo*");
assertEquals(1, results.length);
assertEquals("foobar", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "bar");
assertEquals(1, results.length);
assertEquals("bar", results[0]);

// Open and closed
options = IndicesOptions.fromOptions(false, true, true, true);
context = new IndexNameExpressionResolver.Context(state, options);
results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(3, results.length);
assertThat(results, arrayContainingInAnyOrder("bar", "foobar", "foo"));

results = indexNameExpressionResolver.concreteIndices(context, "foo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo*");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foobar", "foo"));

results = indexNameExpressionResolver.concreteIndices(context, "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "bar");
assertEquals(1, results.length);
assertEquals("bar", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "-foo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "-foo*");
assertEquals(1, results.length);
assertEquals("bar", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "-*");
results = indexNameExpressionResolver.concreteIndexNames(context, "-*");
assertEquals(0, results.length);

options = IndicesOptions.fromOptions(false, false, true, true);
context = new IndexNameExpressionResolver.Context(state, options);
try {
indexNameExpressionResolver.concreteIndices(context, "-*");
indexNameExpressionResolver.concreteIndexNames(context, "-*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getResourceId().toString(), equalTo("[-*]"));

@@ -336,21 +336,21 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
{
IndicesOptions noExpandLenient = IndicesOptions.fromOptions(true, true, false, false);
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandLenient);
String[] results = indexNameExpressionResolver.concreteIndices(context, "baz*");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "baz*");
assertThat(results, emptyArray());

results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
assertEquals(1, results.length);
assertEquals("foo", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "foofoobar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));

results = indexNameExpressionResolver.concreteIndices(context, (String[])null);
results = indexNameExpressionResolver.concreteIndexNames(context, (String[])null);
assertEquals(0, results.length);

results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertEquals(0, results.length);
}

@@ -359,17 +359,17 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
IndicesOptions noExpandDisallowEmpty = IndicesOptions.fromOptions(true, false, false, false);
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandDisallowEmpty);
try {
indexNameExpressionResolver.concreteIndices(context, "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));
}

String[] results = indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
assertEquals(1, results.length);
assertEquals("foo", results[0]);

results = indexNameExpressionResolver.concreteIndices(context, "foofoobar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
}

@@ -378,17 +378,17 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
{
IndicesOptions noExpandErrorUnavailable = IndicesOptions.fromOptions(false, true, false, false);
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandErrorUnavailable);
String[] results = indexNameExpressionResolver.concreteIndices(context, "baz*");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "baz*");
assertThat(results, emptyArray());

try {
indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));
}

results = indexNameExpressionResolver.concreteIndices(context, "foofoobar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
}

@@ -398,20 +398,20 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
IndicesOptions noExpandStrict = IndicesOptions.fromOptions(false, false, false, false);
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, noExpandStrict);
try {
indexNameExpressionResolver.concreteIndices(context, "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));
}

try {
indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));
}

String[] results = indexNameExpressionResolver.concreteIndices(context, "foofoobar");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foobar"));
}
@@ -429,7 +429,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

try {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
indexNameExpressionResolver.concreteIndices(context, "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));

@@ -437,7 +437,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

try {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
indexNameExpressionResolver.concreteIndices(context, "foo", "baz*");
indexNameExpressionResolver.concreteIndexNames(context, "foo", "baz*");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("baz*"));

@@ -445,7 +445,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

try {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
indexNameExpressionResolver.concreteIndices(context, "foofoobar");
indexNameExpressionResolver.concreteIndexNames(context, "foofoobar");
fail();
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it"));

@@ -453,7 +453,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

try {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
indexNameExpressionResolver.concreteIndices(context, "foo", "foofoobar");
indexNameExpressionResolver.concreteIndexNames(context, "foo", "foofoobar");
fail();
} catch(IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Alias [foofoobar] has more than one indices associated with it"));

@@ -461,7 +461,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

try {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
indexNameExpressionResolver.concreteIndices(context, "foofoo-closed", "foofoobar");
indexNameExpressionResolver.concreteIndexNames(context, "foofoo-closed", "foofoobar");
fail();
} catch(IndexClosedException e) {
assertThat(e.getMessage(), equalTo("closed"));

@@ -469,7 +469,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
}

IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictSingleIndexNoExpandForbidClosed());
String[] results = indexNameExpressionResolver.concreteIndices(context, "foo", "barbaz");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo", "barbaz");
assertEquals(2, results.length);
assertThat(results, arrayContainingInAnyOrder("foo", "foofoo"));
}

@@ -479,18 +479,18 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

IndicesOptions options = IndicesOptions.strictExpandOpen();
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
String[] results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
String[] results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertThat(results, emptyArray());
try {
indexNameExpressionResolver.concreteIndices(context, "foo");
indexNameExpressionResolver.concreteIndexNames(context, "foo");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("foo"));
}
results = indexNameExpressionResolver.concreteIndices(context, "foo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo*");
assertThat(results, emptyArray());
try {
indexNameExpressionResolver.concreteIndices(context, "foo*", "bar");
indexNameExpressionResolver.concreteIndexNames(context, "foo*", "bar");
fail();
} catch (IndexNotFoundException e) {
assertThat(e.getIndex().getName(), equalTo("bar"));

@@ -498,18 +498,18 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
results = indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
results = indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
assertThat(results, emptyArray());
results = indexNameExpressionResolver.concreteIndices(context, "foo");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo");
assertThat(results, emptyArray());
results = indexNameExpressionResolver.concreteIndices(context, "foo*");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo*");
assertThat(results, emptyArray());
results = indexNameExpressionResolver.concreteIndices(context, "foo*", "bar");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo*", "bar");
assertThat(results, emptyArray());

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, false, true, false));
try {
indexNameExpressionResolver.concreteIndices(context, Strings.EMPTY_ARRAY);
indexNameExpressionResolver.concreteIndexNames(context, Strings.EMPTY_ARRAY);
} catch (IndexNotFoundException e) {
assertThat(e.getResourceId().toString(), equalTo("[_all]"));
}

@@ -527,7 +527,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen());

try {
indexNameExpressionResolver.concreteIndices(context, "testZZZ");
indexNameExpressionResolver.concreteIndexNames(context, "testZZZ");
fail("Expected IndexNotFoundException");
} catch(IndexNotFoundException e) {
assertThat(e.getMessage(), is("no such index"));

@@ -541,7 +541,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());

assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX")));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testXXX", "testZZZ")), equalTo(newHashSet("testXXX")));
}

public void testConcreteIndicesIgnoreIndicesAllMissing() {

@@ -552,7 +552,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen());

try {
indexNameExpressionResolver.concreteIndices(context, "testMo", "testMahdy");
indexNameExpressionResolver.concreteIndexNames(context, "testMo", "testMahdy");
fail("Expected IndexNotFoundException");
} catch(IndexNotFoundException e) {
assertThat(e.getMessage(), is("no such index"));

@@ -565,7 +565,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
.put(indexBuilder("kuku"));
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX")));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, new String[]{})), equalTo(newHashSet("kuku", "testXXX")));
}

public void testConcreteIndicesWildcardExpansion() {

@@ -578,13 +578,13 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();

IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, false));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(new HashSet<String>()));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(new HashSet<String>()));
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, false));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY")));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY")));
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, false, true));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXYY")));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXYY")));
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, true, true, true));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndices(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
assertThat(newHashSet(indexNameExpressionResolver.concreteIndexNames(context, "testX*")), equalTo(newHashSet("testXXX", "testXXY", "testXYY")));
}

/**

@@ -610,7 +610,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

// with no indices, asking for all indices should return empty list or exception, depending on indices options
if (indicesOptions.allowNoIndices()) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, allIndices);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(context, allIndices);
assertThat(concreteIndices, notNullValue());
assertThat(concreteIndices.length, equalTo(0));
} else {

@@ -625,7 +625,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();
context = new IndexNameExpressionResolver.Context(state, indicesOptions);
if (indicesOptions.expandWildcardsOpen() || indicesOptions.expandWildcardsClosed() || indicesOptions.allowNoIndices()) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, allIndices);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(context, allIndices);
assertThat(concreteIndices, notNullValue());
int expectedNumberOfIndices = 0;
if (indicesOptions.expandWildcardsOpen()) {

@@ -646,7 +646,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
*/
private void checkCorrectException(IndexNameExpressionResolver indexNameExpressionResolver, IndexNameExpressionResolver.Context context, String[] allIndices) {
try {
indexNameExpressionResolver.concreteIndices(context, allIndices);
indexNameExpressionResolver.concreteIndexNames(context, allIndices);
fail("wildcard expansion on should trigger IndexMissingException");
} catch (IndexNotFoundException e) {
// expected

@@ -668,12 +668,12 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

// asking for non existing wildcard pattern should return empty list or exception
if (indicesOptions.allowNoIndices()) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(context, "Foo*");
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(context, "Foo*");
assertThat(concreteIndices, notNullValue());
assertThat(concreteIndices.length, equalTo(0));
} else {
try {
indexNameExpressionResolver.concreteIndices(context, "Foo*");
indexNameExpressionResolver.concreteIndexNames(context, "Foo*");
fail("expecting exception when result empty and allowNoIndicec=false");
} catch (IndexNotFoundException e) {
// expected exception

@@ -798,51 +798,51 @@ public class IndexNameExpressionResolverTests extends ESTestCase {

IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed());
try {
indexNameExpressionResolver.concreteIndices(context, "foo1-closed");
indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed");
fail("foo1-closed should be closed, but it is open");
} catch (IndexClosedException e) {
// expected
}

try {
indexNameExpressionResolver.concreteIndices(context, "foobar1-closed");
indexNameExpressionResolver.concreteIndexNames(context, "foobar1-closed");
fail("foo1-closed should be closed, but it is open");
} catch (IndexClosedException e) {
// expected
}

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions()));
String[] results = indexNameExpressionResolver.concreteIndices(context, "foo1-closed");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed");
assertThat(results, emptyArray());

results = indexNameExpressionResolver.concreteIndices(context, "foobar1-closed");
results = indexNameExpressionResolver.concreteIndexNames(context, "foobar1-closed");
assertThat(results, emptyArray());

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
results = indexNameExpressionResolver.concreteIndices(context, "foo1-closed");
results = indexNameExpressionResolver.concreteIndexNames(context, "foo1-closed");
assertThat(results, arrayWithSize(1));
assertThat(results, arrayContaining("foo1-closed"));

results = indexNameExpressionResolver.concreteIndices(context, "foobar1-closed");
results = indexNameExpressionResolver.concreteIndexNames(context, "foobar1-closed");
assertThat(results, arrayWithSize(1));
assertThat(results, arrayContaining("foo1-closed"));

// testing an alias pointing to three indices:
context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpenAndForbidClosed());
try {
indexNameExpressionResolver.concreteIndices(context, "foobar2-closed");
indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed");
fail("foo2-closed should be closed, but it is open");
} catch (IndexClosedException e) {
// expected
}

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.fromOptions(true, context.getOptions().allowNoIndices(), context.getOptions().expandWildcardsOpen(), context.getOptions().expandWildcardsClosed(), context.getOptions()));
results = indexNameExpressionResolver.concreteIndices(context, "foobar2-closed");
results = indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed");
assertThat(results, arrayWithSize(1));
assertThat(results, arrayContaining("foo3"));

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
results = indexNameExpressionResolver.concreteIndices(context, "foobar2-closed");
results = indexNameExpressionResolver.concreteIndexNames(context, "foobar2-closed");
assertThat(results, arrayWithSize(3));
assertThat(results, arrayContainingInAnyOrder("foo1-closed", "foo2-closed", "foo3"));
}

@@ -855,7 +855,7 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
IndicesOptions.lenientExpandOpen(), IndicesOptions.strictExpandOpenAndForbidClosed()};
for (IndicesOptions options : indicesOptions) {
IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, options);
String[] results = indexNameExpressionResolver.concreteIndices(context, "index1", "index1", "alias1");
String[] results = indexNameExpressionResolver.concreteIndexNames(context, "index1", "index1", "alias1");
assertThat(results, equalTo(new String[]{"index1"}));
}
}

@@ -875,11 +875,11 @@ public class IndexNameExpressionResolverTests extends ESTestCase {
ClusterState state = ClusterState.builder(new ClusterName("_name")).metaData(mdBuilder).build();

IndexNameExpressionResolver.Context context = new IndexNameExpressionResolver.Context(state, IndicesOptions.lenientExpandOpen());
String[] strings = indexNameExpressionResolver.concreteIndices(context, "alias-*");
String[] strings = indexNameExpressionResolver.concreteIndexNames(context, "alias-*");
assertArrayEquals(new String[] {"test-0"}, strings);

context = new IndexNameExpressionResolver.Context(state, IndicesOptions.strictExpandOpen());
strings = indexNameExpressionResolver.concreteIndices(context, "alias-*");
strings = indexNameExpressionResolver.concreteIndexNames(context, "alias-*");

assertArrayEquals(new String[] {"test-0"}, strings);
}

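The renaming above reflects the split this change introduces in IndexNameExpressionResolver: tests that only care about names now call concreteIndexNames(...), which still returns String[], while resolution that has to survive an index being deleted and re-created under the same name works with concrete Index instances carrying the index UUID. A minimal sketch of the intended call pattern, assuming the post-change resolver API; the ClusterState variable state and the "foo*" pattern are placeholders:

    // name-based resolution, as exercised by the tests above
    IndexNameExpressionResolver resolver = new IndexNameExpressionResolver(Settings.EMPTY);
    String[] names = resolver.concreteIndexNames(state, IndicesOptions.strictExpandOpen(), "foo*");

    // UUID-aware resolution (assumed signature), the form cluster-state changes should prefer
    Index[] indices = resolver.concreteIndices(state, IndicesOptions.strictExpandOpen(), "foo*");
    for (Index index : indices) {
        // the <name, uuid> tuple keeps identifying the right index even if the name is later reused
        String name = index.getName();
        String uuid = index.getUUID();
    }
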
@@ -28,6 +28,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESAllocationTestCase;

import java.util.HashMap;

@@ -172,12 +173,12 @@ public class GatewayMetaStateTests extends ESAllocationTestCase {
boolean stateInMemory,
boolean expectMetaData) throws Exception {
MetaData inMemoryMetaData = null;
Set<String> oldIndicesList = emptySet();
Set<Index> oldIndicesList = emptySet();
if (stateInMemory) {
inMemoryMetaData = event.previousState().metaData();
oldIndicesList = GatewayMetaState.getRelevantIndices(event.previousState(), event.previousState(), oldIndicesList);
}
Set<String> newIndicesList = GatewayMetaState.getRelevantIndices(event.state(),event.previousState(), oldIndicesList);
Set<Index> newIndicesList = GatewayMetaState.getRelevantIndices(event.state(),event.previousState(), oldIndicesList);
// third, get the actual write info
Iterator<GatewayMetaState.IndexMetaWriteInfo> indices = GatewayMetaState.resolveStatesToBeWritten(oldIndicesList, newIndicesList, inMemoryMetaData, event.state().metaData()).iterator();

@@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESTestCase;

import java.util.Arrays;

@@ -37,7 +38,7 @@ import java.util.Map;
public class PriorityComparatorTests extends ESTestCase {

public void testPreferNewIndices() {
RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards((RoutingNodes) null);
RoutingNodes.UnassignedShards shards = new RoutingNodes.UnassignedShards(null);
List<ShardRouting> shardRoutings = Arrays.asList(TestShardRouting.newShardRouting("oldest", 0, null, null, null,
randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")), TestShardRouting.newShardRouting("newest", 0, null, null, null,
randomBoolean(), ShardRoutingState.UNASSIGNED, new UnassignedInfo(randomFrom(UnassignedInfo.Reason.values()), "foobar")));

@@ -47,11 +48,11 @@ public class PriorityComparatorTests extends ESTestCase {
}
shards.sort(new PriorityComparator() {
@Override
protected Settings getIndexSettings(String index) {
if ("oldest".equals(index)) {
protected Settings getIndexSettings(Index index) {
if ("oldest".equals(index.getName())) {
return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10)
.put(IndexMetaData.SETTING_PRIORITY, 1).build();
} else if ("newest".equals(index)) {
} else if ("newest".equals(index.getName())) {
return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100)
.put(IndexMetaData.SETTING_PRIORITY, 1).build();
}

@@ -77,11 +78,11 @@ public class PriorityComparatorTests extends ESTestCase {
}
shards.sort(new PriorityComparator() {
@Override
protected Settings getIndexSettings(String index) {
if ("oldest".equals(index)) {
protected Settings getIndexSettings(Index index) {
if ("oldest".equals(index.getName())) {
return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 10)
.put(IndexMetaData.SETTING_PRIORITY, 100).build();
} else if ("newest".equals(index)) {
} else if ("newest".equals(index.getName())) {
return Settings.builder().put(IndexMetaData.SETTING_CREATION_DATE, 100)
.put(IndexMetaData.SETTING_PRIORITY, 1).build();
}

@@ -118,8 +119,8 @@ public class PriorityComparatorTests extends ESTestCase {
}
shards.sort(new PriorityComparator() {
@Override
protected Settings getIndexSettings(String index) {
IndexMeta indexMeta = map.get(index);
protected Settings getIndexSettings(Index index) {
IndexMeta indexMeta = map.get(index.getName());
return indexMeta.settings;
}
});

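PriorityComparator subclasses now receive the concrete Index rather than a bare name, so lookup tables that are still keyed by name go through index.getName(). A minimal sketch of such an override, assuming the post-change abstract method; the settingsByName map is a hypothetical stand-in for whatever the caller keys its settings by:

    Map<String, Settings> settingsByName = new HashMap<>();
    settingsByName.put("oldest", Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 100).build());

    PriorityComparator comparator = new PriorityComparator() {
        @Override
        protected Settings getIndexSettings(Index index) {
            // the Index handle carries name and UUID; this lookup only needs the name
            Settings settings = settingsByName.get(index.getName());
            return settings == null ? Settings.EMPTY : settings;
        }
    };
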
@@ -182,7 +182,7 @@ public class TimestampMappingTests extends ESSingleNodeTestCase {
IndexRequest request = new IndexRequest("test", "type", "1").source(doc);
request.process(metaData, mappingMetaData, true, "test");
assertThat(request.timestamp(), notNullValue());
assertThat(request.timestamp(), is(MappingMetaData.Timestamp.parseStringTimestamp("1970-01-01", Joda.forPattern("YYYY-MM-dd"), Version.CURRENT)));
assertThat(request.timestamp(), is(MappingMetaData.Timestamp.parseStringTimestamp("1970-01-01", Joda.forPattern("YYYY-MM-dd"))));
}

// Issue 4718: was throwing a TimestampParsingException: failed to parse timestamp [null]

@@ -65,6 +65,7 @@ import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitC
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.notNullValue;
import static org.hamcrest.Matchers.nullValue;
import static org.hamcrest.Matchers.startsWith;

public class IndicesOptionsIntegrationIT extends ESIntegTestCase {

@@ -685,7 +686,7 @@ public class IndicesOptionsIntegrationIT extends ESIntegTestCase {
try {
verify(client().admin().indices().prepareUpdateSettings("barbaz").setSettings(Settings.builder().put("e", "f")), false);
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), equalTo("Can't update non dynamic settings[[index.e]] for open indices [[barbaz]]"));
assertThat(e.getMessage(), startsWith("Can't update non dynamic settings [[index.e]] for open indices [[barbaz"));
}
verify(client().admin().indices().prepareUpdateSettings("baz*").setSettings(Settings.builder().put("a", "b")), true);
}

@@ -137,7 +137,8 @@ public class UpdateSettingsIT extends ESIntegTestCase {
.execute().actionGet();
fail("can't change number of replicas on a closed index");
} catch (IllegalArgumentException ex) {
assertEquals(ex.getMessage(), "Can't update [index.number_of_replicas] on closed indices [[test]] - can leave index in an unopenable state");
assertTrue(ex.getMessage(), ex.getMessage().startsWith("Can't update [index.number_of_replicas] on closed indices [[test/"));
assertTrue(ex.getMessage(), ex.getMessage().endsWith("]] - can leave index in an unopenable state"));
// expected
}
client().admin().indices().prepareUpdateSettings("test")

@@ -1880,7 +1880,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin().indices().prepareDelete("test-idx-1").get();
fail("Expected deleting index to fail during snapshot");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [test-idx-1]"));
assertThat(e.getMessage(), containsString("Cannot delete indices that are being snapshotted: [[test-idx-1/"));
}
} else {
try {

@@ -1888,7 +1888,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin().indices().prepareClose("test-idx-1").get();
fail("Expected closing index to fail during snapshot");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [test-idx-1]"));
assertThat(e.getMessage(), containsString("Cannot close indices that are being snapshotted: [[test-idx-1/"));
}
}
}

@@ -1964,9 +1964,10 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
client.admin().indices().prepareClose("test-idx-1").get();
fail("Expected closing index to fail during restore");
} catch (IllegalArgumentException e) {
assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [test-idx-1]"));
assertThat(e.getMessage(), containsString("Cannot close indices that are being restored: [[test-idx-1/"));
}
} finally {
// unblock even if the try block fails otherwise we will get bogus failures when we delete all indices in test teardown.
logger.info("--> unblocking all data nodes");
unblockAllDataNodes("test-repo");
}

@@ -370,7 +370,7 @@ public class IndicesRequestTests extends ESIntegTestCase {
internalCluster().clientNodeClient().admin().indices().flush(flushRequest).actionGet();

clearInterceptedActions();
String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndices(client().admin().cluster().prepareState().get().getState(), flushRequest);
String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndexNames(client().admin().cluster().prepareState().get().getState(), flushRequest);
assertIndicesSubset(Arrays.asList(indices), indexShardActions);
}

@@ -393,7 +393,7 @@ public class IndicesRequestTests extends ESIntegTestCase {
internalCluster().clientNodeClient().admin().indices().refresh(refreshRequest).actionGet();

clearInterceptedActions();
String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndices(client().admin().cluster().prepareState().get().getState(), refreshRequest);
String[] indices = new IndexNameExpressionResolver(Settings.EMPTY).concreteIndexNames(client().admin().cluster().prepareState().get().getState(), refreshRequest);
assertIndicesSubset(Arrays.asList(indices), indexShardActions);
}

Some files were not shown because too many files have changed in this diff.