Merge branch 'master' into add_backwards_rest_tests

This commit is contained in:
Simon Willnauer 2016-03-14 21:18:42 +01:00
commit bd96075f7f
343 changed files with 3383 additions and 5067 deletions

View File

@ -1,5 +1,5 @@
elasticsearch = 5.0.0
lucene = 6.0.0-snapshot-bea235f
lucene = 6.0.0-snapshot-f0aa4fc
# optional dependencies
spatial4j = 0.6

View File

@ -787,8 +787,9 @@ public class MapperQueryParser extends QueryParser {
assert q instanceof BoostQuery == false;
return pq;
} else if (q instanceof MultiPhraseQuery) {
((MultiPhraseQuery) q).setSlop(slop);
return q;
MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder((MultiPhraseQuery) q);
builder.setSlop(slop);
return builder.build();
} else {
return q;
}
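For context: in Lucene 6 MultiPhraseQuery became immutable, so slop can no longer be set on an already-built query; the hunk above therefore copies the query into a MultiPhraseQuery.Builder, sets the slop there, and returns the rebuilt query. A minimal sketch of that pattern, assuming lucene-core 6.x on the classpath (the class and field names below are illustrative, not from this commit):

import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiPhraseQuery;

final class SlopExample {
    // Rebuild an immutable MultiPhraseQuery with a different slop value.
    static MultiPhraseQuery withSlop(MultiPhraseQuery original, int slop) {
        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder(original);
        builder.setSlop(slop);
        return builder.build();
    }

    static MultiPhraseQuery example() {
        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder();
        builder.add(new Term("body", "quick"));                                          // one term at this position
        builder.add(new Term[] { new Term("body", "fox"), new Term("body", "foxes") });  // several terms at this position
        return withSlop(builder.build(), 2);
    }
}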

View File

@ -34,7 +34,6 @@ import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import java.io.IOException;
import java.util.Collection;
import java.util.List;
/**
*
@ -68,7 +67,7 @@ public class CustomFieldQuery extends FieldQuery {
flatten(((FiltersFunctionScoreQuery) sourceQuery).getSubQuery(), reader, flatQueries, boost);
} else if (sourceQuery instanceof MultiPhraseQuery) {
MultiPhraseQuery q = ((MultiPhraseQuery) sourceQuery);
convertMultiPhraseQuery(0, new int[q.getTermArrays().size()], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
convertMultiPhraseQuery(0, new int[q.getTermArrays().length], q, q.getTermArrays(), q.getPositions(), reader, flatQueries);
} else if (sourceQuery instanceof BlendedTermQuery) {
final BlendedTermQuery blendedTermQuery = (BlendedTermQuery) sourceQuery;
flatten(blendedTermQuery.rewrite(reader), reader, flatQueries, boost);
@ -77,7 +76,7 @@ public class CustomFieldQuery extends FieldQuery {
}
}
private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, List<Term[]> terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
private void convertMultiPhraseQuery(int currentPos, int[] termsIdx, MultiPhraseQuery orig, Term[][] terms, int[] pos, IndexReader reader, Collection<Query> flatQueries) throws IOException {
if (currentPos == 0) {
// if we have more than 16 terms
int numTerms = 0;
@ -97,16 +96,16 @@ public class CustomFieldQuery extends FieldQuery {
* we walk all possible ways and for each path down the MPQ we create a PhraseQuery; this is what FieldQuery supports.
* It seems expensive but most queries will be pretty small.
*/
if (currentPos == terms.size()) {
if (currentPos == terms.length) {
PhraseQuery.Builder queryBuilder = new PhraseQuery.Builder();
queryBuilder.setSlop(orig.getSlop());
for (int i = 0; i < termsIdx.length; i++) {
queryBuilder.add(terms.get(i)[termsIdx[i]], pos[i]);
queryBuilder.add(terms[i][termsIdx[i]], pos[i]);
}
Query query = queryBuilder.build();
this.flatten(query, reader, flatQueries, 1F);
} else {
Term[] t = terms.get(currentPos);
Term[] t = terms[currentPos];
for (int i = 0; i < t.length; i++) {
termsIdx[currentPos] = i;
convertMultiPhraseQuery(currentPos+1, termsIdx, orig, terms, pos, reader, flatQueries);
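For context: MultiPhraseQuery.getTermArrays() returns Term[][] in Lucene 6 rather than List<Term[]>, which is why the recursion above switches from size()/get(i) to length/[i]. A self-contained sketch of the same expansion (every term path becomes one PhraseQuery), assuming lucene-core 6.x; the class name is illustrative:

import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiPhraseQuery;
import org.apache.lucene.search.PhraseQuery;
import org.apache.lucene.search.Query;

final class MultiPhraseExpander {
    static List<Query> expand(MultiPhraseQuery query) {
        Term[][] terms = query.getTermArrays();   // Term[][] in Lucene 6
        int[] positions = query.getPositions();
        List<Query> out = new ArrayList<>();
        expand(0, new int[terms.length], query, terms, positions, out);
        return out;
    }

    // Walk every combination of one term per position and build a PhraseQuery for it.
    private static void expand(int pos, int[] idx, MultiPhraseQuery orig, Term[][] terms,
                               int[] positions, List<Query> out) {
        if (pos == terms.length) {
            PhraseQuery.Builder builder = new PhraseQuery.Builder();
            builder.setSlop(orig.getSlop());
            for (int i = 0; i < idx.length; i++) {
                builder.add(terms[i][idx[i]], positions[i]);
            }
            out.add(builder.build());
            return;
        }
        for (int i = 0; i < terms[pos].length; i++) {
            idx[pos] = i;
            expand(pos + 1, idx, orig, terms, positions, out);
        }
    }
}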

View File

@ -213,7 +213,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
}
if (request.indices() != null && request.indices().length > 0) {
try {
indexNameExpressionResolver.concreteIndices(clusterState, IndicesOptions.strictExpand(), request.indices());
indexNameExpressionResolver.concreteIndexNames(clusterState, IndicesOptions.strictExpand(), request.indices());
waitForCounter++;
} catch (IndexNotFoundException e) {
response.setStatus(ClusterHealthStatus.RED); // no indices, make sure it's RED
@ -280,7 +280,7 @@ public class TransportClusterHealthAction extends TransportMasterNodeReadAction<
String[] concreteIndices;
try {
concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
} catch (IndexNotFoundException e) {
// one of the specified indices is not there - treat it as RED.
ClusterHealthResponse response = new ClusterHealthResponse(clusterName.value(), Strings.EMPTY_ARRAY, clusterState,

View File

@ -67,7 +67,7 @@ public class ClusterSearchShardsGroup implements Streamable, ToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
index = Index.readIndex(in);
index = new Index(in);
shardId = in.readVInt();
shards = new ShardRouting[in.readVInt()];
for (int i = 0; i < shards.length; i++) {

View File

@ -59,7 +59,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
@Override
protected ClusterBlockException checkBlock(ClusterSearchShardsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -70,7 +70,7 @@ public class TransportClusterSearchShardsAction extends TransportMasterNodeReadA
@Override
protected void masterOperation(final ClusterSearchShardsRequest request, final ClusterState state, final ActionListener<ClusterSearchShardsResponse> listener) {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(state, request.routing(), request.indices());
Set<String> nodeIds = new HashSet<>();
GroupShardsIterator groupShardsIterator = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
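For context: the rename that repeats through the rest of this commit is concreteIndices(...) -> concreteIndexNames(...) wherever only index names (String[]) are needed, while concreteIndices(...) is kept for callers that want fully resolved Index objects (name plus UUID). A hedged usage sketch of the two entry points as they appear in this diff, assuming the Elasticsearch 5.x classes shown here; the wrapper class and method names are illustrative:

import org.elasticsearch.action.admin.indices.open.OpenIndexRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.index.Index;

final class ResolverUsage {
    // Name-only resolution, as used for block checks and shard routing.
    static String[] names(IndexNameExpressionResolver resolver, ClusterState state, String... expressions) {
        return resolver.concreteIndexNames(state, IndicesOptions.strictExpand(), expressions);
    }

    // Full Index resolution (name plus UUID), as used for cluster state update requests.
    static Index[] indices(IndexNameExpressionResolver resolver, ClusterState state, OpenIndexRequest request) {
        return resolver.concreteIndices(state, request);
    }
}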

View File

@ -66,7 +66,7 @@ public class TransportCreateSnapshotAction extends TransportMasterNodeAction<Cre
if (clusterBlockException != null) {
return clusterBlockException;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override

View File

@ -106,7 +106,7 @@ public class TransportClusterStateAction extends TransportMasterNodeReadAction<C
}
if (request.indices().length > 0) {
String[] indices = indexNameExpressionResolver.concreteIndices(currentState, request);
String[] indices = indexNameExpressionResolver.concreteIndexNames(currentState, request);
for (String filteredIndex : indices) {
IndexMetaData indexMetaData = currentState.metaData().index(filteredIndex);
if (indexMetaData != null) {

View File

@ -90,11 +90,11 @@ public class TransportIndicesAliasesAction extends TransportMasterNodeAction<Ind
Set<String> aliases = new HashSet<>();
for (AliasActions action : actions) {
//expand indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), action.indices());
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), action.indices());
//collect the aliases
Collections.addAll(aliases, action.aliases());
for (String index : concreteIndices) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
for (String alias : action.concreteAliases(state.metaData(), index)) {
AliasAction finalAction = new AliasAction(action.aliasAction());
finalAction.index(index);
finalAction.alias(alias);

View File

@ -50,7 +50,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction<G
@Override
protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -60,7 +60,7 @@ public class TransportAliasesExistAction extends TransportMasterNodeReadAction<G
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<AliasesExistResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
boolean result = state.metaData().hasAliases(request.aliases(), concreteIndices);
listener.onResponse(new AliasesExistResponse(result));
}

View File

@ -53,7 +53,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
@Override
protected ClusterBlockException checkBlock(GetAliasesRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -63,7 +63,7 @@ public class TransportGetAliasesAction extends TransportMasterNodeReadAction<Get
@Override
protected void masterOperation(GetAliasesRequest request, ClusterState state, ActionListener<GetAliasesResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
@SuppressWarnings("unchecked")
ImmutableOpenMap<String, List<AliasMetaData>> result = (ImmutableOpenMap) state.metaData().findAliases(request.aliases(), concreteIndices);
listener.onResponse(new GetAliasesResponse(result));

View File

@ -33,7 +33,9 @@ import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -46,7 +48,8 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
private final MetaDataIndexStateService indexStateService;
private final DestructiveOperations destructiveOperations;
private volatile boolean closeIndexEnabled;
public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING = Setting.boolSetting("cluster.indices.close.enable", true, true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLUSTER_INDICES_CLOSE_ENABLE_SETTING =
Setting.boolSetting("cluster.indices.close.enable", true, Property.Dynamic, Property.NodeScope);
@Inject
public TransportCloseIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
@ -86,12 +89,12 @@ public class TransportCloseIndexAction extends TransportMasterNodeAction<CloseIn
@Override
protected ClusterBlockException checkBlock(CloseIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final CloseIndexRequest request, final ClusterState state, final ActionListener<CloseIndexResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
CloseIndexClusterStateUpdateRequest updateRequest = new CloseIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices);
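For context: settings declared in this commit move from the old boolean flags plus Setting.Scope.CLUSTER to the Setting.Property varargs seen above (Property.Dynamic, Property.NodeScope). A minimal sketch of that declaration style, assuming the Elasticsearch 5.x Setting API shown in this diff; the setting keys below are made up for illustration:

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

final class ExampleSettings {
    // Dynamic (updatable at runtime) and node-scoped, mirroring CLUSTER_INDICES_CLOSE_ENABLE_SETTING above.
    public static final Setting<Boolean> EXAMPLE_CLOSE_ENABLED =
        Setting.boolSetting("example.indices.close.enable", true, Property.Dynamic, Property.NodeScope);

    // Non-dynamic variant, mirroring FORCE_LOCAL_SETTING later in this commit.
    public static final Setting<Boolean> EXAMPLE_FORCE_LOCAL =
        Setting.boolSetting("example.master.force_local", false, Property.NodeScope);
}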

View File

@ -31,10 +31,15 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
/**
* Delete index action.
*/
@ -70,13 +75,13 @@ public class TransportDeleteIndexAction extends TransportMasterNodeAction<Delete
@Override
protected ClusterBlockException checkBlock(DeleteIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final DeleteIndexRequest request, final ClusterState state, final ActionListener<DeleteIndexResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
if (concreteIndices.length == 0) {
final Set<Index> concreteIndices = new HashSet<>(Arrays.asList(indexNameExpressionResolver.concreteIndices(state, request)));
if (concreteIndices.isEmpty()) {
listener.onResponse(new DeleteIndexResponse(true));
return;
}

View File

@ -60,7 +60,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction<
protected ClusterBlockException checkBlock(IndicesExistsRequest request, ClusterState state) {
//make sure through indices options that the concrete indices call never throws IndexMissingException
IndicesOptions indicesOptions = IndicesOptions.fromOptions(true, true, request.indicesOptions().expandWildcardsOpen(), request.indicesOptions().expandWildcardsClosed());
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, indicesOptions, request.indices()));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, indicesOptions, request.indices()));
}
@Override
@ -68,7 +68,7 @@ public class TransportIndicesExistsAction extends TransportMasterNodeReadAction<
boolean exists;
try {
// Similar to the previous behaviour, but now aliases and wildcards are also supported.
indexNameExpressionResolver.concreteIndices(state, request);
indexNameExpressionResolver.concreteIndexNames(state, request);
exists = true;
} catch (IndexNotFoundException e) {
exists = false;

View File

@ -57,12 +57,12 @@ public class TransportTypesExistsAction extends TransportMasterNodeReadAction<Ty
@Override
protected ClusterBlockException checkBlock(TypesExistsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final TypesExistsRequest request, final ClusterState state, final ActionListener<TypesExistsResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request.indicesOptions(), request.indices());
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request.indicesOptions(), request.indices());
if (concreteIndices.length == 0) {
listener.onResponse(new TypesExistsResponse(false));
return;

View File

@ -46,10 +46,9 @@ public class TransportShardFlushAction extends TransportReplicationAction<ShardF
@Inject
public TransportShardFlushAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, ShardFlushRequest::new, ShardFlushRequest::new, ThreadPool.Names.FLUSH);
}
@Override

View File

@ -60,7 +60,7 @@ public class TransportGetIndexAction extends TransportClusterInfoAction<GetIndex
@Override
protected ClusterBlockException checkBlock(GetIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override

View File

@ -56,7 +56,7 @@ public class TransportGetFieldMappingsAction extends HandledTransportAction<GetF
@Override
protected void doExecute(GetFieldMappingsRequest request, final ActionListener<GetFieldMappingsResponse> listener) {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
final AtomicInteger indexCounter = new AtomicInteger();
final AtomicInteger completionCounter = new AtomicInteger(concreteIndices.length);
final AtomicReferenceArray<Object> indexResponses = new AtomicReferenceArray<>(concreteIndices.length);

View File

@ -52,7 +52,7 @@ public class TransportGetMappingsAction extends TransportClusterInfoAction<GetMa
@Override
protected ClusterBlockException checkBlock(GetMappingsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override

View File

@ -32,9 +32,12 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import java.io.IOException;
import java.util.Arrays;
import java.util.Map;
import java.util.Objects;
import static org.elasticsearch.action.ValidateActions.addValidationError;
@ -65,6 +68,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
private String source;
private boolean updateAllTypes = false;
private Index concreteIndex;
public PutMappingRequest() {
}
@ -90,6 +94,10 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
} else if (source.isEmpty()) {
validationException = addValidationError("mapping source is empty", validationException);
}
if (concreteIndex != null && (indices != null && indices.length > 0)) {
validationException = addValidationError("either concrete index or unresolved indices can be set, concrete index: ["
+ concreteIndex + "] and indices: " + Arrays.asList(indices) , validationException);
}
return validationException;
}
@ -102,6 +110,22 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
return this;
}
/**
* Sets a concrete index for this put mapping request.
*/
public PutMappingRequest setConcreteIndex(Index index) {
Objects.requireNonNull(index, "index must not be null");
this.concreteIndex = index;
return this;
}
/**
* Returns a concrete index for this mapping or <code>null</code> if no concrete index is defined
*/
public Index getConcreteIndex() {
return concreteIndex;
}
/**
* The indices the mappings will be put on.
*/
@ -259,6 +283,7 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
source = in.readString();
updateAllTypes = in.readBoolean();
readTimeout(in);
concreteIndex = in.readOptionalWritable(Index::new);
}
@Override
@ -270,5 +295,6 @@ public class PutMappingRequest extends AcknowledgedRequest<PutMappingRequest> im
out.writeString(source);
out.writeBoolean(updateAllTypes);
writeTimeout(out);
out.writeOptionalWriteable(concreteIndex);
}
}
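For context: the new concreteIndex field lets internal callers target an already-resolved Index (name plus UUID) instead of an index expression, and the validation above rejects setting both at once. A hedged usage sketch, assuming the PutMappingRequest methods added in this hunk; the helper below and its caller-supplied Index are illustrative:

import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequest;
import org.elasticsearch.index.Index;

final class PutMappingExample {
    static PutMappingRequest forResolvedIndex(Index resolved) {
        PutMappingRequest request = new PutMappingRequest();
        // Target the resolved index directly and leave indices() unset,
        // otherwise the validation above adds an error.
        request.setConcreteIndex(resolved);
        return request;
    }
}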

View File

@ -23,6 +23,7 @@ import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.index.Index;
import java.util.Map;
@ -40,6 +41,11 @@ public class PutMappingRequestBuilder extends AcknowledgedRequestBuilder<PutMapp
return this;
}
public PutMappingRequestBuilder setConcreteIndex(Index index) {
request.setConcreteIndex(index);
return this;
}
/**
* Specifies what type of requested indices to ignore and wildcard indices expressions.
* <p>

View File

@ -31,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataMappingService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -63,13 +64,19 @@ public class TransportPutMappingAction extends TransportMasterNodeAction<PutMapp
@Override
protected ClusterBlockException checkBlock(PutMappingRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
String[] indices;
if (request.getConcreteIndex() == null) {
indices = indexNameExpressionResolver.concreteIndexNames(state, request);
} else {
indices = new String[] {request.getConcreteIndex().getName()};
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indices);
}
@Override
protected void masterOperation(final PutMappingRequest request, final ClusterState state, final ActionListener<PutMappingResponse> listener) {
try {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = request.getConcreteIndex() == null ? indexNameExpressionResolver.concreteIndices(state, request) : new Index[] {request.getConcreteIndex()};
PutMappingClusterStateUpdateRequest updateRequest = new PutMappingClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices).type(request.type())

View File

@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -73,12 +74,12 @@ public class TransportOpenIndexAction extends TransportMasterNodeAction<OpenInde
@Override
protected ClusterBlockException checkBlock(OpenIndexRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
protected void masterOperation(final OpenIndexRequest request, final ClusterState state, final ActionListener<OpenIndexResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
OpenIndexClusterStateUpdateRequest updateRequest = new OpenIndexClusterStateUpdateRequest()
.ackTimeout(request.timeout()).masterNodeTimeout(request.masterNodeTimeout())
.indices(concreteIndices);

View File

@ -48,10 +48,9 @@ public class TransportShardRefreshAction extends TransportReplicationAction<Basi
@Inject
public TransportShardRefreshAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, BasicReplicationRequest::new, BasicReplicationRequest::new, ThreadPool.Names.REFRESH);
}
@Override

View File

@ -34,6 +34,7 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsFilter;
import org.elasticsearch.common.util.CollectionUtils;
import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -61,7 +62,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
@Override
protected ClusterBlockException checkBlock(GetSettingsRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@ -72,9 +73,9 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
@Override
protected void masterOperation(GetSettingsRequest request, ClusterState state, ActionListener<GetSettingsResponse> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
ImmutableOpenMap.Builder<String, Settings> indexToSettingsBuilder = ImmutableOpenMap.builder();
for (String concreteIndex : concreteIndices) {
for (Index concreteIndex : concreteIndices) {
IndexMetaData indexMetaData = state.getMetaData().index(concreteIndex);
if (indexMetaData == null) {
continue;
@ -93,7 +94,7 @@ public class TransportGetSettingsAction extends TransportMasterNodeReadAction<Ge
}
settings = settingsBuilder.build();
}
indexToSettingsBuilder.put(concreteIndex, settings);
indexToSettingsBuilder.put(concreteIndex.getName(), settings);
}
listener.onResponse(new GetSettingsResponse(indexToSettingsBuilder.build()));
}

View File

@ -32,6 +32,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -65,7 +66,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
if (request.settings().getAsMap().size() == 1 && IndexMetaData.INDEX_BLOCKS_METADATA_SETTING.exists(request.settings()) || IndexMetaData.INDEX_READ_ONLY_SETTING.exists(request.settings())) {
return null;
}
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, indexNameExpressionResolver.concreteIndexNames(state, request));
}
@Override
@ -75,7 +76,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
@Override
protected void masterOperation(final UpdateSettingsRequest request, final ClusterState state, final ActionListener<UpdateSettingsResponse> listener) {
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final Index[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(concreteIndices)
.settings(request.settings())

View File

@ -87,7 +87,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
protected void masterOperation(IndicesShardStoresRequest request, ClusterState state, ActionListener<IndicesShardStoresResponse> listener) {
final RoutingTable routingTables = state.routingTable();
final RoutingNodes routingNodes = state.getRoutingNodes();
final String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
final String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
final Set<ShardId> shardIdsToFetch = new HashSet<>();
logger.trace("using cluster state version [{}] to determine shards", state.version());
@ -115,7 +115,7 @@ public class TransportIndicesShardStoresAction extends TransportMasterNodeReadAc
@Override
protected ClusterBlockException checkBlock(IndicesShardStoresRequest request, ClusterState state) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndices(state, request));
return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, indexNameExpressionResolver.concreteIndexNames(state, request));
}
private class AsyncShardStoresInfoFetches {

View File

@ -47,6 +47,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndexAlreadyExistsException;
@ -245,17 +246,18 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
if (addFailureIfIndexIsUnavailable(documentRequest, bulkRequest, responses, i, concreteIndices, metaData)) {
continue;
}
String concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
Index concreteIndex = concreteIndices.resolveIfAbsent(documentRequest);
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
MappingMetaData mappingMd = null;
if (metaData.hasIndex(concreteIndex)) {
mappingMd = metaData.index(concreteIndex).mappingOrDefault(indexRequest.type());
final IndexMetaData indexMetaData = metaData.index(concreteIndex);
if (indexMetaData != null) {
mappingMd = indexMetaData.mappingOrDefault(indexRequest.type());
}
try {
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex);
indexRequest.process(metaData, mappingMd, allowIdGeneration, concreteIndex.getName());
} catch (ElasticsearchParseException | RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, indexRequest.type(), indexRequest.id(), e);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), indexRequest.type(), indexRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "index", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
@ -263,9 +265,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
} else if (request instanceof DeleteRequest) {
try {
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex, (DeleteRequest)request);
TransportDeleteAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (DeleteRequest)request);
} catch(RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "delete", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
@ -274,9 +276,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
} else if (request instanceof UpdateRequest) {
try {
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex, (UpdateRequest)request);
TransportUpdateAction.resolveAndValidateRouting(metaData, concreteIndex.getName(), (UpdateRequest)request);
} catch(RoutingMissingException e) {
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex, documentRequest.type(), documentRequest.id(), e);
BulkItemResponse.Failure failure = new BulkItemResponse.Failure(concreteIndex.getName(), documentRequest.type(), documentRequest.id(), e);
BulkItemResponse bulkItemResponse = new BulkItemResponse(i, "update", failure);
responses.set(i, bulkItemResponse);
// make sure the request never gets processed again
@ -294,7 +296,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
ActionRequest request = bulkRequest.requests.get(i);
if (request instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index());
String concreteIndex = concreteIndices.getConcreteIndex(indexRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.type(), indexRequest.id(), indexRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
@ -304,7 +306,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
list.add(new BulkItemRequest(i, request));
} else if (request instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index());
String concreteIndex = concreteIndices.getConcreteIndex(deleteRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
@ -314,7 +316,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
list.add(new BulkItemRequest(i, request));
} else if (request instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request;
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index());
String concreteIndex = concreteIndices.getConcreteIndex(updateRequest.index()).getName();
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
@ -356,18 +358,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
public void onFailure(Throwable e) {
// create failures for all relevant requests
for (BulkItemRequest request : requests) {
final String indexName = concreteIndices.getConcreteIndex(request.index()).getName();
if (request.request() instanceof IndexRequest) {
IndexRequest indexRequest = (IndexRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), indexRequest.opType().toString().toLowerCase(Locale.ENGLISH),
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(indexRequest.index()), indexRequest.type(), indexRequest.id(), e)));
new BulkItemResponse.Failure(indexName, indexRequest.type(), indexRequest.id(), e)));
} else if (request.request() instanceof DeleteRequest) {
DeleteRequest deleteRequest = (DeleteRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "delete",
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(deleteRequest.index()), deleteRequest.type(), deleteRequest.id(), e)));
new BulkItemResponse.Failure(indexName, deleteRequest.type(), deleteRequest.id(), e)));
} else if (request.request() instanceof UpdateRequest) {
UpdateRequest updateRequest = (UpdateRequest) request.request();
responses.set(request.id(), new BulkItemResponse(request.id(), "update",
new BulkItemResponse.Failure(concreteIndices.getConcreteIndex(updateRequest.index()), updateRequest.type(), updateRequest.id(), e)));
new BulkItemResponse.Failure(indexName, updateRequest.type(), updateRequest.id(), e)));
}
}
if (counter.decrementAndGet() == 0) {
@ -385,7 +388,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private boolean addFailureIfIndexIsUnavailable(DocumentRequest request, BulkRequest bulkRequest, AtomicArray<BulkItemResponse> responses, int idx,
final ConcreteIndices concreteIndices,
final MetaData metaData) {
String concreteIndex = concreteIndices.getConcreteIndex(request.index());
Index concreteIndex = concreteIndices.getConcreteIndex(request.index());
Exception unavailableException = null;
if (concreteIndex == null) {
try {
@ -397,9 +400,9 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
}
}
if (unavailableException == null) {
IndexMetaData indexMetaData = metaData.index(concreteIndex);
IndexMetaData indexMetaData = metaData.getIndexSafe(concreteIndex);
if (indexMetaData.getState() == IndexMetaData.State.CLOSE) {
unavailableException = new IndexClosedException(metaData.index(request.index()).getIndex());
unavailableException = new IndexClosedException(concreteIndex);
}
}
if (unavailableException != null) {
@ -425,19 +428,19 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
private static class ConcreteIndices {
private final ClusterState state;
private final IndexNameExpressionResolver indexNameExpressionResolver;
private final Map<String, String> indices = new HashMap<>();
private final Map<String, Index> indices = new HashMap<>();
ConcreteIndices(ClusterState state, IndexNameExpressionResolver indexNameExpressionResolver) {
this.state = state;
this.indexNameExpressionResolver = indexNameExpressionResolver;
}
String getConcreteIndex(String indexOrAlias) {
Index getConcreteIndex(String indexOrAlias) {
return indices.get(indexOrAlias);
}
String resolveIfAbsent(DocumentRequest request) {
String concreteIndex = indices.get(request.index());
Index resolveIfAbsent(DocumentRequest request) {
Index concreteIndex = indices.get(request.index());
if (concreteIndex == null) {
concreteIndex = indexNameExpressionResolver.concreteSingleIndex(state, request);
indices.put(request.index(), concreteIndex);
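For context: the ConcreteIndices helper above now caches resolved Index objects (rather than plain names) per requested index or alias, and callers append .getName() only where a String is still required. A plain-JDK sketch of that memoization pattern, with illustrative names (this is not the class used in the commit):

import java.util.HashMap;
import java.util.Map;
import java.util.function.Function;

// Resolve each expression once and reuse the resolved value afterwards.
final class ResolvedIndexCache<K, V> {
    private final Map<K, V> cache = new HashMap<>();
    private final Function<K, V> resolver;

    ResolvedIndexCache(Function<K, V> resolver) {
        this.resolver = resolver;
    }

    V resolveIfAbsent(K key) {
        return cache.computeIfAbsent(key, resolver);
    }

    V getResolved(K key) {
        return cache.get(key);   // null if never resolved, like getConcreteIndex above
    }
}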

View File

@ -47,7 +47,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.engine.Engine;
@ -75,17 +74,19 @@ public class TransportShardBulkAction extends TransportReplicationAction<BulkSha
private final UpdateHelper updateHelper;
private final boolean allowIdGeneration;
private final MappingUpdatedAction mappingUpdatedAction;
@Inject
public TransportShardBulkAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, UpdateHelper updateHelper, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver,
super(settings, ACTION_NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver,
BulkShardRequest::new, BulkShardRequest::new, ThreadPool.Names.BULK);
this.updateHelper = updateHelper;
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
this.mappingUpdatedAction = mappingUpdatedAction;
}
@Override

View File

@ -60,10 +60,10 @@ public class TransportDeleteAction extends TransportReplicationAction<DeleteRequ
public TransportDeleteAction(Settings settings, TransportService transportService, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ShardStateAction shardStateAction,
TransportCreateIndexAction createIndexAction, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, MappingUpdatedAction mappingUpdatedAction,
IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
super(settings, DeleteAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
mappingUpdatedAction, actionFilters, indexNameExpressionResolver,
actionFilters, indexNameExpressionResolver,
DeleteRequest::new, DeleteRequest::new, ThreadPool.Names.INDEX);
this.createIndexAction = createIndexAction;
this.autoCreateIndex = autoCreateIndex;

View File

@ -69,7 +69,7 @@ public class TransportMultiGetAction extends HandledTransportAction<MultiGetRequ
continue;
}
item.routing(clusterState.metaData().resolveIndexRouting(item.parent(), item.routing(), item.index()));
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item);
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, item).getName();
if (item.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, item.type())) {
responses.set(i, new MultiGetItemResponse(null, new MultiGetResponse.Failure(concreteSingleIndex, item.type(), item.id(),
new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + item.type() + "]/[" + item.id() + "]"))));

View File

@ -42,6 +42,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;
@ -584,14 +585,6 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
return this.versionType;
}
private Version getVersion(MetaData metaData, String concreteIndex) {
// this can go away in 3.0 but is here now for easy backporting - since in 2.x we need the version on the timestamp stuff
final IndexMetaData indexMetaData = metaData.getIndices().get(concreteIndex);
if (indexMetaData == null) {
throw new IndexNotFoundException(concreteIndex);
}
return Version.indexCreated(indexMetaData.getSettings());
}
public void process(MetaData metaData, @Nullable MappingMetaData mappingMd, boolean allowIdGeneration, String concreteIndex) {
// resolve the routing if needed
@ -600,8 +593,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
// resolve timestamp if provided externally
if (timestamp != null) {
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(timestamp,
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER,
getVersion(metaData, concreteIndex));
mappingMd != null ? mappingMd.timestamp().dateTimeFormatter() : TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
}
if (mappingMd != null) {
// might as well check for routing here
@ -645,7 +637,7 @@ public class IndexRequest extends ReplicationRequest<IndexRequest> implements Do
// assigned again because mappingMd and
// mappingMd#timestamp() are not null
assert mappingMd != null;
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter(), getVersion(metaData, concreteIndex));
timestamp = MappingMetaData.Timestamp.parseStringTimestamp(defaultTimestamp, mappingMd.timestamp().dateTimeFormatter());
}
}
}
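For context: the private getVersion() helper is deleted because MappingMetaData.Timestamp.parseStringTimestamp no longer takes the index-created Version, as the two-argument calls above show. A minimal sketch of that call, assuming the Elasticsearch 5.x classes already imported in this file; the wrapper class is illustrative:

import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.index.mapper.internal.TimestampFieldMapper;

final class TimestampParsing {
    // Parse an externally supplied timestamp with the default timestamp date formatter.
    static String parse(String rawTimestamp) {
        return MappingMetaData.Timestamp.parseStringTimestamp(
            rawTimestamp, TimestampFieldMapper.Defaults.DATE_TIME_FORMATTER);
    }
}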

View File

@ -69,6 +69,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
private final TransportCreateIndexAction createIndexAction;
private final ClusterService clusterService;
private final MappingUpdatedAction mappingUpdatedAction;
@Inject
public TransportIndexAction(Settings settings, TransportService transportService, ClusterService clusterService,
@ -76,8 +77,9 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
TransportCreateIndexAction createIndexAction, MappingUpdatedAction mappingUpdatedAction,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
AutoCreateIndex autoCreateIndex) {
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction, mappingUpdatedAction,
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
super(settings, IndexAction.NAME, transportService, clusterService, indicesService, threadPool, shardStateAction,
actionFilters, indexNameExpressionResolver, IndexRequest::new, IndexRequest::new, ThreadPool.Names.INDEX);
this.mappingUpdatedAction = mappingUpdatedAction;
this.createIndexAction = createIndexAction;
this.autoCreateIndex = autoCreateIndex;
this.allowIdGeneration = settings.getAsBoolean("action.allow_id_generation", true);
@ -143,7 +145,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
protected Tuple<IndexResponse, IndexRequest> shardOperationOnPrimary(MetaData metaData, IndexRequest request) throws Exception {
// validate, if routing is required, that we got routing
IndexMetaData indexMetaData = metaData.index(request.shardId().getIndex());
IndexMetaData indexMetaData = metaData.getIndexSafe(request.shardId().getIndex());
MappingMetaData mappingMd = indexMetaData.mappingOrDefault(request.type());
if (mappingMd != null && mappingMd.routing().required()) {
if (request.routing() == null) {
@ -205,8 +207,7 @@ public class TransportIndexAction extends TransportReplicationAction<IndexReques
Mapping update = operation.parsedDoc().dynamicMappingsUpdate();
final ShardId shardId = indexShard.shardId();
if (update != null) {
final String indexName = shardId.getIndexName();
mappingUpdatedAction.updateMappingOnMasterSynchronously(indexName, request.type(), update);
mappingUpdatedAction.updateMappingOnMaster(shardId.getIndex(), request.type(), update);
operation = prepareIndexOperationOnPrimary(request, indexShard);
update = operation.parsedDoc().dynamicMappingsUpdate();
if (update != null) {

View File

@ -173,7 +173,7 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
PercolateRequest percolateRequest = (PercolateRequest) element;
String[] concreteIndices;
try {
concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, percolateRequest);
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, percolateRequest);
} catch (IndexNotFoundException e) {
reducedResponses.set(slot, e);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));

View File

@ -96,7 +96,7 @@ abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult>
// TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
// date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
// of just for the _search api
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(),
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request.indicesOptions(),
startTime(), request.indices());
for (String index : concreteIndices) {

View File

@ -64,7 +64,7 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
// optimize search type for cases where there is only one shard group to search on
try {
ClusterState clusterState = clusterService.state();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, searchRequest);
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
searchRequest.routing(), searchRequest.indices());
int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);

View File

@ -27,6 +27,7 @@ import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.mapper.MapperService;
@ -39,7 +40,8 @@ import java.util.List;
*/
public final class AutoCreateIndex {
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING = new Setting<>("action.auto_create_index", "true", AutoCreate::new, false, Setting.Scope.CLUSTER);
public static final Setting<AutoCreate> AUTO_CREATE_INDEX_SETTING =
new Setting<>("action.auto_create_index", "true", AutoCreate::new, Property.NodeScope);
private final boolean dynamicMappingDisabled;
private final IndexNameExpressionResolver resolver;

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -33,7 +34,8 @@ public final class DestructiveOperations extends AbstractComponent {
/**
* Setting which controls whether wildcard usage (*, prefix*, _all) is allowed.
*/
public static final Setting<Boolean> REQUIRES_NAME_SETTING = Setting.boolSetting("action.destructive_requires_name", false, true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> REQUIRES_NAME_SETTING =
Setting.boolSetting("action.destructive_requires_name", false, Property.Dynamic, Property.NodeScope);
private volatile boolean destructiveRequiresName;
@Inject

View File

@ -125,7 +125,7 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest<
throw blockException;
}
// update to concrete indices
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
blockException = checkRequestBlock(clusterState, request, concreteIndices);
if (blockException != null) {
throw blockException;

View File

@ -51,7 +51,6 @@ import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;
@ -241,7 +240,7 @@ public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRe
throw globalBlockException;
}
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
ClusterBlockException requestBlockException = checkRequestBlock(clusterState, request, concreteIndices);
if (requestBlockException != null) {
throw requestBlockException;

View File

@ -24,6 +24,7 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@ -37,7 +38,8 @@ import java.util.function.Supplier;
public abstract class TransportMasterNodeReadAction<Request extends MasterNodeReadRequest<Request>, Response extends ActionResponse>
extends TransportMasterNodeAction<Request, Response> {
public static final Setting<Boolean> FORCE_LOCAL_SETTING = Setting.boolSetting("action.master.force_local", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> FORCE_LOCAL_SETTING =
Setting.boolSetting("action.master.force_local", false, Property.NodeScope);
private final boolean forceLocal;

View File

@ -50,7 +50,7 @@ public abstract class TransportClusterInfoAction<Request extends ClusterInfoRequ
@Override
protected final void masterOperation(final Request request, final ClusterState state, final ActionListener<Response> listener) {
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(state, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(state, request);
doMasterOperation(request, concreteIndices, state, listener);
}

View File

@ -97,7 +97,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
@Override
public void onFailure(Throwable e) {
logger.trace("{}: got failure from {}", actionName, shardId);
int totalNumCopies = clusterState.getMetaData().index(shardId.getIndexName()).getNumberOfReplicas() + 1;
int totalNumCopies = clusterState.getMetaData().getIndexSafe(shardId.getIndex()).getNumberOfReplicas() + 1;
ShardResponse shardResponse = newShardResponse();
ReplicationResponse.ShardInfo.Failure[] failures;
if (TransportActions.isShardNotAvailableException(e)) {
@ -130,7 +130,7 @@ public abstract class TransportBroadcastReplicationAction<Request extends Broadc
*/
protected List<ShardId> shards(Request request, ClusterState clusterState) {
List<ShardId> shardIds = new ArrayList<>();
String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
String[] concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, request);
for (String index : concreteIndices) {
IndexMetaData indexMetaData = clusterState.metaData().getIndices().get(index);
if (indexMetaData != null) {

View File

@ -103,7 +103,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected final ShardStateAction shardStateAction;
protected final WriteConsistencyLevel defaultWriteConsistencyLevel;
protected final TransportRequestOptions transportOptions;
protected final MappingUpdatedAction mappingUpdatedAction;
final String transportReplicaAction;
final String transportPrimaryAction;
@ -113,7 +112,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected TransportReplicationAction(Settings settings, String actionName, TransportService transportService,
ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ShardStateAction shardStateAction,
MappingUpdatedAction mappingUpdatedAction, ActionFilters actionFilters,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Supplier<Request> request,
Supplier<ReplicaRequest> replicaRequest, String executor) {
super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, transportService.getTaskManager());
@ -121,7 +120,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
this.clusterService = clusterService;
this.indicesService = indicesService;
this.shardStateAction = shardStateAction;
this.mappingUpdatedAction = mappingUpdatedAction;
this.transportPrimaryAction = actionName + "[p]";
this.transportReplicaAction = actionName + "[r]";
@ -525,7 +523,7 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
}
private String concreteIndex(ClusterState state) {
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request) : request.index();
return resolveIndex() ? indexNameExpressionResolver.concreteSingleIndex(state, request).getName() : request.index();
}
private ShardRouting primary(ClusterState state) {

View File

@ -138,7 +138,7 @@ public abstract class TransportInstanceSingleOperationAction<Request extends Ins
throw blockException;
}
}
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request));
request.concreteIndex(indexNameExpressionResolver.concreteSingleIndex(observer.observedState(), request).getName());
resolveRequest(observer.observedState(), request);
blockException = checkRequestBlock(observer.observedState(), request);
if (blockException != null) {

View File

@ -141,7 +141,7 @@ public abstract class TransportSingleShardAction<Request extends SingleShardRequ
String concreteSingleIndex;
if (resolveIndex(request)) {
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, request);
concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, request).getName();
} else {
concreteSingleIndex = request.index();
}

View File

@ -72,7 +72,7 @@ public class TransportMultiTermVectorsAction extends HandledTransportAction<Mult
termVectorsRequest.type(), termVectorsRequest.id(), new IndexNotFoundException(termVectorsRequest.index()))));
continue;
}
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, (DocumentRequest) termVectorsRequest);
String concreteSingleIndex = indexNameExpressionResolver.concreteSingleIndex(clusterState, termVectorsRequest).getName();
if (termVectorsRequest.routing() == null && clusterState.getMetaData().routingRequired(concreteSingleIndex, termVectorsRequest.type())) {
responses.set(i, new MultiTermVectorsItemResponse(null, new MultiTermVectorsResponse.Failure(concreteSingleIndex, termVectorsRequest.type(), termVectorsRequest.id(),
new IllegalArgumentException("routing is required for [" + concreteSingleIndex + "]/[" + termVectorsRequest.type() + "]/[" + termVectorsRequest.id() + "]"))));

View File

@ -20,7 +20,7 @@
package org.elasticsearch.bootstrap;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.settings.Setting.Property;
public final class BootstrapSettings {
@ -29,10 +29,13 @@ public final class BootstrapSettings {
// TODO: remove this hack when insecure defaults are removed from java
public static final Setting<Boolean> SECURITY_FILTER_BAD_DEFAULTS_SETTING =
Setting.boolSetting("security.manager.filter_bad_defaults", true, false, Scope.CLUSTER);
Setting.boolSetting("security.manager.filter_bad_defaults", true, Property.NodeScope);
public static final Setting<Boolean> MLOCKALL_SETTING = Setting.boolSetting("bootstrap.mlockall", false, false, Scope.CLUSTER);
public static final Setting<Boolean> SECCOMP_SETTING = Setting.boolSetting("bootstrap.seccomp", true, false, Scope.CLUSTER);
public static final Setting<Boolean> CTRLHANDLER_SETTING = Setting.boolSetting("bootstrap.ctrlhandler", true, false, Scope.CLUSTER);
public static final Setting<Boolean> MLOCKALL_SETTING =
Setting.boolSetting("bootstrap.mlockall", false, Property.NodeScope);
public static final Setting<Boolean> SECCOMP_SETTING =
Setting.boolSetting("bootstrap.seccomp", true, Property.NodeScope);
public static final Setting<Boolean> CTRLHANDLER_SETTING =
Setting.boolSetting("bootstrap.ctrlhandler", true, Property.NodeScope);
}

View File

@ -19,13 +19,13 @@
package org.elasticsearch.cache.recycler;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.recycler.AbstractRecyclerC;
import org.elasticsearch.common.recycler.Recycler;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
@ -43,13 +43,19 @@ import static org.elasticsearch.common.recycler.Recyclers.none;
/** A recycler of fixed-size pages. */
public class PageCacheRecycler extends AbstractComponent implements Releasable {
public static final Setting<Type> TYPE_SETTING = new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING = Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_BYTES_SETTING = Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_LONG_SETTING = Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_INT_SETTING = Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Type> TYPE_SETTING =
new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope);
public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING =
Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
public static final Setting<Double> WEIGHT_BYTES_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope);
public static final Setting<Double> WEIGHT_LONG_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.longs", 1d, 0d, Property.NodeScope);
public static final Setting<Double> WEIGHT_INT_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.ints", 1d, 0d, Property.NodeScope);
// object pages are less useful to us so we give them a lower weight by default
public static final Setting<Double> WEIGHT_OBJECTS_SETTING = Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, false, Setting.Scope.CLUSTER);
public static final Setting<Double> WEIGHT_OBJECTS_SETTING =
Setting.doubleSetting("cache.recycler.page.weight.objects", 0.1d, 0d, Property.NodeScope);
private final Recycler<byte[]> bytePage;
private final Recycler<int[]> intPage;

View File

@ -19,12 +19,8 @@
package org.elasticsearch.client;
import org.elasticsearch.action.Action;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.bulk.BulkRequest;
import org.elasticsearch.action.bulk.BulkRequestBuilder;
import org.elasticsearch.action.bulk.BulkResponse;
@ -87,6 +83,7 @@ import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.Map;
@ -114,7 +111,7 @@ public interface Client extends ElasticsearchClient, Releasable {
default:
throw new IllegalArgumentException("Can't parse [client.type] must be one of [node, transport]");
}
}, false, Setting.Scope.CLUSTER);
}, Property.NodeScope);
/**
* The admin client that can be used to perform administrative operations.

View File

@ -34,6 +34,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.TimeValue;
@ -100,10 +101,14 @@ public class TransportClientNodesService extends AbstractComponent {
private volatile boolean closed;
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL = Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT = Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME = Setting.boolSetting("client.transport.ignore_cluster_name", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF = Setting.boolSetting("client.transport.sniff", false, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLIENT_TRANSPORT_NODES_SAMPLER_INTERVAL =
Setting.positiveTimeSetting("client.transport.nodes_sampler_interval", timeValueSeconds(5), Property.NodeScope);
public static final Setting<TimeValue> CLIENT_TRANSPORT_PING_TIMEOUT =
Setting.positiveTimeSetting("client.transport.ping_timeout", timeValueSeconds(5), Property.NodeScope);
public static final Setting<Boolean> CLIENT_TRANSPORT_IGNORE_CLUSTER_NAME =
Setting.boolSetting("client.transport.ignore_cluster_name", false, Property.NodeScope);
public static final Setting<Boolean> CLIENT_TRANSPORT_SNIFF =
Setting.boolSetting("client.transport.sniff", false, Property.NodeScope);
@Inject
public TransportClientNodesService(Settings settings, ClusterName clusterName, TransportService transportService,

View File

@ -135,8 +135,8 @@ public class ClusterChangedEvent {
List<Index> deleted = null;
for (ObjectCursor<IndexMetaData> cursor : previousState.metaData().indices().values()) {
IndexMetaData index = cursor.value;
IndexMetaData current = state.metaData().index(index.getIndex().getName());
if (current == null || index.getIndexUUID().equals(current.getIndexUUID()) == false) {
IndexMetaData current = state.metaData().index(index.getIndex());
if (current == null) {
if (deleted == null) {
deleted = new ArrayList<>();
}

View File

@ -58,6 +58,7 @@ import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.gateway.GatewayAllocator;
@ -74,7 +75,8 @@ public class ClusterModule extends AbstractModule {
public static final String EVEN_SHARD_COUNT_ALLOCATOR = "even_shard";
public static final String BALANCED_ALLOCATOR = "balanced"; // default
public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING = new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> SHARDS_ALLOCATOR_TYPE_SETTING =
new Setting<>("cluster.routing.allocation.type", BALANCED_ALLOCATOR, Function.identity(), Property.NodeScope);
public static final List<Class<? extends AllocationDecider>> DEFAULT_ALLOCATION_DECIDERS =
Collections.unmodifiableList(Arrays.asList(
SameShardAllocationDecider.class,

View File

@ -23,6 +23,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.io.IOException;
@ -37,7 +38,7 @@ public class ClusterName implements Streamable {
throw new IllegalArgumentException("[cluster.name] must not be empty");
}
return s;
}, false, Setting.Scope.CLUSTER);
}, Property.NodeScope);
public static final ClusterName DEFAULT = new ClusterName(CLUSTER_NAME_SETTING.getDefault(Settings.EMPTY).intern());

View File

@ -39,6 +39,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
@ -64,8 +65,12 @@ import java.util.concurrent.TimeUnit;
*/
public class InternalClusterInfoService extends AbstractComponent implements ClusterInfoService, LocalNodeMasterListener, ClusterStateListener {
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING = Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING = Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_UPDATE_INTERVAL_SETTING =
Setting.timeSetting("cluster.info.update.interval", TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(10),
Property.Dynamic, Property.NodeScope);
public static final Setting<TimeValue> INTERNAL_CLUSTER_INFO_TIMEOUT_SETTING =
Setting.positiveTimeSetting("cluster.info.update.timeout", TimeValue.timeValueSeconds(15),
Property.Dynamic, Property.NodeScope);
private volatile TimeValue updateFrequency;
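Both settings above are declared with Property.Dynamic, so they can be changed on a running cluster through the update-settings API. A rough usage sketch, assuming an already-built Client and that prepareUpdateSettings/setTransientSettings have the shapes they had around this revision; the chosen values are arbitrary:

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class UpdateClusterInfoIntervalExample {
    // `client` is assumed to be an existing node or transport client
    static void shortenUpdateInterval(Client client) {
        client.admin().cluster().prepareUpdateSettings()
            .setTransientSettings(Settings.builder()
                .put("cluster.info.update.interval", "10s") // cannot go below the 10s floor declared above
                .put("cluster.info.update.timeout", "30s"))
            .get();
    }
}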

View File

@ -35,6 +35,10 @@ import org.elasticsearch.transport.TransportService;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ScheduledFuture;
import static org.elasticsearch.common.settings.Setting.Property;
import static org.elasticsearch.common.settings.Setting.positiveTimeSetting;
/**
* This component is responsible for connecting to nodes once they are added to the cluster state, and disconnecting when they are
* removed. It also periodically checks that all connections are still open and restores them if needed.
@ -45,7 +49,7 @@ import java.util.concurrent.ScheduledFuture;
public class NodeConnectionsService extends AbstractLifecycleComponent<NodeConnectionsService> {
public static final Setting<TimeValue> CLUSTER_NODE_RECONNECT_INTERVAL_SETTING =
Setting.positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);
positiveTimeSetting("cluster.nodes.reconnect_interval", TimeValue.timeValueSeconds(10), Property.NodeScope);
private final ThreadPool threadPool;
private final TransportService transportService;

View File

@ -18,17 +18,19 @@
*/
package org.elasticsearch.cluster.ack;
import org.elasticsearch.index.Index;
/**
* Base cluster state update request that allows executing an update against multiple indices
*/
public abstract class IndicesClusterStateUpdateRequest<T extends IndicesClusterStateUpdateRequest<T>> extends ClusterStateUpdateRequest<T> {
private String[] indices;
private Index[] indices;
/**
* Returns the indices the operation needs to be executed on
*/
public String[] indices() {
public Index[] indices() {
return indices;
}
@ -36,7 +38,7 @@ public abstract class IndicesClusterStateUpdateRequest<T extends IndicesClusterS
* Sets the indices the operation needs to be executed on
*/
@SuppressWarnings("unchecked")
public T indices(String[] indices) {
public T indices(Index[] indices) {
this.indices = indices;
return (T)this;
}

View File

@ -19,17 +19,17 @@
package org.elasticsearch.cluster.action.index;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingRequestBuilder;
import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.client.IndicesAdminClient;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.Mapping;
@ -41,7 +41,9 @@ import java.util.concurrent.TimeoutException;
*/
public class MappingUpdatedAction extends AbstractComponent {
public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING = Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> INDICES_MAPPING_DYNAMIC_TIMEOUT_SETTING =
Setting.positiveTimeSetting("indices.mapping.dynamic_timeout", TimeValue.timeValueSeconds(30),
Property.Dynamic, Property.NodeScope);
private IndicesAdminClient client;
private volatile TimeValue dynamicMappingUpdateTimeout;
@ -62,48 +64,20 @@ public class MappingUpdatedAction extends AbstractComponent {
this.client = client.admin().indices();
}
private PutMappingRequestBuilder updateMappingRequest(String index, String type, Mapping mappingUpdate, final TimeValue timeout) {
private PutMappingRequestBuilder updateMappingRequest(Index index, String type, Mapping mappingUpdate, final TimeValue timeout) {
if (type.equals(MapperService.DEFAULT_MAPPING)) {
throw new IllegalArgumentException("_default_ mapping should not be updated");
}
return client.preparePutMapping(index).setType(type).setSource(mappingUpdate.toString())
return client.preparePutMapping().setConcreteIndex(index).setType(type).setSource(mappingUpdate.toString())
.setMasterNodeTimeout(timeout).setTimeout(timeout);
}
public void updateMappingOnMaster(String index, String type, Mapping mappingUpdate, final TimeValue timeout, final MappingUpdateListener listener) {
final PutMappingRequestBuilder request = updateMappingRequest(index, type, mappingUpdate, timeout);
if (listener == null) {
request.execute();
} else {
final ActionListener<PutMappingResponse> actionListener = new ActionListener<PutMappingResponse>() {
@Override
public void onResponse(PutMappingResponse response) {
if (response.isAcknowledged()) {
listener.onMappingUpdate();
} else {
listener.onFailure(new TimeoutException("Failed to acknowledge the mapping response within [" + timeout + "]"));
}
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
};
request.execute(actionListener);
}
}
public void updateMappingOnMasterAsynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout, null);
}
/**
* Same as {@link #updateMappingOnMasterSynchronously(String, String, Mapping, TimeValue)}
* Same as {@link #updateMappingOnMaster(Index, String, Mapping, TimeValue)}
* using the default timeout.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMasterSynchronously(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate) throws Exception {
updateMappingOnMaster(index, type, mappingUpdate, dynamicMappingUpdateTimeout);
}
/**
@ -111,19 +85,9 @@ public class MappingUpdatedAction extends AbstractComponent {
* {@code timeout}. When this method returns successfully, mappings have
* been applied to the master node and propagated to data nodes.
*/
public void updateMappingOnMasterSynchronously(String index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
public void updateMappingOnMaster(Index index, String type, Mapping mappingUpdate, TimeValue timeout) throws Exception {
if (updateMappingRequest(index, type, mappingUpdate, timeout).get().isAcknowledged() == false) {
throw new TimeoutException("Failed to acknowledge mapping update within [" + timeout + "]");
}
}
/**
* A listener to be notified when the mappings were updated
*/
public static interface MappingUpdateListener {
void onMappingUpdate();
void onFailure(Throwable t);
}
}
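With the listener-based variants removed, callers are left with the two synchronous overloads kept above. A hedged sketch of what a call site looks like after this change; the wrapper class, the type name "my_type" and the method arguments are placeholders for whatever the caller already has in hand:

import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.mapper.Mapping;

public class MappingUpdateCallerExample {
    static void pushMappingUpdate(MappingUpdatedAction action, Index index, Mapping update) throws Exception {
        // blocks until the master acknowledges the mapping, using the default dynamic timeout
        action.updateMappingOnMaster(index, "my_type", update);

        // or with an explicit timeout; a TimeoutException is thrown if it is not acknowledged in time
        action.updateMappingOnMaster(index, "my_type", update, TimeValue.timeValueSeconds(10));
    }
}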

View File

@ -76,7 +76,7 @@ public class NodeIndexDeletedAction extends AbstractComponent {
listeners.remove(listener);
}
public void nodeIndexDeleted(final ClusterState clusterState, final String index, final IndexSettings indexSettings, final String nodeId) {
public void nodeIndexDeleted(final ClusterState clusterState, final Index index, final IndexSettings indexSettings, final String nodeId) {
final DiscoveryNodes nodes = clusterState.nodes();
transportService.sendRequest(clusterState.nodes().masterNode(),
INDEX_DELETED_ACTION_NAME, new NodeIndexDeletedMessage(index, nodeId), EmptyTransportResponseHandler.INSTANCE_SAME);
@ -97,7 +97,7 @@ public class NodeIndexDeletedAction extends AbstractComponent {
});
}
private void lockIndexAndAck(String index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException {
private void lockIndexAndAck(Index index, DiscoveryNodes nodes, String nodeId, ClusterState clusterState, IndexSettings indexSettings) throws IOException {
try {
// we are waiting until we can lock the index / all shards on the node and then we ack the delete of the store to the
// master. If we can't acquire the locks here immediately there might be a shard of this index still holding on to the lock
@ -114,9 +114,9 @@ public class NodeIndexDeletedAction extends AbstractComponent {
}
public interface Listener {
void onNodeIndexDeleted(String index, String nodeId);
void onNodeIndexDeleted(Index index, String nodeId);
void onNodeIndexStoreDeleted(String index, String nodeId);
void onNodeIndexStoreDeleted(Index index, String nodeId);
}
private class NodeIndexDeletedTransportHandler implements TransportRequestHandler<NodeIndexDeletedMessage> {
@ -143,13 +143,13 @@ public class NodeIndexDeletedAction extends AbstractComponent {
public static class NodeIndexDeletedMessage extends TransportRequest {
String index;
Index index;
String nodeId;
public NodeIndexDeletedMessage() {
}
NodeIndexDeletedMessage(String index, String nodeId) {
NodeIndexDeletedMessage(Index index, String nodeId) {
this.index = index;
this.nodeId = nodeId;
}
@ -157,27 +157,27 @@ public class NodeIndexDeletedAction extends AbstractComponent {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
index.writeTo(out);
out.writeString(nodeId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
index = new Index(in);
nodeId = in.readString();
}
}
public static class NodeIndexStoreDeletedMessage extends TransportRequest {
String index;
Index index;
String nodeId;
public NodeIndexStoreDeletedMessage() {
}
NodeIndexStoreDeletedMessage(String index, String nodeId) {
NodeIndexStoreDeletedMessage(Index index, String nodeId) {
this.index = index;
this.nodeId = nodeId;
}
@ -185,14 +185,14 @@ public class NodeIndexDeletedAction extends AbstractComponent {
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(index);
index.writeTo(out);
out.writeString(nodeId);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
index = in.readString();
index = new Index(in);
nodeId = in.readString();
}
}

View File

@ -20,6 +20,7 @@ package org.elasticsearch.cluster.metadata;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
/**
* This class acts as a functional wrapper around the <tt>index.auto_expand_replicas</tt> setting.
@ -56,7 +57,7 @@ final class AutoExpandReplicas {
}
}
return new AutoExpandReplicas(min, max, true);
}, true, Setting.Scope.INDEX);
}, Property.Dynamic, Property.IndexScope);
private final int minReplicas;
private final int maxReplicas;
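The setting keeps its usual lower-upper value syntax (for example "0-5", or "0-all" to track every data node); that format comes from the setting's documented behavior rather than from this hunk. A sketch of creating an index that uses it, assuming an existing Client and that prepareCreate/setSettings have their usual shapes at this revision; the index name is illustrative:

import org.elasticsearch.client.Client;
import org.elasticsearch.common.settings.Settings;

public class AutoExpandReplicasExample {
    static void createAutoExpandingIndex(Client client) {
        client.admin().indices().prepareCreate("logs")
            .setSettings(Settings.builder()
                .put("index.number_of_shards", 1)
                // keep a copy on every data node, expanding from 0 replicas up to "all"
                .put("index.auto_expand_replicas", "0-all"))
            .get();
    }
}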

View File

@ -38,6 +38,7 @@ import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
@ -152,28 +153,36 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static final String INDEX_SETTING_PREFIX = "index.";
public static final String SETTING_NUMBER_OF_SHARDS = "index.number_of_shards";
public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, false, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_NUMBER_OF_SHARDS_SETTING =
Setting.intSetting(SETTING_NUMBER_OF_SHARDS, 5, 1, Property.IndexScope);
public static final String SETTING_NUMBER_OF_REPLICAS = "index.number_of_replicas";
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING = Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_NUMBER_OF_REPLICAS_SETTING =
Setting.intSetting(SETTING_NUMBER_OF_REPLICAS, 1, 0, Property.Dynamic, Property.IndexScope);
public static final String SETTING_SHADOW_REPLICAS = "index.shadow_replicas";
public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING = Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, false, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_SHADOW_REPLICAS_SETTING =
Setting.boolSetting(SETTING_SHADOW_REPLICAS, false, Property.IndexScope);
public static final String SETTING_SHARED_FILESYSTEM = "index.shared_filesystem";
public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING = Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, false, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_SHARED_FILESYSTEM_SETTING =
Setting.boolSetting(SETTING_SHARED_FILESYSTEM, false, Property.IndexScope);
public static final String SETTING_AUTO_EXPAND_REPLICAS = "index.auto_expand_replicas";
public static final Setting<AutoExpandReplicas> INDEX_AUTO_EXPAND_REPLICAS_SETTING = AutoExpandReplicas.SETTING;
public static final String SETTING_READ_ONLY = "index.blocks.read_only";
public static final Setting<Boolean> INDEX_READ_ONLY_SETTING = Setting.boolSetting(SETTING_READ_ONLY, false, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_READ_ONLY_SETTING =
Setting.boolSetting(SETTING_READ_ONLY, false, Property.Dynamic, Property.IndexScope);
public static final String SETTING_BLOCKS_READ = "index.blocks.read";
public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING = Setting.boolSetting(SETTING_BLOCKS_READ, false, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_BLOCKS_READ_SETTING =
Setting.boolSetting(SETTING_BLOCKS_READ, false, Property.Dynamic, Property.IndexScope);
public static final String SETTING_BLOCKS_WRITE = "index.blocks.write";
public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING = Setting.boolSetting(SETTING_BLOCKS_WRITE, false, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_BLOCKS_WRITE_SETTING =
Setting.boolSetting(SETTING_BLOCKS_WRITE, false, Property.Dynamic, Property.IndexScope);
public static final String SETTING_BLOCKS_METADATA = "index.blocks.metadata";
public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING = Setting.boolSetting(SETTING_BLOCKS_METADATA, false, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_BLOCKS_METADATA_SETTING =
Setting.boolSetting(SETTING_BLOCKS_METADATA, false, Property.Dynamic, Property.IndexScope);
public static final String SETTING_VERSION_CREATED = "index.version.created";
public static final String SETTING_VERSION_CREATED_STRING = "index.version.created_string";
@ -182,18 +191,24 @@ public class IndexMetaData implements Diffable<IndexMetaData>, FromXContentBuild
public static final String SETTING_VERSION_MINIMUM_COMPATIBLE = "index.version.minimum_compatible";
public static final String SETTING_CREATION_DATE = "index.creation_date";
public static final String SETTING_PRIORITY = "index.priority";
public static final Setting<Integer> INDEX_PRIORITY_SETTING = Setting.intSetting("index.priority", 1, 0, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_PRIORITY_SETTING =
Setting.intSetting("index.priority", 1, 0, Property.Dynamic, Property.IndexScope);
public static final String SETTING_CREATION_DATE_STRING = "index.creation_date_string";
public static final String SETTING_INDEX_UUID = "index.uuid";
public static final String SETTING_DATA_PATH = "index.data_path";
public static final Setting<String> INDEX_DATA_PATH_SETTING = new Setting<>(SETTING_DATA_PATH, "", Function.identity(), false, Setting.Scope.INDEX);
public static final Setting<String> INDEX_DATA_PATH_SETTING =
new Setting<>(SETTING_DATA_PATH, "", Function.identity(), Property.IndexScope);
public static final String SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE = "index.shared_filesystem.recover_on_any_node";
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING = Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, true, Setting.Scope.INDEX);
public static final Setting<Boolean> INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING =
Setting.boolSetting(SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, false, Property.Dynamic, Property.IndexScope);
public static final String INDEX_UUID_NA_VALUE = "_na_";
public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.require.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.include.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("index.routing.allocation.exclude.", true, Setting.Scope.INDEX);
public static final Setting<Settings> INDEX_ROUTING_REQUIRE_GROUP_SETTING =
Setting.groupSetting("index.routing.allocation.require.", Property.Dynamic, Property.IndexScope);
public static final Setting<Settings> INDEX_ROUTING_INCLUDE_GROUP_SETTING =
Setting.groupSetting("index.routing.allocation.include.", Property.Dynamic, Property.IndexScope);
public static final Setting<Settings> INDEX_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.groupSetting("index.routing.allocation.exclude.", Property.Dynamic, Property.IndexScope);
public static final IndexMetaData PROTO = IndexMetaData.builder("")
.settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))

View File

@ -32,6 +32,7 @@ import org.elasticsearch.common.joda.DateMathParser;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
import org.joda.time.DateTimeZone;
@ -65,11 +66,20 @@ public class IndexNameExpressionResolver extends AbstractComponent {
);
}
/**
* Same as {@link #concreteIndexNames(ClusterState, IndicesOptions, String...)}, but the index expressions and options
* are encapsulated in the specified request.
*/
public String[] concreteIndexNames(ClusterState state, IndicesRequest request) {
Context context = new Context(state, request.indicesOptions());
return concreteIndexNames(context, request.indices());
}
/**
* Same as {@link #concreteIndices(ClusterState, IndicesOptions, String...)}, but the index expressions and options
* are encapsulated in the specified request.
*/
public String[] concreteIndices(ClusterState state, IndicesRequest request) {
public Index[] concreteIndices(ClusterState state, IndicesRequest request) {
Context context = new Context(state, request.indicesOptions());
return concreteIndices(context, request.indices());
}
@ -87,7 +97,25 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* @throws IllegalArgumentException if one of the aliases resolves to multiple indices and the provided
* indices options in the context don't allow such a case.
*/
public String[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) {
public String[] concreteIndexNames(ClusterState state, IndicesOptions options, String... indexExpressions) {
Context context = new Context(state, options);
return concreteIndexNames(context, indexExpressions);
}
/**
* Translates the provided index expression into actual concrete indices, properly deduplicated.
*
* @param state the cluster state containing all the data to resolve to expressions to concrete indices
* @param options defines how the aliases or indices need to be resolved to concrete indices
* @param indexExpressions expressions that can be resolved to alias or index names.
* @return the resolved concrete indices based on the cluster state, indices options and index expressions
* @throws IndexNotFoundException if one of the index expressions is pointing to a missing index or alias and the
* provided indices options in the context don't allow such a case, or if the final result of the indices resolution
* contains no indices and the indices options in the context don't allow such a case.
* @throws IllegalArgumentException if one of the aliases resolves to multiple indices and the provided
* indices options in the context don't allow such a case.
*/
public Index[] concreteIndices(ClusterState state, IndicesOptions options, String... indexExpressions) {
Context context = new Context(state, options);
return concreteIndices(context, indexExpressions);
}
@ -105,12 +133,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* @throws IllegalArgumentException if one of the aliases resolves to multiple indices and the provided
* indices options in the context don't allow such a case.
*/
public String[] concreteIndices(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) {
public String[] concreteIndexNames(ClusterState state, IndicesOptions options, long startTime, String... indexExpressions) {
Context context = new Context(state, options, startTime);
return concreteIndices(context, indexExpressions);
return concreteIndexNames(context, indexExpressions);
}
String[] concreteIndices(Context context, String... indexExpressions) {
String[] concreteIndexNames(Context context, String... indexExpressions) {
Index[] indexes = concreteIndices(context, indexExpressions);
String[] names = new String[indexes.length];
for (int i = 0; i < indexes.length; i++) {
names[i] = indexes[i].getName();
}
return names;
}
Index[] concreteIndices(Context context, String... indexExpressions) {
if (indexExpressions == null || indexExpressions.length == 0) {
indexExpressions = new String[]{MetaData.ALL};
}
@ -136,11 +173,11 @@ public class IndexNameExpressionResolver extends AbstractComponent {
infe.setResources("index_expression", indexExpressions);
throw infe;
} else {
return Strings.EMPTY_ARRAY;
return Index.EMPTY_ARRAY;
}
}
final Set<String> concreteIndices = new HashSet<>(expressions.size());
final Set<Index> concreteIndices = new HashSet<>(expressions.size());
for (String expression : expressions) {
AliasOrIndex aliasOrIndex = metaData.getAliasAndIndexLookup().get(expression);
if (aliasOrIndex == null) {
@ -169,11 +206,11 @@ public class IndexNameExpressionResolver extends AbstractComponent {
throw new IndexClosedException(index.getIndex());
} else {
if (options.forbidClosedIndices() == false) {
concreteIndices.add(index.getIndex().getName());
concreteIndices.add(index.getIndex());
}
}
} else if (index.getState() == IndexMetaData.State.OPEN) {
concreteIndices.add(index.getIndex().getName());
concreteIndices.add(index.getIndex());
} else {
throw new IllegalStateException("index state [" + index.getState() + "] not supported");
}
@ -185,7 +222,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
infe.setResources("index_expression", indexExpressions);
throw infe;
}
return concreteIndices.toArray(new String[concreteIndices.size()]);
return concreteIndices.toArray(new Index[concreteIndices.size()]);
}
/**
@ -200,9 +237,9 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* @throws IllegalArgumentException if the index resolution leads to more than one index
* @return the concrete index obtained as a result of the index resolution
*/
public String concreteSingleIndex(ClusterState state, IndicesRequest request) {
public Index concreteSingleIndex(ClusterState state, IndicesRequest request) {
String indexExpression = request.indices() != null && request.indices().length > 0 ? request.indices()[0] : null;
String[] indices = concreteIndices(state, request.indicesOptions(), indexExpression);
Index[] indices = concreteIndices(state, request.indicesOptions(), indexExpression);
if (indices.length != 1) {
throw new IllegalArgumentException("unable to return a single index as the index and options provided got resolved to multiple indices");
}
@ -867,7 +904,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
* Returns <code>true</code> iff the given expression resolves to the given index name, otherwise <code>false</code>
*/
public final boolean matchesIndex(String indexName, String expression, ClusterState state) {
final String[] concreteIndices = concreteIndices(state, IndicesOptions.lenientExpandOpen(), expression);
final String[] concreteIndices = concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), expression);
for (String index : concreteIndices) {
if (Regex.simpleMatch(index, indexName)) {
return true;
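The resolver now exposes two parallel entry points: concreteIndexNames keeps the old String[] contract, while concreteIndices returns fully-qualified Index objects that carry the UUID. A sketch of a caller using both, assuming it already holds a ClusterState and a resolver instance; the "logs-*" expression is illustrative:

import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.index.Index;

public class ResolverExample {
    static void resolve(IndexNameExpressionResolver resolver, ClusterState state) {
        // plain names, for code paths and wire formats that still deal in strings
        String[] names = resolver.concreteIndexNames(state, IndicesOptions.lenientExpandOpen(), "logs-*");
        System.out.println(String.join(",", names));

        // UUID-qualified indices, for code paths that must survive delete-and-recreate races
        Index[] indices = resolver.concreteIndices(state, IndicesOptions.lenientExpandOpen(), "logs-*");
        for (Index index : indices) {
            System.out.println(index.getName() + " / " + index.getUUID());
        }
    }
}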

View File

@ -84,20 +84,10 @@ public class MappingMetaData extends AbstractDiffable<MappingMetaData> {
private static final FormatDateTimeFormatter EPOCH_MILLIS_PARSER = Joda.forPattern("epoch_millis");
public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter,
Version version) throws TimestampParsingException {
public static String parseStringTimestamp(String timestampAsString, FormatDateTimeFormatter dateTimeFormatter) throws TimestampParsingException {
try {
// no need for unix timestamp parsing in 2.x
FormatDateTimeFormatter formatter = version.onOrAfter(Version.V_2_0_0_beta1) ? dateTimeFormatter : EPOCH_MILLIS_PARSER;
return Long.toString(formatter.parser().parseMillis(timestampAsString));
return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));
} catch (RuntimeException e) {
if (version.before(Version.V_2_0_0_beta1)) {
try {
return Long.toString(dateTimeFormatter.parser().parseMillis(timestampAsString));
} catch (RuntimeException e1) {
throw new TimestampParsingException(timestampAsString, e1);
}
}
throw new TimestampParsingException(timestampAsString, e);
}
}
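After the change the method is a thin wrapper around the supplied formatter. A sketch of calling it directly, reusing the epoch_millis pattern that the removed back-compat branch relied on; the input value is arbitrary:

import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;

public class TimestampParsingExample {
    public static void main(String[] args) throws Exception {
        FormatDateTimeFormatter formatter = Joda.forPattern("epoch_millis");
        // returns the parsed timestamp as epoch millis rendered back into a string
        String millis = MappingMetaData.parseStringTimestamp("1457992722000", formatter);
        System.out.println(millis); // 1457992722000
    }
}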

View File

@ -41,6 +41,7 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
@ -139,7 +140,8 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
}
public static final Setting<Boolean> SETTING_READ_ONLY_SETTING = Setting.boolSetting("cluster.blocks.read_only", false, true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> SETTING_READ_ONLY_SETTING =
Setting.boolSetting("cluster.blocks.read_only", false, Property.Dynamic, Property.NodeScope);
public static final ClusterBlock CLUSTER_READ_ONLY_BLOCK = new ClusterBlock(6, "cluster read-only (api)", false, false, RestStatus.FORBIDDEN, EnumSet.of(ClusterBlockLevel.WRITE, ClusterBlockLevel.METADATA_WRITE));
@ -230,7 +232,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
public boolean equalsAliases(MetaData other) {
for (ObjectCursor<IndexMetaData> cursor : other.indices().values()) {
IndexMetaData otherIndex = cursor.value;
IndexMetaData thisIndex= index(otherIndex.getIndex());
IndexMetaData thisIndex = index(otherIndex.getIndex());
if (thisIndex == null) {
return false;
}
@ -455,7 +457,28 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
}
public IndexMetaData index(Index index) {
return index(index.getName());
IndexMetaData metaData = index(index.getName());
if (metaData != null && metaData.getIndexUUID().equals(index.getUUID())) {
return metaData;
}
return null;
}
/**
* Returns the {@link IndexMetaData} for this index.
* @throws IndexNotFoundException if no metadata for this index is found
*/
public IndexMetaData getIndexSafe(Index index) {
IndexMetaData metaData = index(index.getName());
if (metaData != null) {
if(metaData.getIndexUUID().equals(index.getUUID())) {
return metaData;
}
throw new IndexNotFoundException(index,
new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID()
+ "] but got: [" + metaData.getIndexUUID() +"]"));
}
throw new IndexNotFoundException(index);
}
public ImmutableOpenMap<String, IndexMetaData> indices() {
@ -486,20 +509,13 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return (T) customs.get(type);
}
public int totalNumberOfShards() {
public int getTotalNumberOfShards() {
return this.totalNumberOfShards;
}
public int getTotalNumberOfShards() {
return totalNumberOfShards();
}
public int numberOfShards() {
return this.numberOfShards;
}
public int getNumberOfShards() {
return numberOfShards();
return this.numberOfShards;
}
/**
@ -842,6 +858,19 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
return indices.get(index);
}
public IndexMetaData getSafe(Index index) {
IndexMetaData indexMetaData = get(index.getName());
if (indexMetaData != null) {
if(indexMetaData.getIndexUUID().equals(index.getUUID())) {
return indexMetaData;
}
throw new IndexNotFoundException(index,
new IllegalStateException("index uuid doesn't match expected: [" + index.getUUID()
+ "] but got: [" + indexMetaData.getIndexUUID() +"]"));
}
throw new IndexNotFoundException(index);
}
public Builder remove(String index) {
indices.remove(index);
return this;
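The two new lookups encode the UUID check differently: index(Index) quietly returns null when the name still exists but under a different UUID (the index was deleted and recreated), while getIndexSafe(Index) fails fast with IndexNotFoundException. A small sketch of the distinction; the surrounding class is invented:

import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;

public class MetaDataLookupExample {
    static void lookup(MetaData metaData, Index index) {
        // lenient: null if the index is gone or was recreated under a new UUID
        IndexMetaData lenient = metaData.index(index);
        if (lenient == null) {
            System.out.println(index + " is no longer part of this cluster state");
        }

        // strict: callers that need this exact generation of the index fail fast instead
        try {
            IndexMetaData strict = metaData.getIndexSafe(index);
            System.out.println("found " + strict.getIndex());
        } catch (IndexNotFoundException e) {
            System.out.println("missing or UUID mismatch: " + e.getMessage());
        }
    }
}

The TransportBroadcastReplicationAction hunk near the top of this section switches to getIndexSafe for exactly that fail-fast behavior.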

View File

@ -35,14 +35,17 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;
/**
*
@ -68,10 +71,9 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
}
public void deleteIndices(final Request request, final Listener userListener) {
Set<String> indices = Sets.newHashSet(request.indices);
final DeleteIndexListener listener = new DeleteIndexListener(userListener);
clusterService.submitStateUpdateTask("delete-index " + indices, new ClusterStateUpdateTask(Priority.URGENT) {
clusterService.submitStateUpdateTask("delete-index " + request.indices, new ClusterStateUpdateTask(Priority.URGENT) {
@Override
public TimeValue timeout() {
@ -85,23 +87,21 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
@Override
public ClusterState execute(final ClusterState currentState) {
final MetaData meta = currentState.metaData();
final Set<IndexMetaData> metaDatas = request.indices.stream().map(i -> meta.getIndexSafe(i)).collect(Collectors.toSet());
// Check if index deletion conflicts with any running snapshots
SnapshotsService.checkIndexDeletion(currentState, indices);
SnapshotsService.checkIndexDeletion(currentState, metaDatas);
final Set<Index> indices = request.indices;
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
MetaData.Builder metaDataBuilder = MetaData.builder(meta);
ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
for (final String index: indices) {
if (!currentState.metaData().hasConcreteIndex(index)) {
throw new IndexNotFoundException(index);
}
for (final Index index : indices) {
String indexName = index.getName();
logger.debug("[{}] deleting index", index);
routingTableBuilder.remove(index);
clusterBlocksBuilder.removeIndexBlocks(index);
metaDataBuilder.remove(index);
routingTableBuilder.remove(indexName);
clusterBlocksBuilder.removeIndexBlocks(indexName);
metaDataBuilder.remove(indexName);
}
// wait for events from all nodes that it has been removed from their respective metadata...
int count = currentState.nodes().size();
@ -112,7 +112,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
// this listener will be notified once we get back a notification based on the cluster state change below.
final NodeIndexDeletedAction.Listener nodeIndexDeleteListener = new NodeIndexDeletedAction.Listener() {
@Override
public void onNodeIndexDeleted(String deleted, String nodeId) {
public void onNodeIndexDeleted(Index deleted, String nodeId) {
if (indices.contains(deleted)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(new Response(true));
@ -122,7 +122,7 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
}
@Override
public void onNodeIndexStoreDeleted(String deleted, String nodeId) {
public void onNodeIndexStoreDeleted(Index deleted, String nodeId) {
if (indices.contains(deleted)) {
if (counter.decrementAndGet() == 0) {
listener.onResponse(new Response(true));
@ -187,12 +187,12 @@ public class MetaDataDeleteIndexService extends AbstractComponent {
public static class Request {
final String[] indices;
final Set<Index> indices;
TimeValue timeout = TimeValue.timeValueSeconds(10);
TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT;
public Request(String[] indices) {
public Request(Set<Index> indices) {
this.indices = indices;
}
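The request now carries a Set<Index> that the caller is expected to resolve up front (typically via concreteIndices above), which is why the hasConcreteIndex check disappears from the executor. A sketch of building such a request; the helper class and method are hypothetical:

import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.index.Index;

import java.util.Set;

public class DeleteIndexRequestExample {
    static MetaDataDeleteIndexService.Request buildRequest(Index[] resolved) {
        // `resolved` should come from IndexNameExpressionResolver.concreteIndices(...)
        Set<Index> indices = Sets.newHashSet(resolved);
        return new MetaDataDeleteIndexService.Request(indices);
    }
}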

View File

@ -36,6 +36,7 @@ import org.elasticsearch.common.Priority;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.RestoreService;
@ -82,15 +83,11 @@ public class MetaDataIndexStateService extends AbstractComponent {
@Override
public ClusterState execute(ClusterState currentState) {
Set<String> indicesToClose = new HashSet<>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
Set<IndexMetaData> indicesToClose = new HashSet<>();
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
indicesToClose.add(index);
indicesToClose.add(indexMetaData);
}
}
@ -102,22 +99,22 @@ public class MetaDataIndexStateService extends AbstractComponent {
RestoreService.checkIndexClosing(currentState, indicesToClose);
// Check if index closing conflicts with any running snapshots
SnapshotsService.checkIndexClosing(currentState, indicesToClose);
logger.info("closing indices [{}]", indicesAsString);
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
.blocks(currentState.blocks());
for (String index : indicesToClose) {
mdBuilder.put(IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.CLOSE));
blocksBuilder.addIndexBlock(index, INDEX_CLOSED_BLOCK);
for (IndexMetaData openIndexMetadata : indicesToClose) {
final String indexName = openIndexMetadata.getIndex().getName();
mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE));
blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK);
}
ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
for (String index : indicesToClose) {
rtBuilder.remove(index);
for (IndexMetaData index : indicesToClose) {
rtBuilder.remove(index.getIndex().getName());
}
RoutingAllocation.Result routingResult = allocationService.reroute(
@ -143,14 +140,11 @@ public class MetaDataIndexStateService extends AbstractComponent {
@Override
public ClusterState execute(ClusterState currentState) {
List<String> indicesToOpen = new ArrayList<>();
for (String index : request.indices()) {
IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
List<IndexMetaData> indicesToOpen = new ArrayList<>();
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indexMetaData.getState() != IndexMetaData.State.OPEN) {
indicesToOpen.add(index);
indicesToOpen.add(indexMetaData);
}
}
@ -163,20 +157,21 @@ public class MetaDataIndexStateService extends AbstractComponent {
MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
.blocks(currentState.blocks());
for (String index : indicesToOpen) {
IndexMetaData indexMetaData = IndexMetaData.builder(currentState.metaData().index(index)).state(IndexMetaData.State.OPEN).build();
for (IndexMetaData closedMetaData : indicesToOpen) {
final String indexName = closedMetaData.getIndex().getName();
IndexMetaData indexMetaData = IndexMetaData.builder(closedMetaData).state(IndexMetaData.State.OPEN).build();
// The index might be closed because we couldn't import it due to old incompatible version
// We need to check that this index can be upgraded to the current version
indexMetaData = metaDataIndexUpgradeService.upgradeIndexMetaData(indexMetaData);
mdBuilder.put(indexMetaData, true);
blocksBuilder.removeIndexBlock(index, INDEX_CLOSED_BLOCK);
blocksBuilder.removeIndexBlock(indexName, INDEX_CLOSED_BLOCK);
}
ClusterState updatedState = ClusterState.builder(currentState).metaData(mdBuilder).blocks(blocksBuilder).build();
RoutingTable.Builder rtBuilder = RoutingTable.builder(updatedState.routingTable());
for (String index : indicesToOpen) {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().index(index));
for (IndexMetaData index : indicesToOpen) {
rtBuilder.addAsFromCloseToOpen(updatedState.metaData().getIndexSafe(index.getIndex()));
}
RoutingAllocation.Result routingResult = allocationService.reroute(

View File

@ -31,6 +31,7 @@ import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.compress.CompressedXContent;
import org.elasticsearch.common.inject.Inject;
@ -216,31 +217,23 @@ public class MetaDataMappingService extends AbstractComponent {
try {
// precreate incoming indices;
for (PutMappingClusterStateUpdateRequest request : tasks) {
final List<Index> indices = new ArrayList<>(request.indices().length);
try {
for (String index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().index(index);
if (indexMetaData != null) {
if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
// if the index does not exist we create it once, add all types to the mapper service and
// close it later once we are done with mapping update
indicesToClose.add(indexMetaData.getIndex());
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
for (Index index : request.indices()) {
final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
if (indicesService.hasIndex(indexMetaData.getIndex()) == false) {
// if the index does not exist we create it once, add all types to the mapper service and
// close it later once we are done with mapping update
indicesToClose.add(indexMetaData.getIndex());
IndexService indexService = indicesService.createIndex(nodeServicesProvider, indexMetaData,
Collections.emptyList());
// add mappings for all types, we need them for cross-type validation
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
indexService.mapperService().merge(mapping.value.type(), mapping.value.source(),
MapperService.MergeReason.MAPPING_RECOVERY, request.updateAllTypes());
}
indices.add(indexMetaData.getIndex());
} else {
// we didn't find the index in the clusterstate - maybe it was deleted
// NOTE: this doesn't fail the entire batch only the current PutMapping request we are processing
throw new IndexNotFoundException(index);
}
}
currentState = applyRequest(currentState, request, indices);
currentState = applyRequest(currentState, request);
builder.success(request);
} catch (Throwable t) {
builder.failure(request, t);
@ -254,13 +247,20 @@ public class MetaDataMappingService extends AbstractComponent {
}
}
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request,
List<Index> indices) throws IOException {
private ClusterState applyRequest(ClusterState currentState, PutMappingClusterStateUpdateRequest request) throws IOException {
String mappingType = request.type();
CompressedXContent mappingUpdateSource = new CompressedXContent(request.source());
final MetaData metaData = currentState.metaData();
for (Index index : indices) {
final List<Tuple<IndexService, IndexMetaData>> updateList = new ArrayList<>();
for (Index index : request.indices()) {
IndexService indexService = indicesService.indexServiceSafe(index);
// IMPORTANT: always get the metadata from the state since it gets batched
// and if we pull it from the indexService we might miss an update etc.
final IndexMetaData indexMetaData = currentState.getMetaData().getIndexSafe(index);
// this is paranoia... just to be sure we use the exact same indexService and metadata tuple on the update that
// we used for the validation; it makes this mechanism a little less scary
updateList.add(new Tuple<>(indexService, indexMetaData));
// try and parse it (no need to add it here) so we can bail early in case of parsing exception
DocumentMapper newMapper;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(request.type());
@ -281,7 +281,6 @@ public class MetaDataMappingService extends AbstractComponent {
// and a put mapping api call, so we don't know which type did exist before.
// Also the order of the mappings may be backwards.
if (newMapper.parentFieldMapper().active()) {
IndexMetaData indexMetaData = metaData.index(index);
for (ObjectCursor<MappingMetaData> mapping : indexMetaData.getMappings().values()) {
if (newMapper.parentFieldMapper().type().equals(mapping.value.type())) {
throw new IllegalArgumentException("can't add a _parent field that points to an already existing type");
@ -302,13 +301,12 @@ public class MetaDataMappingService extends AbstractComponent {
throw new InvalidTypeNameException("Document mapping type name can't start with '_'");
}
MetaData.Builder builder = MetaData.builder(metaData);
for (Index index : indices) {
for (Tuple<IndexService, IndexMetaData> toUpdate : updateList) {
// do the actual merge here on the master, and update the mapping source
IndexService indexService = indicesService.indexService(index);
if (indexService == null) { // TODO this seems impossible given we use indexServiceSafe above
continue;
}
// we use the exact same indexService and metadata that we validated above to actually apply the update
final IndexService indexService = toUpdate.v1();
final IndexMetaData indexMetaData = toUpdate.v2();
final Index index = indexMetaData.getIndex();
CompressedXContent existingSource = null;
DocumentMapper existingMapper = indexService.mapperService().documentMapper(mappingType);
if (existingMapper != null) {
@ -323,24 +321,20 @@ public class MetaDataMappingService extends AbstractComponent {
} else {
// use the merged mapping source
if (logger.isDebugEnabled()) {
logger.debug("[{}] update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
logger.debug("{} update_mapping [{}] with source [{}]", index, mergedMapper.type(), updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] update_mapping [{}]", index, mergedMapper.type());
logger.info("{} update_mapping [{}]", index, mergedMapper.type());
}
}
} else {
if (logger.isDebugEnabled()) {
logger.debug("[{}] create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
logger.debug("{} create_mapping [{}] with source [{}]", index, mappingType, updatedSource);
} else if (logger.isInfoEnabled()) {
logger.info("[{}] create_mapping [{}]", index, mappingType);
logger.info("{} create_mapping [{}]", index, mappingType);
}
}
IndexMetaData indexMetaData = metaData.index(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
IndexMetaData.Builder indexMetaDataBuilder = IndexMetaData.builder(indexMetaData);
// Mapping updates on a single type may have side-effects on other types so we need to
// update mapping metadata on all types
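
The validate-then-apply pattern above, reduced to a self-contained sketch (stand-in types only, not the real IndexService/IndexMetaData; the index names are made up):

import java.util.ArrayList;
import java.util.List;

import org.elasticsearch.common.collect.Tuple;

public class ValidateThenApplySketch {
    public static void main(String[] args) {
        // validation pass: resolve each index once and remember the exact pair that was validated
        // (String stands in for IndexService, Integer for IndexMetaData)
        List<Tuple<String, Integer>> updateList = new ArrayList<>();
        for (String indexName : new String[] { "logs", "metrics" }) {
            updateList.add(new Tuple<>(indexName, indexName.length()));
        }
        // apply pass: reuse the recorded pair instead of resolving by name again, so the objects
        // that were validated are exactly the objects the update is applied to
        for (Tuple<String, Integer> toUpdate : updateList) {
            System.out.println("applying update to " + toUpdate.v1() + " / " + toUpdate.v2());
        }
    }
}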

View File

@ -23,7 +23,6 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.admin.indices.upgrade.post.UpgradeSettingsClusterStateUpdateRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
import org.elasticsearch.cluster.ClusterChangedEvent;
import org.elasticsearch.cluster.ClusterService;
@ -43,7 +42,7 @@ import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.Index;
import java.util.ArrayList;
import java.util.HashMap;
@ -86,7 +85,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// we will want to know this for translating "all" to a number
final int dataNodeCount = event.state().nodes().dataNodes().size();
Map<Integer, List<String>> nrReplicasChanged = new HashMap<>();
Map<Integer, List<Index>> nrReplicasChanged = new HashMap<>();
// we need to do this each time in case it was changed by update settings
for (final IndexMetaData indexMetaData : event.state().metaData()) {
AutoExpandReplicas autoExpandReplicas = IndexMetaData.INDEX_AUTO_EXPAND_REPLICAS_SETTING.get(indexMetaData.getSettings());
@ -117,7 +116,7 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
nrReplicasChanged.put(numberOfReplicas, new ArrayList<>());
}
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex().getName());
nrReplicasChanged.get(numberOfReplicas).add(indexMetaData.getIndex());
}
}
}
@ -126,25 +125,25 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// update settings and kick off a reroute (implicit) for them to take effect
for (final Integer fNumberOfReplicas : nrReplicasChanged.keySet()) {
Settings settings = Settings.settingsBuilder().put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, fNumberOfReplicas).build();
final List<String> indices = nrReplicasChanged.get(fNumberOfReplicas);
final List<Index> indices = nrReplicasChanged.get(fNumberOfReplicas);
UpdateSettingsClusterStateUpdateRequest updateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(indices.toArray(new String[indices.size()])).settings(settings)
.indices(indices.toArray(new Index[indices.size()])).settings(settings)
.ackTimeout(TimeValue.timeValueMillis(0)) //no need to wait for ack here
.masterNodeTimeout(TimeValue.timeValueMinutes(10));
updateSettings(updateRequest, new ActionListener<ClusterStateUpdateResponse>() {
@Override
public void onResponse(ClusterStateUpdateResponse response) {
for (String index : indices) {
logger.info("[{}] auto expanded replicas to [{}]", index, fNumberOfReplicas);
for (Index index : indices) {
logger.info("{} auto expanded replicas to [{}]", index, fNumberOfReplicas);
}
}
@Override
public void onFailure(Throwable t) {
for (String index : indices) {
logger.warn("[{}] fail to auto expand replicas to [{}]", index, fNumberOfReplicas);
for (Index index : indices) {
logger.warn("{} fail to auto expand replicas to [{}]", index, fNumberOfReplicas);
}
}
});
@ -188,16 +187,19 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
@Override
public ClusterState execute(ClusterState currentState) {
String[] actualIndices = indexNameExpressionResolver.concreteIndices(currentState, IndicesOptions.strictExpand(), request.indices());
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
// allow changing any settings on a closed index, and only allow dynamic settings to be changed
// on an open index
Set<String> openIndices = new HashSet<>();
Set<String> closeIndices = new HashSet<>();
for (String index : actualIndices) {
if (currentState.metaData().index(index).getState() == IndexMetaData.State.OPEN) {
Set<Index> openIndices = new HashSet<>();
Set<Index> closeIndices = new HashSet<>();
final String[] actualIndices = new String[request.indices().length];
for (int i = 0; i < request.indices().length; i++) {
Index index = request.indices()[i];
actualIndices[i] = index.getName();
final IndexMetaData metaData = currentState.metaData().getIndexSafe(index);
if (metaData.getState() == IndexMetaData.State.OPEN) {
openIndices.add(index);
} else {
closeIndices.add(index);
@ -206,13 +208,13 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
if (closeIndices.size() > 0 && closedSettings.get(IndexMetaData.SETTING_NUMBER_OF_REPLICAS) != null) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update [%s] on closed indices [%s] - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
"Can't update [%s] on closed indices %s - can leave index in an unopenable state", IndexMetaData.SETTING_NUMBER_OF_REPLICAS,
closeIndices
));
}
if (!skippedSettigns.getAsMap().isEmpty() && !openIndices.isEmpty()) {
throw new IllegalArgumentException(String.format(Locale.ROOT,
"Can't update non dynamic settings[%s] for open indices [%s]",
"Can't update non dynamic settings [%s] for open indices %s",
skippedSettigns.getAsMap().keySet(),
openIndices
));
@ -232,28 +234,22 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
maybeUpdateClusterBlock(actualIndices, blocks, IndexMetaData.INDEX_READ_BLOCK, IndexMetaData.INDEX_BLOCKS_READ_SETTING, openSettings);
if (!openIndices.isEmpty()) {
for (String index : openIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
for (Index index : openIndices) {
IndexMetaData indexMetaData = metaDataBuilder.getSafe(index);
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index)) {
if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index.getName())) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
}
if (!closeIndices.isEmpty()) {
for (String index : closeIndices) {
IndexMetaData indexMetaData = metaDataBuilder.get(index);
if (indexMetaData == null) {
throw new IndexNotFoundException(index);
}
for (Index index : closeIndices) {
IndexMetaData indexMetaData = metaDataBuilder.getSafe(index);
Settings.Builder updates = Settings.builder();
Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index)) {
if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index.getName())) {
metaDataBuilder.put(IndexMetaData.builder(indexMetaData).settings(indexSettings));
}
}
@ -265,11 +261,11 @@ public class MetaDataUpdateSettingsService extends AbstractComponent implements
// now, reroute in case things change that require it (like number of replicas)
RoutingAllocation.Result routingResult = allocationService.reroute(updatedState, "settings update");
updatedState = ClusterState.builder(updatedState).routingResult(routingResult).build();
for (String index : openIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
for (Index index : openIndices) {
indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings());
}
for (String index : closeIndices) {
indexScopedSettings.dryRun(updatedState.metaData().index(index).getSettings());
for (Index index : closeIndices) {
indexScopedSettings.dryRun(updatedState.metaData().getIndexSafe(index).getSettings());
}
return updatedState;
}

View File

@ -25,6 +25,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
@ -40,7 +41,7 @@ public class DiscoveryNodeService extends AbstractComponent {
public static final Setting<Long> NODE_ID_SEED_SETTING =
// don't use node.id.seed so it won't be seen as an attribute
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, Property.NodeScope);
private final List<CustomAttributesProvider> customAttributesProviders = new CopyOnWriteArrayList<>();
private final Version version;

View File

@ -313,7 +313,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
@Override
public IndexRoutingTable readFrom(StreamInput in) throws IOException {
Index index = Index.readIndex(in);
Index index = new Index(in);
Builder builder = new Builder(index);
int size = in.readVInt();

View File

@ -584,7 +584,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}
public static IndexShardRoutingTable readFrom(StreamInput in) throws IOException {
Index index = Index.readIndex(in);
Index index = new Index(in);
return readFromThin(in, index);
}

View File

@ -328,7 +328,7 @@ public final class ShardRouting implements Streamable, ToXContent {
@Override
public void readFrom(StreamInput in) throws IOException {
readFrom(in, Index.readIndex(in), in.readVInt());
readFrom(in, new Index(in), in.readVInt());
}
/**

View File

@ -29,6 +29,7 @@ import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
@ -44,7 +45,9 @@ public class UnassignedInfo implements ToXContent, Writeable<UnassignedInfo> {
public static final FormatDateTimeFormatter DATE_TIME_FORMATTER = Joda.forPattern("dateOptionalTime");
private static final TimeValue DEFAULT_DELAYED_NODE_LEFT_TIMEOUT = TimeValue.timeValueMinutes(1);
public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING = Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, true, Setting.Scope.INDEX);
public static final Setting<TimeValue> INDEX_DELAYED_NODE_LEFT_TIMEOUT_SETTING =
Setting.timeSetting("index.unassigned.node_left.delayed_timeout", DEFAULT_DELAYED_NODE_LEFT_TIMEOUT, Property.Dynamic,
Property.IndexScope);
/**
* Reason why the shard is in unassigned state.

View File

@ -320,7 +320,7 @@ public class AllocationService extends AbstractComponent {
public static void updateLeftDelayOfUnassignedShards(RoutingAllocation allocation, Settings settings) {
for (ShardRouting shardRouting : allocation.routingNodes().unassigned()) {
final MetaData metaData = allocation.metaData();
final IndexMetaData indexMetaData = metaData.index(shardRouting.index());
final IndexMetaData indexMetaData = metaData.getIndexSafe(shardRouting.index());
shardRouting.unassignedInfo().updateDelay(allocation.getCurrentNanoTime(), settings, indexMetaData.getSettings());
}
}
@ -340,7 +340,6 @@ public class AllocationService extends AbstractComponent {
changed |= failReplicasForUnassignedPrimary(allocation, shardEntry);
ShardRouting candidate = allocation.routingNodes().activeReplica(shardEntry);
if (candidate != null) {
IndexMetaData index = allocation.metaData().index(candidate.index());
routingNodes.swapPrimaryFlag(shardEntry, candidate);
if (candidate.relocatingNodeId() != null) {
changed = true;
@ -355,6 +354,7 @@ public class AllocationService extends AbstractComponent {
}
}
}
IndexMetaData index = allocation.metaData().getIndexSafe(candidate.index());
if (IndexMetaData.isIndexUsingShadowReplicas(index.getSettings())) {
routingNodes.reinitShadowPrimary(candidate);
changed = true;

View File

@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.PriorityComparator;
@ -67,9 +68,13 @@ import static org.elasticsearch.cluster.routing.ShardRoutingState.RELOCATING;
*/
public class BalancedShardsAllocator extends AbstractComponent implements ShardsAllocator {
public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, true, Setting.Scope.CLUSTER);
public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, true, Setting.Scope.CLUSTER);
public static final Setting<Float> THRESHOLD_SETTING = Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f, true, Setting.Scope.CLUSTER);
public static final Setting<Float> INDEX_BALANCE_FACTOR_SETTING =
Setting.floatSetting("cluster.routing.allocation.balance.index", 0.55f, Property.Dynamic, Property.NodeScope);
public static final Setting<Float> SHARD_BALANCE_FACTOR_SETTING =
Setting.floatSetting("cluster.routing.allocation.balance.shard", 0.45f, Property.Dynamic, Property.NodeScope);
public static final Setting<Float> THRESHOLD_SETTING =
Setting.floatSetting("cluster.routing.allocation.balance.threshold", 1.0f, 0.0f,
Property.Dynamic, Property.NodeScope);
private volatile WeightFunction weightFunction;
private volatile float threshold;
@ -213,7 +218,7 @@ public class BalancedShardsAllocator extends AbstractComponent implements Shards
this.threshold = threshold;
this.routingNodes = allocation.routingNodes();
metaData = routingNodes.metaData();
avgShardsPerNode = ((float) metaData.totalNumberOfShards()) / routingNodes.size();
avgShardsPerNode = ((float) metaData.getTotalNumberOfShards()) / routingNodes.size();
buildModelFromAssigned();
}

View File

@ -112,7 +112,7 @@ public class AllocateStalePrimaryAllocationCommand extends BasePrimaryAllocation
"allocating an empty primary for [" + index + "][" + shardId + "] can result in data loss. Please confirm by setting the accept_data_loss parameter to true");
}
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
if (shardRouting.allocatedPostIndexCreate(indexMetaData) == false) {
return explainOrThrowRejectedCommand(explain, allocation,
"trying to allocate an existing primary shard [" + index + "][" + shardId + "], while no such shard has ever been active");

View File

@ -28,6 +28,7 @@ import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.HashMap;
@ -77,8 +78,11 @@ public class AwarenessAllocationDecider extends AllocationDecider {
public static final String NAME = "awareness";
public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING = new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.awareness.force.", true, Setting.Scope.CLUSTER);
public static final Setting<String[]> CLUSTER_ROUTING_ALLOCATION_AWARENESS_ATTRIBUTE_SETTING =
new Setting<>("cluster.routing.allocation.awareness.attributes", "", Strings::splitStringByCommaToArray , Property.Dynamic,
Property.NodeScope);
public static final Setting<Settings> CLUSTER_ROUTING_ALLOCATION_AWARENESS_FORCE_GROUP_SETTING =
Setting.groupSetting("cluster.routing.allocation.awareness.force.", Property.Dynamic, Property.NodeScope);
private String[] awarenessAttributes;
@ -149,7 +153,7 @@ public class AwarenessAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "no allocation awareness enabled");
}
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.index());
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
int shardCount = indexMetaData.getNumberOfReplicas() + 1; // 1 for primary
for (String awarenessAttribute : awarenessAttributes) {
// the node the shard exists on must be associated with an awareness attribute

View File

@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.Locale;
@ -48,7 +49,9 @@ import java.util.Locale;
public class ClusterRebalanceAllocationDecider extends AllocationDecider {
public static final String NAME = "cluster_rebalance";
public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING = new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT), ClusterRebalanceType::parseString, true, Setting.Scope.CLUSTER);
public static final Setting<ClusterRebalanceType> CLUSTER_ROUTING_ALLOCATION_ALLOW_REBALANCE_SETTING =
new Setting<>("cluster.routing.allocation.allow_rebalance", ClusterRebalanceType.INDICES_ALL_ACTIVE.name().toLowerCase(Locale.ROOT),
ClusterRebalanceType::parseString, Property.Dynamic, Property.NodeScope);
/**
* An enum representation for the configured re-balance type.

View File

@ -24,6 +24,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -42,7 +43,9 @@ public class ConcurrentRebalanceAllocationDecider extends AllocationDecider {
public static final String NAME = "concurrent_rebalance";
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING = Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_CLUSTER_CONCURRENT_REBALANCE_SETTING =
Setting.intSetting("cluster.routing.allocation.cluster_concurrent_rebalance", 2, -1,
Property.Dynamic, Property.NodeScope);
private volatile int clusterConcurrentRebalance;
@Inject

View File

@ -38,6 +38,7 @@ import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.RatioValue;
@ -81,11 +82,22 @@ public class DiskThresholdDecider extends AllocationDecider {
private volatile boolean enabled;
private volatile TimeValue rerouteInterval;
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, true, Setting.Scope.CLUSTER);
public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"), true, Setting.Scope.CLUSTER);
public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING = new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%", (s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"), true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING = Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true, true, Setting.Scope.CLUSTER);;
public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60), true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_DISK_THRESHOLD_ENABLED_SETTING =
Setting.boolSetting("cluster.routing.allocation.disk.threshold_enabled", true, Property.Dynamic, Property.NodeScope);
public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_LOW_DISK_WATERMARK_SETTING =
new Setting<>("cluster.routing.allocation.disk.watermark.low", "85%",
(s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.low"),
Property.Dynamic, Property.NodeScope);
public static final Setting<String> CLUSTER_ROUTING_ALLOCATION_HIGH_DISK_WATERMARK_SETTING =
new Setting<>("cluster.routing.allocation.disk.watermark.high", "90%",
(s) -> validWatermarkSetting(s, "cluster.routing.allocation.disk.watermark.high"),
Property.Dynamic, Property.NodeScope);
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_INCLUDE_RELOCATIONS_SETTING =
Setting.boolSetting("cluster.routing.allocation.disk.include_relocations", true,
Property.Dynamic, Property.NodeScope);
public static final Setting<TimeValue> CLUSTER_ROUTING_ALLOCATION_REROUTE_INTERVAL_SETTING =
Setting.positiveTimeSetting("cluster.routing.allocation.disk.reroute_interval", TimeValue.timeValueSeconds(60),
Property.Dynamic, Property.NodeScope);
/**
* Listens for a node to go over the high watermark and kicks off an empty
@ -330,7 +342,7 @@ public class DiskThresholdDecider extends AllocationDecider {
}
// a flag for whether the primary shard has been previously allocated
IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
boolean primaryHasBeenAllocated = shardRouting.primary() && shardRouting.allocatedPostIndexCreate(indexMetaData);
// checks for exact byte comparisons
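
A minimal sketch of the behavioural difference behind the index(name) -> getIndexSafe(index) swaps above; the index name and settings are made up, and this is only an illustration, not code from this commit:

import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;

public class GetIndexSafeSketch {
    public static void main(String[] args) {
        IndexMetaData indexMetaData = IndexMetaData.builder("logs")
            .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT))
            .numberOfShards(1).numberOfReplicas(0).build();
        MetaData metaData = MetaData.builder().put(indexMetaData, false).build();
        Index index = indexMetaData.getIndex();

        // lookup by name may return null if the index has been deleted concurrently
        IndexMetaData byName = metaData.index(index.getName());
        // getIndexSafe resolves the concrete Index and throws IndexNotFoundException instead of returning null
        IndexMetaData safe = metaData.getIndexSafe(index);
        System.out.println(byName == safe);
    }
}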

View File

@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.Locale;
@ -60,11 +61,19 @@ public class EnableAllocationDecider extends AllocationDecider {
public static final String NAME = "enable";
public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.CLUSTER);
public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING = new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse, true, Setting.Scope.INDEX);
public static final Setting<Allocation> CLUSTER_ROUTING_ALLOCATION_ENABLE_SETTING =
new Setting<>("cluster.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse,
Property.Dynamic, Property.NodeScope);
public static final Setting<Allocation> INDEX_ROUTING_ALLOCATION_ENABLE_SETTING =
new Setting<>("index.routing.allocation.enable", Allocation.ALL.name(), Allocation::parse,
Property.Dynamic, Property.IndexScope);
public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.CLUSTER);
public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING = new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse, true, Setting.Scope.INDEX);
public static final Setting<Rebalance> CLUSTER_ROUTING_REBALANCE_ENABLE_SETTING =
new Setting<>("cluster.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse,
Property.Dynamic, Property.NodeScope);
public static final Setting<Rebalance> INDEX_ROUTING_REBALANCE_ENABLE_SETTING =
new Setting<>("index.routing.rebalance.enable", Rebalance.ALL.name(), Rebalance::parse,
Property.Dynamic, Property.IndexScope);
private volatile Rebalance enableRebalance;
private volatile Allocation enableAllocation;
@ -92,7 +101,7 @@ public class EnableAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "allocation disabling is ignored");
}
final IndexMetaData indexMetaData = allocation.metaData().index(shardRouting.getIndexName());
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(shardRouting.index());
final Allocation enable;
if (INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.exists(indexMetaData.getSettings())) {
enable = INDEX_ROUTING_ALLOCATION_ENABLE_SETTING.get(indexMetaData.getSettings());
@ -127,7 +136,7 @@ public class EnableAllocationDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "rebalance disabling is ignored");
}
Settings indexSettings = allocation.routingNodes().metaData().index(shardRouting.index()).getSettings();
Settings indexSettings = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()).getSettings();
final Rebalance enable;
if (INDEX_ROUTING_REBALANCE_ENABLE_SETTING.exists(indexSettings)) {
enable = INDEX_ROUTING_REBALANCE_ENABLE_SETTING.get(indexSettings);

View File

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import static org.elasticsearch.cluster.node.DiscoveryNodeFilters.OpType.AND;
@ -60,9 +61,12 @@ public class FilterAllocationDecider extends AllocationDecider {
public static final String NAME = "filter";
public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.require.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.include.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING = Setting.groupSetting("cluster.routing.allocation.exclude.", true, Setting.Scope.CLUSTER);
public static final Setting<Settings> CLUSTER_ROUTING_REQUIRE_GROUP_SETTING =
Setting.groupSetting("cluster.routing.allocation.require.", Property.Dynamic, Property.NodeScope);
public static final Setting<Settings> CLUSTER_ROUTING_INCLUDE_GROUP_SETTING =
Setting.groupSetting("cluster.routing.allocation.include.", Property.Dynamic, Property.NodeScope);
public static final Setting<Settings> CLUSTER_ROUTING_EXCLUDE_GROUP_SETTING =
Setting.groupSetting("cluster.routing.allocation.exclude.", Property.Dynamic, Property.NodeScope);
private volatile DiscoveryNodeFilters clusterRequireFilters;
private volatile DiscoveryNodeFilters clusterIncludeFilters;
@ -98,7 +102,7 @@ public class FilterAllocationDecider extends AllocationDecider {
Decision decision = shouldClusterFilter(node, allocation);
if (decision != null) return decision;
decision = shouldIndexFilter(allocation.routingNodes().metaData().index(shardRouting.index()), node, allocation);
decision = shouldIndexFilter(allocation.routingNodes().metaData().getIndexSafe(shardRouting.index()), node, allocation);
if (decision != null) return decision;
return allocation.decision(Decision.YES, NAME, "node passes include/exclude/require filters");

View File

@ -27,6 +27,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -59,13 +60,17 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
* Controls the maximum number of shards per index on a single Elasticsearch
* node. Negative values are interpreted as unlimited.
*/
public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.INDEX);
public static final Setting<Integer> INDEX_TOTAL_SHARDS_PER_NODE_SETTING =
Setting.intSetting("index.routing.allocation.total_shards_per_node", -1, -1,
Property.Dynamic, Property.IndexScope);
/**
* Controls the maximum number of shards per node on a global level.
* Negative values are interpreted as unlimited.
*/
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING = Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_TOTAL_SHARDS_PER_NODE_SETTING =
Setting.intSetting("cluster.routing.allocation.total_shards_per_node", -1, -1,
Property.Dynamic, Property.NodeScope);
@Inject
@ -81,7 +86,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution
@ -118,7 +123,7 @@ public class ShardsLimitAllocationDecider extends AllocationDecider {
@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
IndexMetaData indexMd = allocation.routingNodes().metaData().index(shardRouting.index());
IndexMetaData indexMd = allocation.routingNodes().metaData().getIndexSafe(shardRouting.index());
final int indexShardLimit = INDEX_TOTAL_SHARDS_PER_NODE_SETTING.get(indexMd.getSettings(), settings);
// Capture the limit here in case it changes during this method's
// execution

View File

@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -39,7 +40,9 @@ public class SnapshotInProgressAllocationDecider extends AllocationDecider {
/**
* Disables relocation of shards that are currently being snapshotted.
*/
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING = Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false, true, Setting.Scope.CLUSTER);
public static final Setting<Boolean> CLUSTER_ROUTING_ALLOCATION_SNAPSHOT_RELOCATION_ENABLED_SETTING =
Setting.boolSetting("cluster.routing.allocation.snapshot.relocation_enabled", false,
Property.Dynamic, Property.NodeScope);
private volatile boolean enableRelocation = false;

View File

@ -25,6 +25,7 @@ import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
/**
@ -50,10 +51,25 @@ public class ThrottlingAllocationDecider extends AllocationDecider {
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES = 2;
public static final int DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES = 4;
public static final String NAME = "throttling";
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_recoveries", Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"), true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING = Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries", DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0, true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"), true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING = new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries", (s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s), (s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"), true, Setting.Scope.CLUSTER);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING =
new Setting<>("cluster.routing.allocation.node_concurrent_recoveries",
Integer.toString(DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES),
(s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_recoveries"),
Property.Dynamic, Property.NodeScope);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES_SETTING =
Setting.intSetting("cluster.routing.allocation.node_initial_primaries_recoveries",
DEFAULT_CLUSTER_ROUTING_ALLOCATION_NODE_INITIAL_PRIMARIES_RECOVERIES, 0,
Property.Dynamic, Property.NodeScope);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_INCOMING_RECOVERIES_SETTING =
new Setting<>("cluster.routing.allocation.node_concurrent_incoming_recoveries",
(s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s),
(s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_incoming_recoveries"),
Property.Dynamic, Property.NodeScope);
public static final Setting<Integer> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_OUTGOING_RECOVERIES_SETTING =
new Setting<>("cluster.routing.allocation.node_concurrent_outgoing_recoveries",
(s) -> CLUSTER_ROUTING_ALLOCATION_NODE_CONCURRENT_RECOVERIES_SETTING.getRaw(s),
(s) -> Setting.parseInt(s, 0, "cluster.routing.allocation.node_concurrent_outgoing_recoveries"),
Property.Dynamic, Property.NodeScope);
private volatile int primariesInitialRecoveries;
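
For comparison, the same Property-based declaration style applied to two hypothetical settings (names and defaults are made up, not part of this commit):

import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

public class ExampleSettings {
    // dynamic, node-scoped setting: replaces the old (key, default, min, true, Setting.Scope.CLUSTER) form
    public static final Setting<Integer> MAX_WIDGETS_SETTING =
        Setting.intSetting("my_plugin.max_widgets", 10, 0, Property.Dynamic, Property.NodeScope);

    // static, index-scoped setting: replaces the old (key, default, false, Setting.Scope.INDEX) form
    public static final Setting<Boolean> WIDGETS_ENABLED_SETTING =
        Setting.boolSetting("index.my_plugin.widgets_enabled", true, Property.IndexScope);
}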

View File

@ -50,6 +50,7 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.unit.TimeValue;
@ -89,7 +90,9 @@ import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadF
*/
public class InternalClusterService extends AbstractLifecycleComponent<ClusterService> implements ClusterService {
public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING = Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30), true, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> CLUSTER_SERVICE_SLOW_TASK_LOGGING_THRESHOLD_SETTING =
Setting.positiveTimeSetting("cluster.service.slow_task_logging_threshold", TimeValue.timeValueSeconds(30),
Property.Dynamic, Property.NodeScope);
public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
private final ThreadPool threadPool;

View File

@ -554,10 +554,9 @@ public abstract class StreamInput extends InputStream {
}
}
public <T extends Writeable> T readOptionalWritable(Writeable.IOFunction<StreamInput, T> supplier) throws IOException {
public <T extends Writeable> T readOptionalWritable(Writeable.IOFunction<StreamInput, T> provider) throws IOException {
if (readBoolean()) {
return supplier.apply(this);
return provider.apply(this);
} else {
return null;
}
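
The IOFunction parameter of readOptionalWritable is typically satisfied with a constructor reference; a sketch with a hypothetical Writeable (not a class in this commit):

import java.io.IOException;

import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// a hypothetical Writeable used only for this sketch
public class Marker implements Writeable<Marker> {
    final String name;

    public Marker(String name) {
        this.name = name;
    }

    // reads the fields back in the same order writeTo wrote them
    public Marker(StreamInput in) throws IOException {
        this(in.readString());
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeString(name);
    }

    @Override
    public Marker readFrom(StreamInput in) throws IOException {
        return new Marker(in);
    }

    static Marker readOptionalMarker(StreamInput in) throws IOException {
        // the provider argument is just a constructor reference
        return in.readOptionalWritable(Marker::new);
    }
}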

View File

@ -38,16 +38,14 @@ public interface Writeable<T> extends StreamableReader<T> {
*/
void writeTo(StreamOutput out) throws IOException;
@FunctionalInterface
interface IOFunction<T, R> {
/**
* Applies this function to the given argument.
*
* @param t the function argument
* @return the function result
*/
R apply(T t) throws IOException;
}
@FunctionalInterface
interface IOFunction<T, R> {
/**
* Applies this function to the given argument.
*
* @param t the function argument
* @return the function result
*/
R apply(T t) throws IOException;
}
}

View File

@ -21,6 +21,7 @@ package org.elasticsearch.common.logging;
import org.apache.log4j.Logger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import java.util.Locale;
@ -30,9 +31,10 @@ import java.util.Locale;
public abstract class ESLoggerFactory {
public static final Setting<LogLevel> LOG_DEFAULT_LEVEL_SETTING =
new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, false, Setting.Scope.CLUSTER);
new Setting<>("logger.level", LogLevel.INFO.name(), LogLevel::parse, Property.NodeScope);
public static final Setting<LogLevel> LOG_LEVEL_SETTING =
Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse, true, Setting.Scope.CLUSTER);
Setting.prefixKeySetting("logger.", LogLevel.INFO.name(), LogLevel::parse,
Property.Dynamic, Property.NodeScope);
public static ESLogger getLogger(String prefix, String name) {
prefix = prefix == null ? null : prefix.intern();

View File

@ -134,7 +134,7 @@ public class MultiPhrasePrefixQuery extends Query {
if (termArrays.isEmpty()) {
return new MatchNoDocsQuery();
}
MultiPhraseQuery query = new MultiPhraseQuery();
MultiPhraseQuery.Builder query = new MultiPhraseQuery.Builder();
query.setSlop(slop);
int sizeMinus1 = termArrays.size() - 1;
for (int i = 0; i < sizeMinus1; i++) {
@ -153,7 +153,7 @@ public class MultiPhrasePrefixQuery extends Query {
return Queries.newMatchNoDocsQuery();
}
query.add(terms.toArray(Term.class), position);
return query.rewrite(reader);
return query.build();
}
private void getPrefixTerms(ObjectHashSet<Term> terms, final Term prefix, final IndexReader reader) throws IOException {
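
For reference, a minimal sketch of the Lucene 6 builder API the rewrite above now targets; the field and terms are made up:

import org.apache.lucene.index.Term;
import org.apache.lucene.search.MultiPhraseQuery;

public class MultiPhraseBuilderSketch {
    public static void main(String[] args) {
        MultiPhraseQuery.Builder builder = new MultiPhraseQuery.Builder();
        builder.setSlop(1);
        // single term at the first position
        builder.add(new Term("body", "quick"));
        // several alternative terms at the next position, as the prefix expansion above produces
        builder.add(new Term[] { new Term("body", "fox"), new Term("body", "foxes") });
        MultiPhraseQuery query = builder.build(); // the built query is immutable
        System.out.println(query);
    }
}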

View File

@ -28,8 +28,8 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.Setting.Scope;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.http.HttpServer;
import org.elasticsearch.http.HttpServerTransport;
@ -155,10 +155,11 @@ public class NetworkModule extends AbstractModule {
public static final String LOCAL_TRANSPORT = "local";
public static final String NETTY_TRANSPORT = "netty";
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString("http.type", false, Scope.CLUSTER);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, false, Scope.CLUSTER);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING = Setting.simpleString("transport.service.type", false, Scope.CLUSTER);
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", false, Scope.CLUSTER);
public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString("http.type", Property.NodeScope);
public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
Setting.simpleString("transport.service.type", Property.NodeScope);
public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString("transport.type", Property.NodeScope);

View File

@ -22,6 +22,7 @@ package org.elasticsearch.common.network;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
@ -34,6 +35,7 @@ import java.util.HashSet;
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.TimeUnit;
import java.util.function.Function;
/**
*
@ -43,24 +45,33 @@ public class NetworkService extends AbstractComponent {
/** By default, we bind to loopback interfaces */
public static final String DEFAULT_NETWORK_HOST = "_local_";
public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING = Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST),
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING = Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING = Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING,
s -> s, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, false, Setting.Scope.CLUSTER);
public static final Setting<List<String>> GLOBAL_NETWORK_HOST_SETTING =
Setting.listSetting("network.host", Arrays.asList(DEFAULT_NETWORK_HOST), Function.identity(), Property.NodeScope);
public static final Setting<List<String>> GLOBAL_NETWORK_BINDHOST_SETTING =
Setting.listSetting("network.bind_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope);
public static final Setting<List<String>> GLOBAL_NETWORK_PUBLISHHOST_SETTING =
Setting.listSetting("network.publish_host", GLOBAL_NETWORK_HOST_SETTING, Function.identity(), Property.NodeScope);
public static final Setting<Boolean> NETWORK_SERVER = Setting.boolSetting("network.server", true, Property.NodeScope);
public static final class TcpSettings {
public static final Setting<Boolean> TCP_NO_DELAY = Setting.boolSetting("network.tcp.no_delay", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_KEEP_ALIVE = Setting.boolSetting("network.tcp.keep_alive", true, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_REUSE_ADDRESS = Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE = Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING = Setting.boolSetting("network.tcp.blocking", false, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_SERVER = Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT = Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, false, Setting.Scope.CLUSTER);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT = Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), false, Setting.Scope.CLUSTER);
public static final Setting<Boolean> TCP_NO_DELAY =
Setting.boolSetting("network.tcp.no_delay", true, Property.NodeScope);
public static final Setting<Boolean> TCP_KEEP_ALIVE =
Setting.boolSetting("network.tcp.keep_alive", true, Property.NodeScope);
public static final Setting<Boolean> TCP_REUSE_ADDRESS =
Setting.boolSetting("network.tcp.reuse_address", NetworkUtils.defaultReuseAddress(), Property.NodeScope);
public static final Setting<ByteSizeValue> TCP_SEND_BUFFER_SIZE =
Setting.byteSizeSetting("network.tcp.send_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
public static final Setting<ByteSizeValue> TCP_RECEIVE_BUFFER_SIZE =
Setting.byteSizeSetting("network.tcp.receive_buffer_size", new ByteSizeValue(-1), Property.NodeScope);
public static final Setting<Boolean> TCP_BLOCKING =
Setting.boolSetting("network.tcp.blocking", false, Property.NodeScope);
public static final Setting<Boolean> TCP_BLOCKING_SERVER =
Setting.boolSetting("network.tcp.blocking_server", TCP_BLOCKING, Property.NodeScope);
public static final Setting<Boolean> TCP_BLOCKING_CLIENT =
Setting.boolSetting("network.tcp.blocking_client", TCP_BLOCKING, Property.NodeScope);
public static final Setting<TimeValue> TCP_CONNECT_TIMEOUT =
Setting.timeSetting("network.tcp.connect_timeout", new TimeValue(30, TimeUnit.SECONDS), Property.NodeScope);
}
/**

View File

@ -44,19 +44,19 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
private final List<SettingUpdater<?>> settingUpdaters = new CopyOnWriteArrayList<>();
private final Map<String, Setting<?>> complexMatchers;
private final Map<String, Setting<?>> keySettings;
private final Setting.Scope scope;
private final Setting.Property scope;
private static final Pattern KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])*[-\\w]+$");
private static final Pattern GROUP_KEY_PATTERN = Pattern.compile("^(?:[-\\w]+[.])+$");
protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Scope scope) {
protected AbstractScopedSettings(Settings settings, Set<Setting<?>> settingsSet, Setting.Property scope) {
super(settings);
this.lastSettingsApplied = Settings.EMPTY;
this.scope = scope;
Map<String, Setting<?>> complexMatchers = new HashMap<>();
Map<String, Setting<?>> keySettings = new HashMap<>();
for (Setting<?> setting : settingsSet) {
if (setting.getScope() != scope) {
throw new IllegalArgumentException("Setting must be a " + scope + " setting but was: " + setting.getScope());
if (setting.getProperties().contains(scope) == false) {
throw new IllegalArgumentException("Setting must be a " + scope + " setting but has: " + setting.getProperties());
}
if (isValidKey(setting.getKey()) == false && (setting.isGroupSetting() && isValidGroupKey(setting.getKey())) == false) {
throw new IllegalArgumentException("illegal settings key: [" + setting.getKey() + "]");
@ -96,7 +96,7 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
return GROUP_KEY_PATTERN.matcher(key).matches();
}
public Setting.Scope getScope() {
public Setting.Property getScope() {
return this.scope;
}
@ -342,8 +342,9 @@ public abstract class AbstractScopedSettings extends AbstractComponent {
* Returns the value for the given setting.
*/
public <T> T get(Setting<T> setting) {
if (setting.getScope() != scope) {
throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] != [" + setting.getScope() + "]");
if (setting.getProperties().contains(scope) == false) {
throw new IllegalArgumentException("settings scope doesn't match the setting scope [" + this.scope + "] not in [" +
setting.getProperties() + "]");
}
if (get(setting.getKey()) == null) {
throw new IllegalArgumentException("setting " + setting.getKey() + " has not been registered");

View File

@ -47,6 +47,7 @@ import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.discovery.DiscoveryModule;
@ -103,7 +104,7 @@ import java.util.function.Predicate;
*/
public final class ClusterSettings extends AbstractScopedSettings {
public ClusterSettings(Settings nodeSettings, Set<Setting<?>> settingsSet) {
super(nodeSettings, settingsSet, Setting.Scope.CLUSTER);
super(nodeSettings, settingsSet, Property.NodeScope);
addSettingsUpdater(new LoggingSettingUpdater(nodeSettings));
}
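
A small sketch of what the Property-based scope check now enforces when settings are registered (the two setting names are hypothetical):

import java.util.Collections;

import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;

public class ScopeCheckSketch {
    public static void main(String[] args) {
        Setting<Boolean> nodeScoped = Setting.boolSetting("my.node.flag", true, Property.NodeScope);
        Setting<Boolean> indexScoped = Setting.boolSetting("index.my.flag", true, Property.IndexScope);

        // fine: the setting carries the NodeScope property that ClusterSettings expects
        new ClusterSettings(Settings.EMPTY, Collections.<Setting<?>>singleton(nodeScoped));

        try {
            // rejected: an index-scoped setting cannot be registered as a cluster setting
            new ClusterSettings(Settings.EMPTY, Collections.<Setting<?>>singleton(indexScoped));
        } catch (IllegalArgumentException e) {
            System.out.println(e.getMessage());
        }
    }
}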

View File

@ -22,6 +22,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.cluster.routing.allocation.decider.ShardsLimitAllocationDecider;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.gateway.PrimaryShardAllocator;
import org.elasticsearch.index.IndexModule;
import org.elasticsearch.index.IndexSettings;
@ -51,7 +52,7 @@ import java.util.function.Predicate;
/**
* Encapsulates all valid index level settings.
* @see org.elasticsearch.common.settings.Setting.Scope#INDEX
* @see Property#IndexScope
*/
public final class IndexScopedSettings extends AbstractScopedSettings {
@ -136,22 +137,22 @@ public final class IndexScopedSettings extends AbstractScopedSettings {
EngineConfig.INDEX_CODEC_SETTING,
IndexWarmer.INDEX_NORMS_LOADING_SETTING,
// validate that built-in similarities don't get redefined
Setting.groupSetting("index.similarity.", false, Setting.Scope.INDEX, (s) -> {
Setting.groupSetting("index.similarity.", (s) -> {
Map<String, Settings> groups = s.getAsGroups();
for (String key : SimilarityService.BUILT_IN.keySet()) {
if (groups.containsKey(key)) {
throw new IllegalArgumentException("illegal value for [index.similarity."+ key + "] cannot redefine built-in similarity");
}
}
}), // this allows similarity settings to be passed
Setting.groupSetting("index.analysis.", false, Setting.Scope.INDEX) // this allows analysis settings to be passed
}, Property.IndexScope), // this allows similarity settings to be passed
Setting.groupSetting("index.analysis.", Property.IndexScope) // this allows analysis settings to be passed
)));
public static final IndexScopedSettings DEFAULT_SCOPED_SETTINGS = new IndexScopedSettings(Settings.EMPTY, IndexScopedSettings.BUILT_IN_INDEX_SETTINGS);
public IndexScopedSettings(Settings settings, Set<Setting<?>> settingsSet) {
super(settings, settingsSet, Setting.Scope.INDEX);
super(settings, settingsSet, Property.IndexScope);
}
private IndexScopedSettings(Settings settings, IndexScopedSettings other, IndexMetaData metaData) {

View File

@ -25,7 +25,9 @@ import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.common.Booleans;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.MemorySizeValue;
@ -37,6 +39,10 @@ import org.elasticsearch.common.xcontent.XContentType;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.EnumSet;
import java.util.Enumeration;
import java.util.List;
import java.util.Map;
import java.util.Objects;
@ -49,12 +55,12 @@ import java.util.stream.Collectors;
/**
* A setting. Encapsulates typical stuff like default value, parsing, and scope.
* Some (dynamic=true) can be modified at run time using the API.
* Some (SettingsProperty.Dynamic) can be modified at run time using the API.
* All settings inside elasticsearch or in any of the plugins should use this type-safe and generic settings infrastructure
* together with {@link AbstractScopedSettings}. This class contains several utility methods that make it straightforward
* to add settings for the majority of the cases. For instance, a simple boolean setting can be defined like this:
* <pre>{@code
* public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, false, Scope.CLUSTER);}
* public static final Setting<Boolean> MY_BOOLEAN = Setting.boolSetting("my.bool.setting", true, SettingsProperty.NodeScope);}
* </pre>
* To retrieve the value of the setting a {@link Settings} object can be passed directly to the {@link Setting#get(Settings)} method.
* <pre>
@ -65,32 +71,81 @@ import java.util.stream.Collectors;
* public enum Color {
* RED, GREEN, BLUE;
* }
* public static final Setting<Color> MY_BOOLEAN = new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, false, Scope.CLUSTER);
* public static final Setting<Color> MY_BOOLEAN =
* new Setting<>("my.color.setting", Color.RED.toString(), Color::valueOf, SettingsProperty.NodeScope);
* }
* </pre>
*/
public class Setting<T> extends ToXContentToBytes {
public enum Property {
/**
* should be filtered in some APIs (mask password/credentials)
*/
Filtered,
/**
* iff this setting can be dynamically updated
*/
Dynamic,
/**
* mark this setting as deprecated
*/
Deprecated,
/**
* Node scope
*/
NodeScope,
/**
* Index scope
*/
IndexScope
}
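As a quick illustration of the new varargs style (hypothetical key and values, using the intSetting factory that appears later in this diff):

    // a dynamic, node-scoped integer setting declared via Property varargs (hypothetical example)
    public static final Setting<Integer> EXAMPLE_RETRIES =
        Setting.intSetting("example.retries", 3, 0, Property.Dynamic, Property.NodeScope);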
private static final ESLogger logger = Loggers.getLogger(Setting.class);
private static final DeprecationLogger deprecationLogger = new DeprecationLogger(logger);
private final Key key;
protected final Function<Settings, String> defaultValue;
private final Function<String, T> parser;
private final boolean dynamic;
private final Scope scope;
private final EnumSet<Property> properties;
private static final EnumSet<Property> EMPTY_PROPERTIES = EnumSet.noneOf(Property.class);
/**
* Creates a new Setting instance
* Creates a new Setting instance. When no properties are passed, the setting's property set is empty (no scope, not dynamic).
* @param key the settings key for this setting.
* @param defaultValue a default value function that returns the default values string representation.
* @param parser a parser that parses the string rep into a complex datatype.
* @param dynamic true iff this setting can be dynamically updateable
* @param scope the scope of this setting
* @param properties properties for this setting like scope, filtering...
*/
public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
public Setting(Key key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) {
assert parser.apply(defaultValue.apply(Settings.EMPTY)) != null || this.isGroupSetting(): "parser returned null";
this.key = key;
this.defaultValue = defaultValue;
this.parser = parser;
this.dynamic = dynamic;
this.scope = scope;
if (properties == null) {
throw new IllegalArgumentException("properties can not be null for setting [" + key + "]");
}
if (properties.length == 0) {
this.properties = EMPTY_PROPERTIES;
} else {
this.properties = EnumSet.copyOf(Arrays.asList(properties));
}
}
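A minimal sketch of the validation above (hypothetical keys; a null varargs array is rejected, an empty one yields an empty property set):

    // passing no properties leaves the setting without a scope; SettingsModule rejects it at registration time
    Setting<String> unscoped = new Setting<>("example.unscoped", "x", Function.identity());
    // new Setting<>("example.bad", "x", Function.identity(), (Property[]) null); // throws IllegalArgumentException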
/**
* Creates a new Setting instance
* @param key the settings key for this setting.
* @param defaultValue a default value.
* @param parser a parser that parses the string rep into a complex datatype.
* @param properties properties for this setting like scope, filtering...
*/
public Setting(String key, String defaultValue, Function<String, T> parser, Property... properties) {
this(key, s -> defaultValue, parser, properties);
}
/**
@ -98,11 +153,10 @@ public class Setting<T> extends ToXContentToBytes {
* @param key the settings key for this setting.
* @param defaultValue a default value function that returns the default values string representation.
* @param parser a parser that parses the string rep into a complex datatype.
* @param dynamic true iff this setting can be dynamically updateable
* @param scope the scope of this setting
* @param properties properties for this setting like scope, filtering...
*/
public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
this(new SimpleKey(key), defaultValue, parser, dynamic, scope);
public Setting(String key, Function<Settings, String> defaultValue, Function<String, T> parser, Property... properties) {
this(new SimpleKey(key), defaultValue, parser, properties);
}
/**
@ -110,11 +164,10 @@ public class Setting<T> extends ToXContentToBytes {
* @param key the settings key for this setting.
* @param fallBackSetting a setting to fall back to if the current setting is not set.
* @param parser a parser that parses the string rep into a complex datatype.
* @param dynamic true iff this setting can be dynamically updateable
* @param scope the scope of this setting
* @param properties properties for this setting like scope, filtering...
*/
public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, boolean dynamic, Scope scope) {
this(key, fallBackSetting::getRaw, parser, dynamic, scope);
public Setting(String key, Setting<T> fallBackSetting, Function<String, T> parser, Property... properties) {
this(key, fallBackSetting::getRaw, parser, properties);
}
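A minimal sketch of the fall-back constructor in use (hypothetical keys), via the boolSetting overload shown further down:

    Setting<Boolean> primary = Setting.boolSetting("example.primary.enabled", true, Property.NodeScope);
    // resolves to the raw value of "example.primary.enabled" whenever "example.alias.enabled" is not set
    Setting<Boolean> alias = Setting.boolSetting("example.alias.enabled", primary, Property.NodeScope);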
/**
@ -136,17 +189,46 @@ public class Setting<T> extends ToXContentToBytes {
}
/**
* Returns <code>true</code> iff this setting is dynamically updateable, otherwise <code>false</code>
* Returns <code>true</code> if this setting is dynamically updateable, otherwise <code>false</code>
*/
public final boolean isDynamic() {
return dynamic;
return properties.contains(Property.Dynamic);
}
/**
* Returns the settings scope
* Returns the setting properties
* @see Property
*/
public final Scope getScope() {
return scope;
public EnumSet<Property> getProperties() {
return properties;
}
/**
* Returns <code>true</code> if this setting must be filtered, otherwise <code>false</code>
*/
public boolean isFiltered() {
return properties.contains(Property.Filtered);
}
/**
* Returns <code>true</code> if this setting has a node scope, otherwise <code>false</code>
*/
public boolean hasNodeScope() {
return properties.contains(Property.NodeScope);
}
/**
* Returns <code>true</code> if this setting has an index scope, otherwise <code>false</code>
*/
public boolean hasIndexScope() {
return properties.contains(Property.IndexScope);
}
/**
* Returns <code>true</code> if this setting is deprecated, otherwise <code>false</code>
*/
public boolean isDeprecated() {
return properties.contains(Property.Deprecated);
}
/**
@ -209,6 +291,12 @@ public class Setting<T> extends ToXContentToBytes {
* instead. This is useful to access the actual raw value when it cannot be parsed because it is invalid.
*/
public String getRaw(Settings settings) {
// They're using the setting, so we need to tell them to stop
if (this.isDeprecated() && this.exists(settings)) {
// It would be convenient to show its replacement key, but replacement is often not so simple
deprecationLogger.deprecated("[{}] setting was deprecated in Elasticsearch and it will be removed in a future release! " +
"See the breaking changes lists in the documentation for details", getKey());
}
return settings.get(getKey(), defaultValue.apply(settings));
}
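For illustration, a hypothetical deprecated setting; reading it through get/getRaw triggers the deprecation log above:

    Setting<String> legacy = Setting.simpleString("example.legacy.key", Property.Deprecated, Property.NodeScope);
    String value = legacy.get(Settings.builder().put("example.legacy.key", "v").build()); // logs a deprecation warning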
@ -225,8 +313,7 @@ public class Setting<T> extends ToXContentToBytes {
public final XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject();
builder.field("key", key.toString());
builder.field("type", scope.name());
builder.field("dynamic", dynamic);
builder.field("properties", properties);
builder.field("is_group_setting", isGroupSetting());
builder.field("default", defaultValue.apply(Settings.EMPTY));
builder.endObject();
@ -248,14 +335,6 @@ public class Setting<T> extends ToXContentToBytes {
return this;
}
/**
* The settings scope - settings can either be cluster settings or per index settings.
*/
public enum Scope {
CLUSTER,
INDEX;
}
/**
* Build a new updater with a noop validator.
*/
@ -353,38 +432,34 @@ public class Setting<T> extends ToXContentToBytes {
}
public Setting(String key, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
this(key, (s) -> defaultValue, parser, dynamic, scope);
public static Setting<Float> floatSetting(String key, float defaultValue, Property... properties) {
return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, properties);
}
public static Setting<Float> floatSetting(String key, float defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Float.toString(defaultValue), Float::parseFloat, dynamic, scope);
}
public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, boolean dynamic, Scope scope) {
public static Setting<Float> floatSetting(String key, float defaultValue, float minValue, Property... properties) {
return new Setting<>(key, (s) -> Float.toString(defaultValue), (s) -> {
float value = Float.parseFloat(s);
if (value < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return value;
}, dynamic, scope);
}, properties);
}
public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, int maxValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), dynamic, scope);
public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, int maxValue, Property... properties) {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, maxValue, key), properties);
}
public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), dynamic, scope);
public static Setting<Integer> intSetting(String key, int defaultValue, int minValue, Property... properties) {
return new Setting<>(key, (s) -> Integer.toString(defaultValue), (s) -> parseInt(s, minValue, key), properties);
}
public static Setting<Long> longSetting(String key, long defaultValue, long minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), dynamic, scope);
public static Setting<Long> longSetting(String key, long defaultValue, long minValue, Property... properties) {
return new Setting<>(key, (s) -> Long.toString(defaultValue), (s) -> parseLong(s, minValue, key), properties);
}
public static Setting<String> simpleString(String key, boolean dynamic, Scope scope) {
return new Setting<>(key, "", Function.identity(), dynamic, scope);
public static Setting<String> simpleString(String key, Property... properties) {
return new Setting<>(key, s -> "", Function.identity(), properties);
}
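A hypothetical use of simpleString with Property.Filtered, the property documented above for masking credentials:

    // filtered settings are masked when settings are exposed through APIs
    Setting<String> password = Setting.simpleString("example.repository.password", Property.Filtered, Property.NodeScope);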
public static int parseInt(String s, int minValue, String key) {
@ -418,51 +493,58 @@ public class Setting<T> extends ToXContentToBytes {
return timeValue;
}
public static Setting<Integer> intSetting(String key, int defaultValue, boolean dynamic, Scope scope) {
return intSetting(key, defaultValue, Integer.MIN_VALUE, dynamic, scope);
public static Setting<Integer> intSetting(String key, int defaultValue, Property... properties) {
return intSetting(key, defaultValue, Integer.MIN_VALUE, properties);
}
public static Setting<Boolean> boolSetting(String key, boolean defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, dynamic, scope);
public static Setting<Boolean> boolSetting(String key, boolean defaultValue, Property... properties) {
return new Setting<>(key, (s) -> Boolean.toString(defaultValue), Booleans::parseBooleanExact, properties);
}
public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, boolean dynamic, Scope scope) {
return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, dynamic, scope);
public static Setting<Boolean> boolSetting(String key, Setting<Boolean> fallbackSetting, Property... properties) {
return new Setting<>(key, fallbackSetting, Booleans::parseBooleanExact, properties);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), dynamic, scope);
public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, boolean dynamic, Scope scope) {
return byteSizeSetting(key, (s) -> value.toString(), dynamic, scope);
public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, Property... properties) {
return byteSizeSetting(key, (s) -> value.toString(), properties);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, Setting<ByteSizeValue> fallbackSettings, boolean dynamic, Scope scope) {
return byteSizeSetting(key, fallbackSettings::getRaw, dynamic, scope);
public static Setting<ByteSizeValue> byteSizeSetting(String key, Setting<ByteSizeValue> fallbackSettings,
Property... properties) {
return byteSizeSetting(key, fallbackSettings::getRaw, properties);
}
public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), dynamic, scope);
public static Setting<ByteSizeValue> byteSizeSetting(String key, Function<Settings, String> defaultValue,
Property... properties) {
return new Setting<>(key, defaultValue, (s) -> ByteSizeValue.parseBytesSizeValue(s, key), properties);
}
public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), dynamic, scope);
public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
}
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> defaultStringValue, singleValueParser, dynamic, scope);
public static <T> Setting<List<T>> listSetting(String key, List<String> defaultStringValue, Function<String, T> singleValueParser,
Property... properties) {
return listSetting(key, (s) -> defaultStringValue, singleValueParser, properties);
}
public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, dynamic, scope);
public static <T> Setting<List<T>> listSetting(String key, Setting<List<T>> fallbackSetting, Function<String, T> singleValueParser,
Property... properties) {
return listSetting(key, (s) -> parseableStringToList(fallbackSetting.getRaw(s)), singleValueParser, properties);
}
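A minimal sketch of a list setting (hypothetical key), using the defaults-plus-parser overload above:

    Setting<List<Integer>> ports = Setting.listSetting("example.ports",
        Arrays.asList("9300", "9301"), Integer::parseInt, Property.NodeScope);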
public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue, Function<String, T> singleValueParser, boolean dynamic, Scope scope) {
public static <T> Setting<List<T>> listSetting(String key, Function<Settings, List<String>> defaultStringValue,
Function<String, T> singleValueParser, Property... properties) {
Function<String, List<T>> parser = (s) ->
parseableStringToList(s).stream().map(singleValueParser).collect(Collectors.toList());
return new Setting<List<T>>(new ListKey(key), (s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, dynamic, scope) {
return new Setting<List<T>>(new ListKey(key),
(s) -> arrayToParsableString(defaultStringValue.apply(s).toArray(Strings.EMPTY_ARRAY)), parser, properties) {
private final Pattern pattern = Pattern.compile(Pattern.quote(key)+"(\\.\\d+)?");
@Override
public String getRaw(Settings settings) {
String[] array = settings.getAsArray(getKey(), null);
@ -509,11 +591,11 @@ public class Setting<T> extends ToXContentToBytes {
throw new ElasticsearchException(ex);
}
}
public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope) {
return groupSetting(key, dynamic, scope, (s) -> {});
public static Setting<Settings> groupSetting(String key, Property... properties) {
return groupSetting(key, (s) -> {}, properties);
}
public static Setting<Settings> groupSetting(String key, boolean dynamic, Scope scope, Consumer<Settings> validator) {
return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, dynamic, scope) {
public static Setting<Settings> groupSetting(String key, Consumer<Settings> validator, Property... properties) {
return new Setting<Settings>(new GroupKey(key), (s) -> "", (s) -> null, properties) {
@Override
public boolean isGroupSetting() {
return true;
@ -592,30 +674,37 @@ public class Setting<T> extends ToXContentToBytes {
};
}
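For example, mirroring the index-analysis group registered earlier in this diff, a group setting captures every key under its prefix:

    Setting<Settings> analysisGroup = Setting.groupSetting("index.analysis.", Property.IndexScope);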
public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
return new Setting<>(key, defaultValue, (s) -> parseTimeValue(s, minValue, key), dynamic, scope);
public static Setting<TimeValue> timeSetting(String key, Function<Settings, String> defaultValue, TimeValue minValue,
Property... properties) {
return new Setting<>(key, defaultValue, (s) -> {
TimeValue timeValue = TimeValue.parseTimeValue(s, null, key);
if (timeValue.millis() < minValue.millis()) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return timeValue;
}, properties);
}
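A hypothetical timeout with a lower bound, built with the factory above:

    Setting<TimeValue> flushTimeout = Setting.timeSetting("example.flush.timeout",
        TimeValue.timeValueSeconds(30), TimeValue.timeValueSeconds(1), Property.Dynamic, Property.NodeScope);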
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, boolean dynamic, Scope scope) {
return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, dynamic, scope);
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, TimeValue minValue, Property... properties) {
return timeSetting(key, (s) -> defaultValue.getStringRep(), minValue, properties);
}
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, boolean dynamic, Scope scope) {
return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope);
public static Setting<TimeValue> timeSetting(String key, TimeValue defaultValue, Property... properties) {
return new Setting<>(key, (s) -> defaultValue.toString(), (s) -> TimeValue.parseTimeValue(s, key), properties);
}
public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, boolean dynamic, Scope scope) {
return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), dynamic, scope);
public static Setting<TimeValue> timeSetting(String key, Setting<TimeValue> fallbackSetting, Property... properties) {
return new Setting<>(key, fallbackSetting::getRaw, (s) -> TimeValue.parseTimeValue(s, key), properties);
}
public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, boolean dynamic, Scope scope) {
public static Setting<Double> doubleSetting(String key, double defaultValue, double minValue, Property... properties) {
return new Setting<>(key, (s) -> Double.toString(defaultValue), (s) -> {
final double d = Double.parseDouble(s);
if (d < minValue) {
throw new IllegalArgumentException("Failed to parse value [" + s + "] for setting [" + key + "] must be >= " + minValue);
}
return d;
}, dynamic, scope);
}, properties);
}
@Override
@ -636,8 +725,9 @@ public class Setting<T> extends ToXContentToBytes {
* can easily be added with this setting. Yet, prefix key settings don't support updaters out of the box unless
* {@link #getConcreteSetting(String)} is used to pull the updater.
*/
public static <T> Setting<T> prefixKeySetting(String prefix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, dynamic, scope);
public static <T> Setting<T> prefixKeySetting(String prefix, String defaultValue, Function<String, T> parser,
Property... properties) {
return affixKeySetting(AffixKey.withPrefix(prefix), (s) -> defaultValue, parser, properties);
}
/**
@ -645,16 +735,19 @@ public class Setting<T> extends ToXContentToBytes {
* storage.${backend}.enable=[true|false] can easily be added with this setting. Yet, adfix key settings don't support updaters
* out of the box unless {@link #getConcreteSetting(String)} is used to pull the updater.
*/
public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, dynamic, scope);
public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, Function<Settings, String> defaultValue,
Function<String, T> parser, Property... properties) {
return affixKeySetting(AffixKey.withAdfix(prefix, suffix), defaultValue, parser, properties);
}
public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, dynamic, scope);
public static <T> Setting<T> adfixKeySetting(String prefix, String suffix, String defaultValue, Function<String, T> parser,
Property... properties) {
return adfixKeySetting(prefix, suffix, (s) -> defaultValue, parser, properties);
}
public static <T> Setting<T> affixKeySetting(AffixKey key, Function<Settings, String> defaultValue, Function<String, T> parser, boolean dynamic, Scope scope) {
return new Setting<T>(key, defaultValue, parser, dynamic, scope) {
public static <T> Setting<T> affixKeySetting(AffixKey key, Function<Settings, String> defaultValue, Function<String, T> parser,
Property... properties) {
return new Setting<T>(key, defaultValue, parser, properties) {
@Override
boolean isGroupSetting() {
@ -669,7 +762,7 @@ public class Setting<T> extends ToXContentToBytes {
@Override
public Setting<T> getConcreteSetting(String key) {
if (match(key)) {
return new Setting<>(key, defaultValue, parser, dynamic, scope);
return new Setting<>(key, defaultValue, parser, properties);
} else {
throw new IllegalArgumentException("key [" + key + "] must match [" + getKey() + "] but didn't.");
}
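A hypothetical adfix setting of the storage.${backend}.enable form described above; getConcreteSetting materializes it for a concrete key:

    Setting<Boolean> template = Setting.adfixKeySetting("storage.", "enable", "false",
        Boolean::parseBoolean, Property.NodeScope);
    Setting<Boolean> s3Enabled = template.getConcreteSetting("storage.s3.enable");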

View File

@ -35,7 +35,7 @@ public class SettingsModule extends AbstractModule {
private final Settings settings;
private final Set<String> settingsFilterPattern = new HashSet<>();
private final Map<String, Setting<?>> clusterSettings = new HashMap<>();
private final Map<String, Setting<?>> nodeSettings = new HashMap<>();
private final Map<String, Setting<?>> indexSettings = new HashMap<>();
private static final Predicate<String> TRIBE_CLIENT_NODE_SETTINGS_PREDICATE = (s) -> s.startsWith("tribe.") && TribeService.TRIBE_SETTING_KEYS.contains(s) == false;
@ -52,7 +52,7 @@ public class SettingsModule extends AbstractModule {
@Override
protected void configure() {
final IndexScopedSettings indexScopedSettings = new IndexScopedSettings(settings, new HashSet<>(this.indexSettings.values()));
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.clusterSettings.values()));
final ClusterSettings clusterSettings = new ClusterSettings(settings, new HashSet<>(this.nodeSettings.values()));
// by now we are fully configured, let's check node level settings for unregistered index settings
indexScopedSettings.validate(settings.filter(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE));
final Predicate<String> acceptOnlyClusterSettings = TRIBE_CLIENT_NODE_SETTINGS_PREDICATE.or(IndexScopedSettings.INDEX_SETTINGS_KEY_PREDICATE).negate();
@ -71,19 +71,28 @@ public class SettingsModule extends AbstractModule {
* the setting during startup.
*/
public void registerSetting(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
if (clusterSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
clusterSettings.put(setting.getKey(), setting);
break;
case INDEX:
if (indexSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
indexSettings.put(setting.getKey(), setting);
break;
if (setting.isFiltered()) {
if (settingsFilterPattern.contains(setting.getKey()) == false) {
registerSettingsFilter(setting.getKey());
}
}
// Validate the scope: a setting must declare exactly one of NodeScope or IndexScope.
if (setting.hasNodeScope() && setting.hasIndexScope()) {
throw new IllegalArgumentException("More than one scope has been added to the setting [" + setting.getKey() + "]");
}
if (setting.hasNodeScope()) {
if (nodeSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
nodeSettings.put(setting.getKey(), setting);
} else if (setting.hasIndexScope()) {
if (indexSettings.containsKey(setting.getKey())) {
throw new IllegalArgumentException("Cannot register setting [" + setting.getKey() + "] twice");
}
indexSettings.put(setting.getKey(), setting);
} else {
throw new IllegalArgumentException("No scope found for setting [" + setting.getKey() + "]");
}
}
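A minimal sketch of registration from a plugin or module, assuming a SettingsModule instance is in hand; each setting must carry exactly one of NodeScope or IndexScope:

    static void registerExampleSettings(SettingsModule settingsModule) {
        // node-scoped: collected into nodeSettings and later exposed via ClusterSettings
        settingsModule.registerSetting(Setting.boolSetting("example.feature.enabled", false, Property.NodeScope));
        // index-scoped: collected into indexSettings and exposed via IndexScopedSettings
        settingsModule.registerSetting(Setting.simpleString("index.example.mode", Property.IndexScope));
    }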
@ -101,21 +110,15 @@ public class SettingsModule extends AbstractModule {
settingsFilterPattern.add(filter);
}
public void registerSettingsFilterIfMissing(String filter) {
if (settingsFilterPattern.contains(filter) == false) {
registerSettingsFilter(filter);
}
}
/**
* Check if a setting has already been registered
*/
public boolean exists(Setting<?> setting) {
switch (setting.getScope()) {
case CLUSTER:
return clusterSettings.containsKey(setting.getKey());
case INDEX:
return indexSettings.containsKey(setting.getKey());
if (setting.hasNodeScope()) {
return nodeSettings.containsKey(setting.getKey());
}
if (setting.hasIndexScope()) {
return indexSettings.containsKey(setting.getKey());
}
throw new IllegalArgumentException("setting scope is unknown. This should never happen!");
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.common.util.concurrent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.util.Arrays;
@ -41,7 +42,8 @@ public class EsExecutors {
* Settings key to manually set the number of available processors.
* This is used to adjust thread pools sizes etc. per node.
*/
public static final Setting<Integer> PROCESSORS_SETTING = Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, false, Setting.Scope.CLUSTER) ;
public static final Setting<Integer> PROCESSORS_SETTING =
Setting.intSetting("processors", Math.min(32, Runtime.getRuntime().availableProcessors()), 1, Property.NodeScope);
/**
* Returns the number of processors available but at most <tt>32</tt>.

View File

@ -19,11 +19,11 @@
package org.elasticsearch.common.util.concurrent;
import org.apache.lucene.util.CloseableThreadLocal;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import java.io.Closeable;
@ -63,7 +63,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
public final class ThreadContext implements Closeable, Writeable<ThreadContext.ThreadContextStruct>{
public static final String PREFIX = "request.headers";
public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", false, Setting.Scope.CLUSTER);
public static final Setting<Settings> DEFAULT_HEADERS_SETTING = Setting.groupSetting(PREFIX + ".", Property.NodeScope);
private final Map<String, String> defaultHeader;
private static final ThreadContextStruct DEFAULT_CONTEXT = new ThreadContextStruct(Collections.emptyMap());
private final ContextThreadLocal threadLocal;
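As a hypothetical illustration, default headers live under the request.headers prefix and are resolved through the group setting above:

    Settings nodeSettings = Settings.builder().put("request.headers.X-Example", "value").build();
    Settings defaultHeaders = ThreadContext.DEFAULT_HEADERS_SETTING.get(nodeSettings); // the settings under "request.headers."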

View File

@ -23,6 +23,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
@ -45,10 +46,11 @@ import java.util.function.Function;
*/
public class DiscoveryModule extends AbstractModule {
public static final Setting<String> DISCOVERY_TYPE_SETTING = new Setting<>("discovery.type",
settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING = new Setting<>("discovery.zen.masterservice.type",
"zen", Function.identity(), false, Setting.Scope.CLUSTER);
public static final Setting<String> DISCOVERY_TYPE_SETTING =
new Setting<>("discovery.type", settings -> DiscoveryNode.localNode(settings) ? "local" : "zen", Function.identity(),
Property.NodeScope);
public static final Setting<String> ZEN_MASTER_SERVICE_TYPE_SETTING =
new Setting<>("discovery.zen.masterservice.type", "zen", Function.identity(), Property.NodeScope);
private final Settings settings;
private final Map<String, List<Class<? extends UnicastHostsProvider>>> unicastHostProviders = new HashMap<>();
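A hypothetical override showing how the discovery implementation is selected through the node-scoped setting:

    String discoveryType = DiscoveryModule.DISCOVERY_TYPE_SETTING.get(
        Settings.builder().put("discovery.type", "local").build());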

Some files were not shown because too many files have changed in this diff