Merge branch 'master' into feature/rank-eval

Christoph Büscher 2016-09-13 12:47:42 +02:00
commit 9e2ec53458
101 changed files with 2765 additions and 1877 deletions

View File

@@ -364,6 +364,7 @@ These are the linux flavors the Vagrantfile currently supports:
 * ubuntu-1204 aka precise
 * ubuntu-1404 aka trusty
 * ubuntu-1504 aka vivid
+* ubuntu-1604 aka xenial
 * debian-8 aka jessie, the current debian stable distribution
 * centos-6
 * centos-7

Vagrantfile (vendored): 7 changed lines
View File

@@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
       [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
     SHELL
   end
+  config.vm.define "ubuntu-1604" do |config|
+    config.vm.box = "elastic/ubuntu-16.04-x86_64"
+    ubuntu_common config, extra: <<-SHELL
+      # Install Jayatana so we can work around it being present.
+      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
+    SHELL
+  end
   # Wheezy's backports don't contain Openjdk 8 and the backflips required to
   # get the sun jdk on there just aren't worth it. We have jessie for testing
   # debian and it works fine.

View File

@@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
     private static String findJavaHome() {
         String javaHome = System.getenv('JAVA_HOME')
         if (javaHome == null) {
-            if (System.getProperty("idea.active") != null) {
+            if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
                 // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
                 javaHome = Jvm.current().javaHome
             } else {
@@ -405,9 +405,9 @@ class BuildPlugin implements Plugin<Project> {
             //options.incremental = true
             if (project.javaVersion == JavaVersion.VERSION_1_9) {
-                // hack until gradle supports java 9's new "-release" arg
+                // hack until gradle supports java 9's new "--release" arg
                 assert minimumJava == JavaVersion.VERSION_1_8
-                options.compilerArgs << '-release' << '8'
+                options.compilerArgs << '--release' << '8'
                 project.sourceCompatibility = null
                 project.targetCompatibility = null
             }

View File

@@ -38,17 +38,11 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.ParsingException;
 import org.elasticsearch.common.Randomness;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.engine.Engine;
+import org.elasticsearch.index.query.ParsedQuery;
 import org.elasticsearch.index.query.QueryShardException;
-import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.SearchService;
-import org.elasticsearch.search.fetch.FetchPhase;
-import org.elasticsearch.search.internal.DefaultSearchContext;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.ShardSearchLocalRequest;
 import org.elasticsearch.tasks.Task;
@@ -67,25 +61,15 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
  */
 public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {

-    private final IndicesService indicesService;
-    private final ScriptService scriptService;
-    private final BigArrays bigArrays;
-    private final FetchPhase fetchPhase;
+    private final SearchService searchService;

     @Inject
     public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
-                                        TransportService transportService, IndicesService indicesService, ScriptService scriptService,
-                                        BigArrays bigArrays, ActionFilters actionFilters,
-                                        IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
+                                        TransportService transportService, SearchService searchService, ActionFilters actionFilters,
+                                        IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
                 indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
-        this.indicesService = indicesService;
-        this.scriptService = scriptService;
-        this.bigArrays = bigArrays;
-        this.fetchPhase = fetchPhase;
+        this.searchService = searchService;
     }

     @Override
@@ -161,29 +145,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
     @Override
     protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
-        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.getShard(request.shardId().id());
         boolean valid;
         String explanation = null;
         String error = null;
-        Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");
-        DefaultSearchContext searchContext = new DefaultSearchContext(0,
-            new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
-            indexService, indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(),
-            parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
+        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
+            request.nowInMillis(), request.filteringAliases());
+        SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
         SearchContext.setCurrent(searchContext);
         try {
-            searchContext.parsedQuery(searchContext.getQueryShardContext().toQuery(request.query()));
-            searchContext.preProcess();
+            ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
+            searchContext.parsedQuery(parsedQuery);
+            searchContext.preProcess(request.rewrite());
             valid = true;
-            if (request.rewrite()) {
-                explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
-            } else if (request.explain()) {
-                explanation = searchContext.filteredQuery().query().toString();
-            }
+            explanation = explain(searchContext, request.rewrite());
         } catch (QueryShardException|ParsingException e) {
             valid = false;
             error = e.getDetailedMessage();
@@ -191,19 +166,18 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
             valid = false;
             error = e.getMessage();
         } finally {
-            searchContext.close();
-            SearchContext.removeCurrent();
+            Releasables.close(searchContext, () -> SearchContext.removeCurrent());
         }
         return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
     }

-    private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException {
-        Query queryRewrite = searcher.rewrite(query);
-        if (queryRewrite instanceof MatchNoDocsQuery) {
-            return query.toString();
+    private String explain(SearchContext context, boolean rewritten) throws IOException {
+        Query query = context.query();
+        if (rewritten && query instanceof MatchNoDocsQuery) {
+            return context.parsedQuery().query().toString();
         } else {
-            return queryRewrite.toString();
+            return query.toString();
         }
     }
 }
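
The rewrite branch above can be exercised end to end through the Java client. A hedged usage sketch follows (not part of this commit); the index name and query are illustrative, while prepareValidateQuery, setQuery, setRewrite and getQueryExplanation are the ValidateQueryRequestBuilder/ValidateQueryResponse methods this action serves.

import org.elasticsearch.action.admin.indices.validate.query.ValidateQueryResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

class ValidateQuerySketch {
    static String rewrittenForm(Client client) {
        // setRewrite(true) makes shardOperation call explain(searchContext, true) above
        ValidateQueryResponse response = client.admin().indices()
                .prepareValidateQuery("my-index")                    // hypothetical index
                .setQuery(QueryBuilders.termQuery("user", "kimchy")) // hypothetical query
                .setRewrite(true)
                .get();
        // each shard contributes a QueryExplanation carrying the explanation string
        return response.getQueryExplanation().get(0).getExplanation();
    }
}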

View File

@@ -31,20 +31,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.lease.Releasables;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.get.GetResult;
 import org.elasticsearch.index.mapper.Uid;
 import org.elasticsearch.index.mapper.UidFieldMapper;
-import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.IndicesService;
-import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.SearchService;
-import org.elasticsearch.search.fetch.FetchPhase;
-import org.elasticsearch.search.internal.DefaultSearchContext;
 import org.elasticsearch.search.internal.SearchContext;
 import org.elasticsearch.search.internal.ShardSearchLocalRequest;
 import org.elasticsearch.search.rescore.RescoreSearchContext;
@@ -60,26 +54,15 @@ import java.io.IOException;
 // TODO: AggregatedDfs. Currently the idf can be different then when executing a normal search with explain.
 public class TransportExplainAction extends TransportSingleShardAction<ExplainRequest, ExplainResponse> {

-    private final IndicesService indicesService;
-    private final ScriptService scriptService;
-    private final BigArrays bigArrays;
-    private final FetchPhase fetchPhase;
+    private final SearchService searchService;

     @Inject
     public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
-                                  TransportService transportService, IndicesService indicesService, ScriptService scriptService,
-                                  BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
-                                  FetchPhase fetchPhase) {
+                                  TransportService transportService, SearchService searchService, ActionFilters actionFilters,
+                                  IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                 ExplainRequest::new, ThreadPool.Names.GET);
-        this.indicesService = indicesService;
-        this.scriptService = scriptService;
-        this.bigArrays = bigArrays;
-        this.fetchPhase = fetchPhase;
+        this.searchService = searchService;
     }

     @Override
@@ -104,23 +87,19 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
     @Override
     protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
-        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
-        IndexShard indexShard = indexService.getShard(shardId.id());
+        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
+            new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
+        SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
         Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
-        Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
+        SearchContext.setCurrent(context);
+        Engine.GetResult result = null;
+        try {
+            result = context.indexShard().get(new Engine.Get(false, uidTerm));
             if (!result.exists()) {
                 return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
             }
-        SearchContext context = new DefaultSearchContext(0,
-            new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
-            result.searcher(), indexService, indexShard, scriptService, bigArrays,
-            threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
-        SearchContext.setCurrent(context);
-        try {
             context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
-            context.preProcess();
+            context.preProcess(true);
             int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
             Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
             for (RescoreSearchContext ctx : context.rescore()) {
@@ -131,7 +110,8 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
                 // Advantage is that we're not opening a second searcher to retrieve the _source. Also
                 // because we are working in the same searcher in engineGetResult we can be sure that a
                 // doc isn't deleted between the initial get and this call.
-                GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
+                GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.fields(),
+                    request.fetchSourceContext());
                 return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
             } else {
                 return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
@@ -139,8 +119,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
         } catch (IOException e) {
             throw new ElasticsearchException("Could not explain", e);
         } finally {
-            context.close();
-            SearchContext.removeCurrent();
+            Releasables.close(result, context, () -> SearchContext.removeCurrent());
         }
     }
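
Both actions now funnel cleanup through Releasables.close, which accepts any number of Releasable arguments; since Releasable is a functional interface, a trailing lambda can chain a final cleanup step after the real resources. A minimal sketch of the pattern, with illustrative resource names:

import org.elasticsearch.common.lease.Releasable;
import org.elasticsearch.common.lease.Releasables;

class CleanupPatternSketch {
    static void withResources(Releasable searcherLike, Releasable contextLike) {
        try {
            // ... use the resources ...
        } finally {
            // closes each argument in turn; the lambda is itself a Releasable,
            // so thread-local cleanup can be appended after the resources
            Releasables.close(searcherLike, contextLike, () -> System.out.println("cleanup"));
        }
    }
}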

View File

@@ -61,7 +61,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
     private VersionType versionType = VersionType.INTERNAL;
     private long version = Versions.MATCH_ANY;
-    private boolean ignoreErrorsOnGeneratedFields;

     public GetRequest() {
         type = "_all";
@@ -248,19 +247,10 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
         return this;
     }

-    public GetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
-        this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
-        return this;
-    }

     public VersionType versionType() {
         return this.versionType;
     }

-    public boolean ignoreErrorsOnGeneratedFields() {
-        return ignoreErrorsOnGeneratedFields;
-    }

     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
@@ -278,7 +268,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
             }
         }
         realtime = in.readBoolean();
-        this.ignoreErrorsOnGeneratedFields = in.readBoolean();

         this.versionType = VersionType.fromValue(in.readByte());
         this.version = in.readLong();
@@ -304,7 +293,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
             }
         }
         out.writeBoolean(realtime);
-        out.writeBoolean(ignoreErrorsOnGeneratedFields);

         out.writeByte(versionType.getValue());
         out.writeLong(version);
         out.writeOptionalStreamable(fetchSourceContext);

View File

@@ -155,11 +155,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
         return this;
     }

-    public GetRequestBuilder setIgnoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
-        request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
-        return this;
-    }

     /**
      * Sets the version, which will cause the get operation to only be performed if a matching
      * version exists and no changes happened on the doc since then.

View File

@@ -262,8 +262,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
     String preference;
     boolean realtime = true;
     boolean refresh;
-    public boolean ignoreErrorsOnGeneratedFields = false;

     List<Item> items = new ArrayList<>();

     public List<Item> getItems() {
@@ -338,11 +336,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
     }

-    public MultiGetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
-        this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
-        return this;
-    }

     public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
         return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
     }
@@ -510,7 +503,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
         preference = in.readOptionalString();
         refresh = in.readBoolean();
         realtime = in.readBoolean();
-        ignoreErrorsOnGeneratedFields = in.readBoolean();

         int size = in.readVInt();
         items = new ArrayList<>(size);
@@ -525,7 +517,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
         out.writeOptionalString(preference);
         out.writeBoolean(refresh);
         out.writeBoolean(realtime);
-        out.writeBoolean(ignoreErrorsOnGeneratedFields);

         out.writeVInt(items.size());
         for (Item item : items) {

View File

@@ -80,9 +80,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest
         request.realtime(realtime);
         return this;
     }

-    public MultiGetRequestBuilder setIgnoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
-        request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
-        return this;
-    }
 }

View File

@@ -35,7 +35,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
     private String preference;
     boolean realtime = true;
     boolean refresh;
-    boolean ignoreErrorsOnGeneratedFields = false;

     IntArrayList locations;
     List<MultiGetRequest.Item> items;
@@ -52,7 +51,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         preference = multiGetRequest.preference;
         realtime = multiGetRequest.realtime;
         refresh = multiGetRequest.refresh;
-        ignoreErrorsOnGeneratedFields = multiGetRequest.ignoreErrorsOnGeneratedFields;
     }

     @Override
@@ -87,11 +85,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         return this;
     }

-    public MultiGetShardRequest ignoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
-        this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
-        return this;
-    }

     public boolean refresh() {
         return this.refresh;
     }
@@ -130,7 +123,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         preference = in.readOptionalString();
         refresh = in.readBoolean();
         realtime = in.readBoolean();
-        ignoreErrorsOnGeneratedFields = in.readBoolean();
     }

     @Override
@@ -146,11 +138,5 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         out.writeOptionalString(preference);
         out.writeBoolean(refresh);
         out.writeBoolean(realtime);
-        out.writeBoolean(ignoreErrorsOnGeneratedFields);
     }
-
-    public boolean ignoreErrorsOnGeneratedFields() {
-        return ignoreErrorsOnGeneratedFields;
-    }
 }

View File

@@ -93,7 +93,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
         }

         GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
-                request.realtime(), request.version(), request.versionType(), request.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
+                request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
         return new GetResponse(result);
     }

View File

@@ -88,13 +88,15 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
         for (int i = 0; i < request.locations.size(); i++) {
             MultiGetRequest.Item item = request.items.get(i);
             try {
-                GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(), item.versionType(), item.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
+                GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(),
+                        item.versionType(), item.fetchSourceContext());
                 response.add(request.locations.get(i), new GetResponse(getResult));
             } catch (Exception e) {
                 if (TransportActions.isShardNotAvailableException(e)) {
                     throw (ElasticsearchException) e;
                 } else {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
+                            item.type(), item.id()), e);
                     response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
                 }
             }

View File

@@ -40,8 +40,6 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
 import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;

View File

@@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;

View File

@@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.fetch.FetchSearchResult;

View File

@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.search.controller;
+package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
 import com.carrotsearch.hppc.ObjectObjectHashMap;
@@ -89,8 +89,7 @@ public class SearchPhaseController extends AbstractComponent {
     private final ScriptService scriptService;
     private final ClusterService clusterService;

-    @Inject
-    public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
+    SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
         super(settings);
         this.bigArrays = bigArrays;
         this.scriptService = scriptService;

View File

@@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;
 import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;

View File

@@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
 import org.elasticsearch.search.internal.InternalSearchResponse;

View File

@@ -28,8 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;
 import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
 import org.elasticsearch.search.internal.InternalScrollSearchRequest;

View File

@@ -29,8 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.fetch.ShardFetchRequest;
 import org.elasticsearch.search.internal.InternalScrollSearchRequest;

View File

@@ -17,17 +17,15 @@
  * under the License.
  */

-package org.elasticsearch.search.action;
+package org.elasticsearch.action.search;

 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;
@@ -75,8 +73,7 @@ public class SearchTransportService extends AbstractComponent {
     private final TransportService transportService;
     private final SearchService searchService;

-    @Inject
-    public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
+    SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
         super(settings);
         this.transportService = transportService;
         this.searchService = searchService;

View File

@@ -32,7 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;
-import org.elasticsearch.search.action.SearchTransportService;
+import org.elasticsearch.search.SearchService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;
@@ -53,11 +53,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
     @Inject
     public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
-                                      ClusterService clusterService, SearchTransportService searchTransportService,
+                                      ClusterService clusterService, SearchService searchService,
                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
         this.clusterService = clusterService;
-        this.searchTransportService = searchTransportService;
+        this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
     }

     @Override

View File

@@ -29,10 +29,11 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.IndexClosedException;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -53,13 +54,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
     private final SearchPhaseController searchPhaseController;

     @Inject
-    public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
-                                 TransportService transportService, SearchTransportService searchTransportService,
+    public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
+                                 TransportService transportService, SearchService searchService,
                                  ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
                                          indexNameExpressionResolver) {
         super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
-        this.searchPhaseController = searchPhaseController;
-        this.searchTransportService = searchTransportService;
+        this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
+        this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
         this.clusterService = clusterService;
     }

View File

@@ -26,8 +26,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
@@ -45,15 +46,15 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
     private final SearchPhaseController searchPhaseController;

     @Inject
-    public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
-                                       ClusterService clusterService, SearchTransportService searchTransportService,
-                                       SearchPhaseController searchPhaseController,
+    public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
+                                       TransportService transportService,
+                                       ClusterService clusterService, SearchService searchService,
                                        ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
                 SearchScrollRequest::new);
         this.clusterService = clusterService;
-        this.searchTransportService = searchTransportService;
-        this.searchPhaseController = searchPhaseController;
+        this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
+        this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
     }

     @Override
@Override @Override

View File

@@ -76,7 +76,7 @@ public class UpdateHelper extends AbstractComponent {
     public Result prepare(UpdateRequest request, IndexShard indexShard) {
         final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
                 new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
-                true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
+                true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
         return prepare(indexShard.shardId(), request, getResult);
     }

View File

@@ -257,11 +257,6 @@ final class Security {
         for (Path path : environment.dataFiles()) {
             addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
         }
-        // TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
-        // https://github.com/elastic/elasticsearch/issues/20391
-        for (Path path : environment.dataWithClusterFiles()) {
-            addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
-        }
         for (Path path : environment.repoFiles()) {
             addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
         }

View File

@@ -607,23 +607,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
                 add = false;
                 expression = expression.substring(1);
             }
-            if (result == null) {
-                // add all the previous ones...
-                result = new HashSet<>(expressions.subList(0, i));
-            }
             if (!Regex.isSimpleMatchPattern(expression)) {
                 if (!unavailableIgnoredOrExists(options, metaData, expression)) {
                     throw infe(expression);
                 }
+                if (result != null) {
                     if (add) {
                         result.add(expression);
                     } else {
                         result.remove(expression);
                     }
+                }
                 continue;
             }
+            if (result == null) {
+                // add all the previous ones...
+                result = new HashSet<>(expressions.subList(0, i));
+            }

             final IndexMetaData.State excludeState = excludeState(options);
             final Map<String, AliasOrIndex> matches = matches(metaData, expression);
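
The point of moving the lazy initialization is that an expression list with no wildcards now never materializes the HashSet at all: result stays null, concrete names are merely validated, and the set is only built (seeded with all prior expressions) when the first wildcard appears. A hedged, heavily simplified model of that control flow (not the ES source; names and the prefix matching are illustrative):

import java.util.HashSet;
import java.util.List;
import java.util.Set;

class WildcardResolutionSketch {
    static Set<String> resolve(List<String> expressions, Set<String> knownIndices) {
        Set<String> result = null;                                 // lazy, as in the patched code
        for (int i = 0; i < expressions.size(); i++) {
            String expression = expressions.get(i);
            boolean add = true;
            if (expression.startsWith("-")) {                      // exclusion marker
                add = false;
                expression = expression.substring(1);
            }
            if (!expression.contains("*")) {                       // concrete name
                if (result != null) {                              // only after a wildcard appeared
                    if (add) result.add(expression); else result.remove(expression);
                }
                continue;
            }
            if (result == null) {
                result = new HashSet<>(expressions.subList(0, i)); // add all the previous ones
            }
            String prefix = expression.substring(0, expression.indexOf('*'));
            for (String index : knownIndices) {
                if (index.startsWith(prefix)) {
                    if (add) result.add(index); else result.remove(index);
                }
            }
        }
        // no wildcard seen: the original expressions stand as-is
        return result == null ? new HashSet<>(expressions) : result;
    }
}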

View File

@@ -0,0 +1,205 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.cluster.routing.allocation;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.Nullable;
import java.util.Collections;
import java.util.Map;
import java.util.Objects;
/**
* Represents the allocation decision by an allocator for an unassigned shard.
*/
public class UnassignedShardDecision {
/** a constant representing a shard decision where no decision was taken */
public static final UnassignedShardDecision DECISION_NOT_TAKEN =
new UnassignedShardDecision(null, null, null, null, null, null);
@Nullable
private final Decision finalDecision;
@Nullable
private final AllocationStatus allocationStatus;
@Nullable
private final String finalExplanation;
@Nullable
private final String assignedNodeId;
@Nullable
private final String allocationId;
@Nullable
private final Map<String, Decision> nodeDecisions;
private UnassignedShardDecision(Decision finalDecision,
AllocationStatus allocationStatus,
String finalExplanation,
String assignedNodeId,
String allocationId,
Map<String, Decision> nodeDecisions) {
assert finalExplanation != null || finalDecision == null :
"if a decision was taken, there must be an explanation for it";
assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES :
"a yes decision must have a node to assign the shard to";
assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES :
"only a yes decision should not have an allocation status";
assert allocationId == null || assignedNodeId != null :
"allocation id can only be null if the assigned node is null";
this.finalDecision = finalDecision;
this.allocationStatus = allocationStatus;
this.finalExplanation = finalExplanation;
this.assignedNodeId = assignedNodeId;
this.allocationId = allocationId;
this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
}
/**
* Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision.
*/
public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) {
return noDecision(allocationStatus, explanation, null);
}
/**
* Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision,
* as well as the individual node-level decisions that comprised the final NO decision.
*/
public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus,
String explanation,
@Nullable Map<String, Decision> nodeDecisions) {
Objects.requireNonNull(explanation, "explanation must not be null");
Objects.requireNonNull(allocationStatus, "allocationStatus must not be null");
return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions);
}
/**
* Creates a THROTTLE decision with the given explanation and individual node-level decisions that
* comprised the final THROTTLE decision.
*/
public static UnassignedShardDecision throttleDecision(String explanation,
Map<String, Decision> nodeDecisions) {
Objects.requireNonNull(explanation, "explanation must not be null");
return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
nodeDecisions);
}
/**
* Creates a YES decision with the given explanation and individual node-level decisions that
* comprised the final YES decision, along with the node id to which the shard is assigned and
* the allocation id for the shard, if available.
*/
public static UnassignedShardDecision yesDecision(String explanation,
String assignedNodeId,
@Nullable String allocationId,
Map<String, Decision> nodeDecisions) {
Objects.requireNonNull(explanation, "explanation must not be null");
Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null");
return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions);
}
/**
* Returns <code>true</code> if a decision was taken by the allocator, {@code false} otherwise.
* If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
*/
public boolean isDecisionTaken() {
return finalDecision != null;
}
/**
* Returns the final decision made by the allocator on whether to assign the unassigned shard.
* This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}.
*/
@Nullable
public Decision getFinalDecision() {
return finalDecision;
}
/**
* Returns the final decision made by the allocator on whether to assign the unassigned shard.
* Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
* throw an {@code IllegalArgumentException}.
*/
public Decision getFinalDecisionSafe() {
if (isDecisionTaken() == false) {
throw new IllegalArgumentException("decision must have been taken in order to return the final decision");
}
return finalDecision;
}
/**
* Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if
* no decision was taken or if the decision was {@link Decision.Type#YES}.
*/
@Nullable
public AllocationStatus getAllocationStatus() {
return allocationStatus;
}
/**
* Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
*/
@Nullable
public String getFinalExplanation() {
return finalExplanation;
}
/**
* Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
* Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
* throw an {@code IllegalArgumentException}.
*/
public String getFinalExplanationSafe() {
if (isDecisionTaken() == false) {
throw new IllegalArgumentException("decision must have been taken in order to return the final explanation");
}
return finalExplanation;
}
/**
* Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecision()} returns
* a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
*/
@Nullable
public String getAssignedNodeId() {
return assignedNodeId;
}
/**
* Gets the allocation id for the existing shard copy that the allocator is assigning the shard to.
* This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value
* and the node on which the shard is assigned already has a shard copy with an in-sync allocation id
* that we can re-use.
*/
@Nullable
public String getAllocationId() {
return allocationId;
}
/**
* Gets the individual node-level decisions that went into making the final decision as represented by
* {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision}
* as the decision for the given node.
*/
@Nullable
public Map<String, Decision> getNodeDecisions() {
return nodeDecisions;
}
}
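
A hedged sketch of how the factory methods above compose (not from the commit; the node id, explanation strings, and the DECIDERS_NO status value are illustrative assumptions):

import java.util.Collections;
import java.util.Map;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

class UnassignedShardDecisionSketch {
    static UnassignedShardDecision decide(boolean canAllocate) {
        Map<String, Decision> nodeDecisions =
                Collections.singletonMap("node-1", canAllocate ? Decision.YES : Decision.NO);
        if (canAllocate) {
            // YES decisions must name the assigned node; allocationId may be null for a fresh copy
            return UnassignedShardDecision.yesDecision("node-1 has a usable shard copy",
                    "node-1", null, nodeDecisions);
        }
        // NO decisions must carry an AllocationStatus and an explanation
        return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO,
                "no node satisfied the allocation deciders", nodeDecisions);
    }
}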

View File

@@ -74,7 +74,7 @@ public class AllocationDeciders extends AllocationDecider {
             // short track if a NO is returned.
             if (decision == Decision.NO) {
                 if (logger.isTraceEnabled()) {
-                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
+                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName());
                 }
                 // short circuit only if debugging is not enabled
                 if (!allocation.debugDecision()) {

View File

@@ -318,7 +318,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
         if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) {
             throw new IllegalStateException("Shouldn't publish state when not master");
         }
-        nodesFD.updateNodesAndPing(clusterChangedEvent.state());
         try {
             publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
         } catch (FailedToCommitClusterStateException t) {
@@ -338,6 +338,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
             });
             throw t;
         }
+        // update the set of nodes to ping after the new cluster state has been published
+        nodesFD.updateNodesAndPing(clusterChangedEvent.state());
+    }
+
+    /**
+     * Gets the current set of nodes involved in the node fault detection.
+     * NB: for testing purposes
+     */
+    public Set<DiscoveryNode> getFaultDetectionNodes() {
+        return nodesFD.getNodes();
     }

     @Override

View File

@@ -41,6 +41,8 @@ import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;

 import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CopyOnWriteArrayList;
@@ -91,6 +93,14 @@ public class NodesFaultDetection extends FaultDetection {
         listeners.remove(listener);
     }

+    /**
+     * Gets the current set of nodes involved in node fault detection.
+     * NB: For testing purposes.
+     */
+    public Set<DiscoveryNode> getNodes() {
+        return Collections.unmodifiableSet(nodesFD.keySet());
+    }
+
     /**
      * make sure that nodes in clusterState are pinged. Any pinging to nodes which are not
      * part of the cluster will be stopped

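Taken together, the two changes above move the fault-detection update to after a successful publish and expose the pinged node set for tests. A test could assert on it roughly as follows; this is a sketch under the assumption that the test already has a handle on the master's ZenDiscovery instance, and only getFaultDetectionNodes comes from this diff.

    import java.util.Set;

    import org.elasticsearch.cluster.node.DiscoveryNode;
    import org.elasticsearch.discovery.zen.ZenDiscovery;

    class FaultDetectionNodesAssertionSketch {
        // Hypothetical assertion helper for a discovery test.
        static void assertNodeIsPinged(ZenDiscovery zenDiscovery, DiscoveryNode expected) {
            Set<DiscoveryNode> pinged = zenDiscovery.getFaultDetectionNodes();
            // once the master has published a state containing `expected`,
            // that node should be part of fault detection
            assert pinged.contains(expected) : expected + " is not being pinged: " + pinged;
        }
    }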
View File

@@ -209,13 +209,6 @@ public final class NodeEnvironment implements Closeable {
             for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
                 Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
                 Path dataDir = environment.dataFiles()[dirIndex];
-                // TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
-                if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
-                    DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger);
-                    deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
-                        "Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
-                    dataDir = dataDirWithClusterName;
-                }
                 Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
                 Files.createDirectories(dir);
@@ -289,25 +282,6 @@ public final class NodeEnvironment implements Closeable {
             }
         }

-    // Visible for testing
-    /** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */
-    static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException {
-        if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist
-            Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory
-            dirEmpty(dataPathWithClusterName)) { // Or if it's empty
-            // No need to read from cluster-name folder!
-            return false;
-        }
-        // The "nodes" directory inside of the cluster name
-        Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER);
-        if (Files.isDirectory(nodesPath)) {
-            // The cluster has data in the "nodes" so we should read from the cluster-named folder for now
-            return true;
-        }
-        // Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate
-        return false;
-    }
-
     private static void releaseAndNullLocks(Lock[] locks) {
         for (int i = 0; i < locks.length; i++) {
             if (locks[i] != null) {

View File

@@ -0,0 +1,88 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.gateway;
+
+import org.apache.logging.log4j.Logger;
+import org.elasticsearch.cluster.routing.RoutingNodes;
+import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.settings.Settings;
+
+/**
+ * An abstract class that implements basic functionality for allocating
+ * shards to nodes based on shard copies that already exist in the cluster.
+ *
+ * Individual implementations of this class are responsible for providing
+ * the logic to determine to which nodes (if any) those shards are allocated.
+ */
+public abstract class BaseGatewayShardAllocator extends AbstractComponent {
+
+    public BaseGatewayShardAllocator(Settings settings) {
+        super(settings);
+    }
+
+    /**
+     * Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist.
+     * It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)}
+     * to make decisions on assigning shards to nodes.
+     *
+     * @param allocation the allocation state container object
+     */
+    public void allocateUnassigned(RoutingAllocation allocation) {
+        final RoutingNodes routingNodes = allocation.routingNodes();
+        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
+        while (unassignedIterator.hasNext()) {
+            final ShardRouting shard = unassignedIterator.next();
+            final UnassignedShardDecision unassignedShardDecision = makeAllocationDecision(shard, allocation, logger);
+
+            if (unassignedShardDecision.isDecisionTaken() == false) {
+                // no decision was taken by this allocator
+                continue;
+            }
+
+            if (unassignedShardDecision.getFinalDecisionSafe().type() == Decision.Type.YES) {
+                unassignedIterator.initialize(unassignedShardDecision.getAssignedNodeId(),
+                    unassignedShardDecision.getAllocationId(),
+                    shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
+                                      allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
+                    allocation.changes());
+            } else {
+                unassignedIterator.removeAndIgnore(unassignedShardDecision.getAllocationStatus(), allocation.changes());
+            }
+        }
+    }
+
+    /**
+     * Make a decision on the allocation of an unassigned shard. This method is used by
+     * {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not
+     * the shard can be allocated by this allocator and if so, to which node it will be allocated.
+     *
+     * @param unassignedShard the unassigned shard to allocate
+     * @param allocation      the current routing state
+     * @param logger          the logger
+     * @return an {@link UnassignedShardDecision} with the final decision of whether to allocate and details of the decision
+     */
+    public abstract UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
+                                                                   RoutingAllocation allocation,
+                                                                   Logger logger);
+}

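To make the new contract concrete, here is what a minimal subclass could look like. This is a hypothetical no-op allocator; it exercises only the constructor and the abstract method shown in the file above.

    import org.apache.logging.log4j.Logger;
    import org.elasticsearch.cluster.routing.ShardRouting;
    import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
    import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
    import org.elasticsearch.common.settings.Settings;
    import org.elasticsearch.gateway.BaseGatewayShardAllocator;

    // Hypothetical allocator that never takes a decision, leaving every
    // shard to the other allocators in the chain.
    class NoOpGatewayShardAllocator extends BaseGatewayShardAllocator {
        NoOpGatewayShardAllocator(Settings settings) {
            super(settings);
        }

        @Override
        public UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
                                                              RoutingAllocation allocation,
                                                              Logger logger) {
            // DECISION_NOT_TAKEN makes allocateUnassigned() skip the shard entirely
            return UnassignedShardDecision.DECISION_NOT_TAKEN;
        }
    }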
View File

@@ -19,12 +19,12 @@
 package org.elasticsearch.gateway;

+import org.apache.logging.log4j.Logger;
 import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.apache.lucene.util.CollectionUtil;
 import org.elasticsearch.Version;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
-import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.RoutingNode;
@@ -32,19 +32,23 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
-import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.gateway.AsyncShardFetch.FetchResult;
 import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
 import org.elasticsearch.index.shard.ShardStateMetaData;

 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
+import java.util.LinkedHashMap;
 import java.util.LinkedList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.function.Function;
 import java.util.stream.Collectors;
@@ -62,7 +66,7 @@ import java.util.stream.Collectors;
  * nor does it allocate primaries when a primary shard failed and there is a valid replica
  * copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}.
  */
-public abstract class PrimaryShardAllocator extends AbstractComponent {
+public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {

     private static final Function<String, String> INITIAL_SHARDS_PARSER = (value) -> {
         switch (value) {
@@ -94,110 +98,161 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
         logger.debug("using initial_shards [{}]", NODE_INITIAL_SHARDS_SETTING.get(settings));
     }

-    public void allocateUnassigned(RoutingAllocation allocation) {
-        final RoutingNodes routingNodes = allocation.routingNodes();
-        final MetaData metaData = allocation.metaData();
-
-        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
-        while (unassignedIterator.hasNext()) {
-            final ShardRouting shard = unassignedIterator.next();
-
-            if (shard.primary() == false) {
-                continue;
-            }
-
-            if (shard.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE &&
-                shard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
-                continue;
-            }
+    /**
+     * Is the allocator responsible for allocating the given {@link ShardRouting}?
+     */
+    private static boolean isResponsibleFor(final ShardRouting shard) {
+        return shard.primary() // must be primary
+                   && shard.unassigned() // must be unassigned
+                   // only handle either an existing store or a snapshot recovery
+                   && (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE
+                           || shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT);
+    }

-            final AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
+    @Override
+    public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
+                                                          final RoutingAllocation allocation,
+                                                          final Logger logger) {
+        if (isResponsibleFor(unassignedShard) == false) {
+            // this allocator is not responsible for allocating this shard
+            return UnassignedShardDecision.DECISION_NOT_TAKEN;
+        }
+
+        final boolean explain = allocation.debugDecision();
+        final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation);
         if (shardState.hasData() == false) {
-            logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
             allocation.setHasPendingAsyncFetch();
-            unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
-            continue;
+            return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
+                "still fetching shard state from the nodes in the cluster");
         }

         // don't create a new IndexSetting object for every shard as this could cause a lot of garbage
         // on cluster restart if we allocate a boat load of shards
-        final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
-        final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(shard.id());
-        final boolean snapshotRestore = shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
+        final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index());
+        final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id());
+        final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
         final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);

         final NodeShardsResult nodeShardsResult;
         final boolean enoughAllocationsFound;

         if (inSyncAllocationIds.isEmpty()) {
-            assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocated a primary with an empty allocation id set, but index is new";
+            assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) :
+                "trying to allocated a primary with an empty allocation id set, but index is new";
             // when we load an old index (after upgrading cluster) or restore a snapshot of an old index
             // fall back to old version-based allocation mode
             // Note that once the shard has been active, lastActiveAllocationIds will be non-empty
-            nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
+            nodeShardsResult = buildVersionBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
+                allocation.getIgnoreNodes(unassignedShard.shardId()), shardState, logger);
             if (snapshotRestore || recoverOnAnyNode) {
                 enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
             } else {
                 enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
             }
-            logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard);
+            logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", unassignedShard.index(),
+                unassignedShard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, unassignedShard);
         } else {
             assert inSyncAllocationIds.isEmpty() == false;
             // use allocation ids to select nodes
-            nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode,
-                allocation.getIgnoreNodes(shard.shardId()), inSyncAllocationIds, shardState);
+            nodeShardsResult = buildAllocationIdBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
+                allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger);
             enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
-            logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, inSyncAllocationIds);
+            logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(),
+                unassignedShard.id(), nodeShardsResult.orderedAllocationCandidates.size(), unassignedShard, inSyncAllocationIds);
         }

-        if (enoughAllocationsFound == false){
+        if (enoughAllocationsFound == false) {
             if (snapshotRestore) {
                 // let BalancedShardsAllocator take care of allocating this shard
-                logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.recoverySource());
+                logger.debug("[{}][{}]: missing local data, will restore from [{}]",
+                    unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource());
+                return UnassignedShardDecision.DECISION_NOT_TAKEN;
             } else if (recoverOnAnyNode) {
                 // let BalancedShardsAllocator take care of allocating this shard
-                logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
+                logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id());
+                return UnassignedShardDecision.DECISION_NOT_TAKEN;
             } else {
-                // we can't really allocate, so ignore it and continue
-                unassignedIterator.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes());
-                logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound);
+                // We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary.
+                // We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
+                // this shard will be picked up when the node joins and we do another allocation reroute
+                logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]",
+                    unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound);
+                return UnassignedShardDecision.noDecision(AllocationStatus.NO_VALID_SHARD_COPY,
+                    "shard was previously allocated, but no valid shard copy could be found amongst the current nodes in the cluster");
             }
-            continue;
         }

         final NodesToAllocate nodesToAllocate = buildNodesToAllocate(
-            allocation, nodeShardsResult.orderedAllocationCandidates, shard, false
+            allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, false
         );
         if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
-            NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0);
-            logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
-            unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
+            DecidedNode decidedNode = nodesToAllocate.yesNodeShards.get(0);
+            logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation",
+                unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode());
+            final String nodeId = decidedNode.nodeShardState.getNode().getId();
+            return UnassignedShardDecision.yesDecision(
+                "the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]",
+                nodeId, decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain));
         } else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
             // The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
             // can be force-allocated to one of the nodes.
             final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(
-                allocation, nodeShardsResult.orderedAllocationCandidates, shard, true
+                allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, true
             );
             if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) {
-                NodeGatewayStartedShards nodeShardState = nodesToForceAllocate.yesNodeShards.get(0);
+                final DecidedNode decidedNode = nodesToForceAllocate.yesNodeShards.get(0);
+                final NodeGatewayStartedShards nodeShardState = decidedNode.nodeShardState;
                 logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
-                    shard.index(), shard.id(), shard, nodeShardState.getNode());
-                unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(),
-                    ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
+                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode());
+                final String nodeId = nodeShardState.getNode().getId();
+                return UnassignedShardDecision.yesDecision(
+                    "allocating the primary shard to node [" + nodeId + "], which has a complete copy of the shard data",
+                    nodeId,
+                    nodeShardState.allocationId(),
+                    buildNodeDecisions(nodesToForceAllocate, explain));
             } else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
                 logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
-                    shard.index(), shard.id(), shard, nodesToForceAllocate.throttleNodeShards);
-                unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes());
+                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards);
+                return UnassignedShardDecision.throttleDecision(
+                    "allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries",
+                    buildNodeDecisions(nodesToForceAllocate, explain));
             } else {
-                logger.debug("[{}][{}]: forced primary allocation denied [{}]", shard.index(), shard.id(), shard);
-                unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_NO, allocation.changes());
+                logger.debug("[{}][{}]: forced primary allocation denied [{}]",
+                    unassignedShard.index(), unassignedShard.id(), unassignedShard);
+                return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO,
+                    "all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted",
+                    buildNodeDecisions(nodesToForceAllocate, explain));
             }
         } else {
-            // we are throttling this, but we have enough to allocate to this node, ignore it for now
-            logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);
-            unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes());
+            // we are throttling this, since we are allowed to allocate to this node but there are enough allocations
+            // taking place on the node currently, ignore it for now
+            logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation",
+                unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards);
+            return UnassignedShardDecision.throttleDecision(
+                "allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries",
+                buildNodeDecisions(nodesToAllocate, explain));
         }
     }
+
+    /**
+     * Builds a map of nodes to the corresponding allocation decisions for those nodes.
+     */
+    private static Map<String, Decision> buildNodeDecisions(NodesToAllocate nodesToAllocate, boolean explain) {
+        if (explain == false) {
+            // not in explain mode, no need to return node level decisions
+            return null;
+        }
+        Map<String, Decision> nodeDecisions = new LinkedHashMap<>();
+        for (final DecidedNode decidedNode : nodesToAllocate.yesNodeShards) {
+            nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
+        }
+        for (final DecidedNode decidedNode : nodesToAllocate.throttleNodeShards) {
+            nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
+        }
+        for (final DecidedNode decidedNode : nodesToAllocate.noNodeShards) {
+            nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
+        }
+        return nodeDecisions;
+    }

     /**
@@ -205,8 +260,10 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
      * lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
      * entries with matching allocation id are always at the front of the list.
      */
-    protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
-                                                                      Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
+    protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard,
+                                                                             Set<String> ignoreNodes, Set<String> lastActiveAllocationIds,
+                                                                             FetchResult<NodeGatewayStartedShards> shardState,
+                                                                             Logger logger) {
         LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();
         LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();
         int numberOfAllocationsFound = 0;
@@ -299,9 +356,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                                                  List<NodeGatewayStartedShards> nodeShardStates,
                                                  ShardRouting shardRouting,
                                                  boolean forceAllocate) {
-        List<NodeGatewayStartedShards> yesNodeShards = new ArrayList<>();
-        List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
-        List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
+        List<DecidedNode> yesNodeShards = new ArrayList<>();
+        List<DecidedNode> throttledNodeShards = new ArrayList<>();
+        List<DecidedNode> noNodeShards = new ArrayList<>();
         for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
             RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId());
             if (node == null) {
@@ -310,12 +367,13 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
             Decision decision = forceAllocate ? allocation.deciders().canForceAllocatePrimary(shardRouting, node, allocation) :
                                                 allocation.deciders().canAllocate(shardRouting, node, allocation);
-            if (decision.type() == Decision.Type.THROTTLE) {
-                throttledNodeShards.add(nodeShardState);
-            } else if (decision.type() == Decision.Type.NO) {
-                noNodeShards.add(nodeShardState);
+            DecidedNode decidedNode = new DecidedNode(nodeShardState, decision);
+            if (decision.type() == Type.THROTTLE) {
+                throttledNodeShards.add(decidedNode);
+            } else if (decision.type() == Type.NO) {
+                noNodeShards.add(decidedNode);
             } else {
-                yesNodeShards.add(nodeShardState);
+                yesNodeShards.add(decidedNode);
             }
         }
         return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards));
@@ -325,8 +383,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
      * Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to
      * the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list.
      */
-    NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
-                                                       AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
+    static NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
+                                                              FetchResult<NodeGatewayStartedShards> shardState, Logger logger) {
         final List<NodeGatewayStartedShards> allocationCandidates = new ArrayList<>();
         int numberOfAllocationsFound = 0;
         long highestVersion = ShardStateMetaData.NO_VERSION;
@@ -400,7 +458,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                && IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings);
     }

-    protected abstract AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
+    protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);

     static class NodeShardsResult {
         public final List<NodeGatewayStartedShards> orderedAllocationCandidates;
@@ -413,16 +471,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
     }

     static class NodesToAllocate {
-        final List<NodeGatewayStartedShards> yesNodeShards;
-        final List<NodeGatewayStartedShards> throttleNodeShards;
-        final List<NodeGatewayStartedShards> noNodeShards;
+        final List<DecidedNode> yesNodeShards;
+        final List<DecidedNode> throttleNodeShards;
+        final List<DecidedNode> noNodeShards;

-        public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards,
-                               List<NodeGatewayStartedShards> throttleNodeShards,
-                               List<NodeGatewayStartedShards> noNodeShards) {
+        public NodesToAllocate(List<DecidedNode> yesNodeShards, List<DecidedNode> throttleNodeShards, List<DecidedNode> noNodeShards) {
             this.yesNodeShards = yesNodeShards;
             this.throttleNodeShards = throttleNodeShards;
             this.noNodeShards = noNodeShards;
         }
     }
+
+    /**
+     * This class encapsulates the shard state retrieved from a node and the decision that was made
+     * by the allocator for allocating to the node that holds the shard copy.
+     */
+    private static class DecidedNode {
+        final NodeGatewayStartedShards nodeShardState;
+        final Decision decision;
+
+        private DecidedNode(NodeGatewayStartedShards nodeShardState, Decision decision) {
+            this.nodeShardState = nodeShardState;
+            this.decision = decision;
+        }
+    }
 }

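One detail worth calling out: buildNodeDecisions collects the per-node decisions into a LinkedHashMap, so the explain output always lists YES nodes first, then THROTTLE, then NO. A stripped-down, runnable illustration of that ordering property (plain strings stand in for the Elasticsearch types):

    import java.util.LinkedHashMap;
    import java.util.Map;

    class NodeDecisionOrderingSketch {
        public static void main(String[] args) {
            // insertion order is preserved, mirroring the yes/throttle/no loops above
            Map<String, String> nodeDecisions = new LinkedHashMap<>();
            nodeDecisions.put("node-a", "YES");      // yesNodeShards first
            nodeDecisions.put("node-b", "THROTTLE"); // then throttleNodeShards
            nodeDecisions.put("node-c", "NO");       // then noNodeShards
            System.out.println(nodeDecisions); // {node-a=YES, node-b=THROTTLE, node-c=NO}
        }
    }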
View File

@@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
 import com.carrotsearch.hppc.ObjectLongMap;
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.carrotsearch.hppc.cursors.ObjectLongCursor;
-import org.elasticsearch.cluster.metadata.IndexMetaData;
+import org.apache.logging.log4j.Logger;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.RoutingNode;
@@ -31,24 +31,25 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
 import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
-import org.elasticsearch.cluster.routing.RoutingChangesObserver;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
 import org.elasticsearch.cluster.routing.allocation.decider.Decision;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.component.AbstractComponent;
+import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.ByteSizeValue;
 import org.elasticsearch.index.store.StoreFileMetaData;
 import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;

 import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;

 /**
  */
-public abstract class ReplicaShardAllocator extends AbstractComponent {
+public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {

     public ReplicaShardAllocator(Settings settings) {
         super(settings);
@@ -96,7 +97,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
                 continue;
             }

-            MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
+            MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores, false);
             if (matchingNodes.getNodeWithHighestMatch() != null) {
                 DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId());
                 DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch();
@@ -128,37 +129,45 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
         }
     }

-    public void allocateUnassigned(RoutingAllocation allocation) {
-        final RoutingNodes routingNodes = allocation.routingNodes();
-        final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
-        while (unassignedIterator.hasNext()) {
-            ShardRouting shard = unassignedIterator.next();
-            if (shard.primary()) {
-                continue;
-            }
-
-            // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
-            if (shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
-                continue;
-            }
+    /**
+     * Is the allocator responsible for allocating the given {@link ShardRouting}?
+     */
+    private static boolean isResponsibleFor(final ShardRouting shard) {
+        return shard.primary() == false // must be a replica
+                   && shard.unassigned() // must be unassigned
+                   // if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
+                   && shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED;
+    }

+    @Override
+    public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
+                                                          final RoutingAllocation allocation,
+                                                          final Logger logger) {
+        if (isResponsibleFor(unassignedShard) == false) {
+            // this allocator is not responsible for deciding on this shard
+            return UnassignedShardDecision.DECISION_NOT_TAKEN;
+        }
+
+        final RoutingNodes routingNodes = allocation.routingNodes();
+        final boolean explain = allocation.debugDecision();
         // pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
-        Decision decision = canBeAllocatedToAtLeastOneNode(shard, allocation);
-        if (decision.type() != Decision.Type.YES) {
-            logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
-            unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
-            continue;
+        Tuple<Decision, Map<String, Decision>> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain);
+        if (allocateDecision.v1().type() != Decision.Type.YES) {
+            logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard);
+            return UnassignedShardDecision.noDecision(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1()),
+                "all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard",
+                allocateDecision.v2());
         }

-        AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(shard, allocation);
+        AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(unassignedShard, allocation);
         if (shardStores.hasData() == false) {
-            logger.trace("{}: ignoring allocation, still fetching shard stores", shard);
+            logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard);
             allocation.setHasPendingAsyncFetch();
-            unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
-            continue; // still fetching
+            return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
+                "still fetching shard state from the nodes in the cluster");
         }

-        ShardRouting primaryShard = routingNodes.activePrimary(shard.shardId());
+        ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId());
         assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
         TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
         if (primaryStore == null) {
@@ -166,48 +175,42 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             // we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
             // will try and recover from
             // Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
-            logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard);
-            continue;
+            logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard);
+            return UnassignedShardDecision.DECISION_NOT_TAKEN;
         }

-        MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
+        MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain);
+        assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions";

         if (matchingNodes.getNodeWithHighestMatch() != null) {
             RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
             // we only check on THROTTLE since we checked before before on NO
-            decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
+            Decision decision = allocation.deciders().canAllocate(unassignedShard, nodeWithHighestMatch, allocation);
             if (decision.type() == Decision.Type.THROTTLE) {
-                logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
-                // we are throttling this, but we have enough to allocate to this node, ignore it for now
-                unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
+                logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store",
+                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
+                // we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now
+                return UnassignedShardDecision.throttleDecision(
+                    "returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one " +
+                        "of those copies", matchingNodes.nodeDecisions);
             } else {
-                logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
+                logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store",
+                    unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
                 // we found a match
-                unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
-            }
-        } else if (matchingNodes.hasAnyData() == false) {
-            // if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
-            ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes());
-        }
-        }
-    }
-
-    /**
-     * Check if the allocation of the replica is to be delayed. Compute the delay and if it is delayed, add it to the ignore unassigned list
-     * Note: we only care about replica in delayed allocation, since if we have an unassigned primary it
-     * will anyhow wait to find an existing copy of the shard to be allocated
-     * Note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService
-     *
-     * PUBLIC FOR TESTS!
-     *
-     * @param unassignedIterator iterator over unassigned shards
-     * @param shard the shard which might be delayed
-     */
-    public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard, RoutingChangesObserver changes) {
-        if (shard.unassignedInfo().isDelayed()) {
-            logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard);
-            unassignedIterator.removeAndIgnore(AllocationStatus.DELAYED_ALLOCATION, changes);
+                return UnassignedShardDecision.yesDecision(
+                    "allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store",
+                    nodeWithHighestMatch.nodeId(), null, matchingNodes.nodeDecisions);
+            }
+        } else if (matchingNodes.hasAnyData() == false && unassignedShard.unassignedInfo().isDelayed()) {
+            // if we didn't manage to find *any* data (regardless of matching sizes), and the replica is
+            // unassigned due to a node leaving, so we delay allocation of this replica to see if the
+            // node with the shard copy will rejoin so we can re-use the copy it has
+            logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard);
+            return UnassignedShardDecision.noDecision(AllocationStatus.DELAYED_ALLOCATION,
+                "not allocating this shard, no nodes contain data for the replica and allocation is delayed");
         }
+
+        return UnassignedShardDecision.DECISION_NOT_TAKEN;
     }

     /**
@@ -215,10 +218,15 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
      *
      * Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one
      * node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided
-     * YES or THROTTLE.
+     * YES or THROTTLE). If the explain flag is turned on AND the decision is NO or THROTTLE, then this method
+     * also returns a map of nodes to decisions (second value in the tuple) to use for explanations; if the explain
+     * flag is off, the second value in the return tuple will be null.
      */
-    private Decision canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
+    private Tuple<Decision, Map<String, Decision>> canBeAllocatedToAtLeastOneNode(ShardRouting shard,
+                                                                                  RoutingAllocation allocation,
+                                                                                  boolean explain) {
         Decision madeDecision = Decision.NO;
+        Map<String, Decision> nodeDecisions = new HashMap<>();
         for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
             RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
             if (node == null) {
@@ -227,13 +235,16 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             // if we can't allocate it on a node, ignore it, for example, this handles
             // cases for only allocating a replica after a primary
             Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+            if (explain) {
+                nodeDecisions.put(node.nodeId(), decision);
+            }
             if (decision.type() == Decision.Type.YES) {
-                return decision;
+                return Tuple.tuple(decision, null);
             } else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) {
                 madeDecision = decision;
             }
         }
-        return madeDecision;
+        return Tuple.tuple(madeDecision, explain ? nodeDecisions : null);
     }

     /**
@@ -254,8 +265,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
     private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
                                             TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
-                                            AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
+                                            AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data,
+                                            boolean explain) {
         ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
+        Map<String, Decision> nodeDecisions = new HashMap<>();
         for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
             DiscoveryNode discoNode = nodeStoreEntry.getKey();
             TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
@@ -273,6 +286,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             // we only check for NO, since if this node is THROTTLING and it has enough "same data"
             // then we will try and assign it next time
             Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
+            if (explain) {
+                nodeDecisions.put(node.nodeId(), decision);
+            }
             if (decision.type() == Decision.Type.NO) {
                 continue;
             }
@@ -297,7 +314,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
             }
         }

-        return new MatchingNodes(nodesToSize);
+        return new MatchingNodes(nodesToSize, explain ? nodeDecisions : null);
     }

     protected abstract AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation);
@@ -305,9 +322,12 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
     static class MatchingNodes {
         private final ObjectLongMap<DiscoveryNode> nodesToSize;
         private final DiscoveryNode nodeWithHighestMatch;
+        @Nullable
+        private final Map<String, Decision> nodeDecisions;

-        public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize) {
+        public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize, @Nullable Map<String, Decision> nodeDecisions) {
             this.nodesToSize = nodesToSize;
+            this.nodeDecisions = nodeDecisions;

             long highestMatchSize = 0;
             DiscoveryNode highestMatchNode = null;
@@ -340,5 +360,13 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
         public boolean hasAnyData() {
             return nodesToSize.isEmpty() == false;
         }
+
+        /**
+         * The decisions map for all nodes with a shard copy, if available.
+         */
+        @Nullable
+        public Map<String, Decision> getNodeDecisions() {
+            return nodeDecisions;
+        }
     }
 }

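The explain plumbing in ReplicaShardAllocator leans on the Tuple pattern from canBeAllocatedToAtLeastOneNode: the first element is the overall decision, the second is the per-node map, populated only in explain mode. A condensed sketch of the pattern (the node id is hypothetical):

    import java.util.HashMap;
    import java.util.Map;

    import org.elasticsearch.cluster.routing.allocation.decider.Decision;
    import org.elasticsearch.common.collect.Tuple;

    class DecisionTupleSketch {
        static Tuple<Decision, Map<String, Decision>> decide(boolean explain) {
            Decision madeDecision = Decision.NO;
            Map<String, Decision> nodeDecisions = new HashMap<>();
            if (explain) {
                nodeDecisions.put("node-1", madeDecision); // hypothetical node id
            }
            // the map is only handed back in explain mode, as in the diff above
            return Tuple.tuple(madeDecision, explain ? nodeDecisions : null);
        }
    }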
View File

@@ -76,11 +76,11 @@ public final class ShardGetService extends AbstractIndexShardComponent {
         return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
     }

-    public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
+    public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
         currentMetric.inc();
         try {
             long now = System.nanoTime();
-            GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext, ignoreErrorsOnGeneratedFields);
+            GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext);

             if (getResult.isExists()) {
                 existsMetric.inc(System.nanoTime() - now);
@@ -139,7 +139,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
         return FetchSourceContext.DO_NOT_FETCH_SOURCE;
     }

-    private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
+    private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
         fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);

         Engine.GetResult get = null;

View File

@@ -113,6 +113,7 @@ import org.elasticsearch.script.ScriptModule;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.SearchModule;
 import org.elasticsearch.search.SearchService;
+import org.elasticsearch.search.fetch.FetchPhase;
 import org.elasticsearch.snapshots.SnapshotShardsService;
 import org.elasticsearch.snapshots.SnapshotsService;
 import org.elasticsearch.tasks.TaskResultsService;
@@ -122,7 +123,6 @@ import org.elasticsearch.transport.TransportService;
 import org.elasticsearch.tribe.TribeService;
 import org.elasticsearch.watcher.ResourceWatcherService;
-import javax.management.MBeanServerPermission;
 import java.io.BufferedWriter;
 import java.io.Closeable;
 import java.io.IOException;
@@ -133,13 +133,11 @@ import java.nio.charset.Charset;
 import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.StandardCopyOption;
-import java.security.AccessControlException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
-import java.util.Locale;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
@@ -383,12 +381,8 @@ public class Node implements Closeable {
 b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader);
 b.bind(MetaStateService.class).toInstance(metaStateService);
 b.bind(IndicesService.class).toInstance(indicesService);
-Class<? extends SearchService> searchServiceImpl = pickSearchServiceImplementation();
-if (searchServiceImpl == SearchService.class) {
-b.bind(SearchService.class).asEagerSingleton();
-} else {
-b.bind(SearchService.class).to(searchServiceImpl).asEagerSingleton();
-}
+b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
+threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase()));
 pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p));
 }
@@ -793,10 +787,12 @@ public class Node implements Closeable {
 }
 /**
-* Select the search service implementation. Overrided by tests.
+* Creates a new SearchService. This method can be overridden by tests to inject mock implementations.
 */
-protected Class<? extends SearchService> pickSearchServiceImplementation() {
-return SearchService.class;
+protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
+ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
+FetchPhase fetchPhase) {
+return new SearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
 }
 /**
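Since SearchService is now constructed directly instead of being bound by type, a test can swap in an instrumented implementation by overriding the factory method. A sketch, assuming a hypothetical MockSearchService subclass and the public Node(Settings) constructor:

    // Sketch: a test node that swaps in an instrumented SearchService.
    class TestNode extends Node {
        TestNode(Settings settings) {
            super(settings);
        }

        @Override
        protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
                                                 ThreadPool threadPool, ScriptService scriptService,
                                                 BigArrays bigArrays, FetchPhase fetchPhase) {
            // MockSearchService is hypothetical here: any subclass that records created
            // contexts or injects failures would do.
            return new MockSearchService(clusterService, indicesService, threadPool,
                scriptService, bigArrays, fetchPhase);
        }
    }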

View File

@@ -60,8 +60,8 @@ class ListPluginsCommand extends SettingCommand {
 }
 Collections.sort(plugins);
 for (final Path plugin : plugins) {
-terminal.println(plugin.getFileName().toString());
 PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
+terminal.println(plugin.getFileName().toString() + "@" + info.getVersion());
 terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
 }
 }
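The list command now appends the plugin version after an @ separator. Assuming a single installed plugin, the output would look something like:

    analysis-icu@5.0.0

where previously only the name (analysis-icu) was printed; the plugin name and version shown here are illustrative.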

View File

@@ -58,7 +58,6 @@ public class RestGetAction extends BaseRestHandler {
 getRequest.parent(request.param("parent"));
 getRequest.preference(request.param("preference"));
 getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
-getRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false));
 String sField = request.param("fields");
 if (sField != null) {

View File

@@ -59,7 +59,6 @@ public class RestMultiGetAction extends BaseRestHandler {
 multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
 multiGetRequest.preference(request.param("preference"));
 multiGetRequest.realtime(request.paramAsBoolean("realtime", multiGetRequest.realtime()));
-multiGetRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false));
 String[] sFields = null;
 String sField = request.param("fields");

View File

@@ -17,7 +17,7 @@
 * under the License.
 */
-package org.elasticsearch.search.internal;
+package org.elasticsearch.search;
 import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanClause.Occur;
@@ -53,8 +53,6 @@ import org.elasticsearch.index.query.QueryShardContext;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.index.similarity.SimilarityService;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.SearchExtBuilder;
-import org.elasticsearch.search.SearchShardTarget;
 import org.elasticsearch.search.aggregations.SearchContextAggregations;
 import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.fetch.FetchPhase;
@@ -64,6 +62,10 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
 import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
+import org.elasticsearch.search.internal.ContextIndexSearcher;
+import org.elasticsearch.search.internal.ScrollContext;
+import org.elasticsearch.search.internal.SearchContext;
+import org.elasticsearch.search.internal.ShardSearchRequest;
 import org.elasticsearch.search.lookup.SearchLookup;
 import org.elasticsearch.search.profile.Profilers;
 import org.elasticsearch.search.query.QueryPhaseExecutionException;
@@ -80,7 +82,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-public class DefaultSearchContext extends SearchContext {
+final class DefaultSearchContext extends SearchContext {
 private final long id;
 private final ShardSearchRequest request;
@@ -123,10 +125,7 @@ public class DefaultSearchContext extends SearchContext {
 * things like the type filter or alias filters.
 */
 private ParsedQuery originalQuery;
-/**
-* Just like originalQuery but with the filters from types, aliases and slice applied.
-*/
-private ParsedQuery filteredQuery;
 /**
 * The query to actually execute.
 */
@@ -151,7 +150,7 @@ public class DefaultSearchContext extends SearchContext {
 private final QueryShardContext queryShardContext;
 private FetchPhase fetchPhase;
-public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
+DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
 IndexService indexService, IndexShard indexShard, ScriptService scriptService,
 BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout,
 FetchPhase fetchPhase) {
@@ -187,7 +186,7 @@ public class DefaultSearchContext extends SearchContext {
 * Should be called before executing the main query and after all other parameters have been set.
 */
 @Override
-public void preProcess() {
+public void preProcess(boolean rewrite) {
 if (hasOnlySuggest() ) {
 return;
 }
@@ -241,20 +240,22 @@
 if (queryBoost() != AbstractQueryBuilder.DEFAULT_BOOST) {
 parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new WeightFactorFunction(queryBoost)), parsedQuery()));
 }
-filteredQuery(buildFilteredQuery());
+this.query = buildFilteredQuery();
+if (rewrite) {
 try {
-this.query = searcher().rewrite(this.query);
+this.query = searcher.rewrite(query);
 } catch (IOException e) {
 throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e);
 }
 }
+}
-private ParsedQuery buildFilteredQuery() {
-Query searchFilter = searchFilter(queryShardContext.getTypes());
-if (searchFilter == null) {
-return originalQuery;
-}
-Query result;
+private Query buildFilteredQuery() {
+final Query searchFilter = searchFilter(queryShardContext.getTypes());
+if (searchFilter == null) {
+return originalQuery.query();
+}
+final Query result;
 if (Queries.isConstantMatchAllQuery(query())) {
 result = new ConstantScoreQuery(searchFilter);
 } else {
@@ -263,7 +264,7 @@ public class DefaultSearchContext extends SearchContext {
 .add(searchFilter, Occur.FILTER)
 .build();
 }
-return new ParsedQuery(result, originalQuery);
+return result;
 }
 @Override
@@ -618,15 +619,6 @@ public class DefaultSearchContext extends SearchContext {
 return this;
 }
-public ParsedQuery filteredQuery() {
-return filteredQuery;
-}
-private void filteredQuery(ParsedQuery filteredQuery) {
-this.filteredQuery = filteredQuery;
-this.query = filteredQuery.query();
-}
 @Override
 public ParsedQuery parsedQuery() {
 return this.originalQuery;
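The filtered query is now built as a plain Lucene Query rather than a ParsedQuery wrapper. A runnable sketch of the combining logic (simplified: the real code tests Queries.isConstantMatchAllQuery rather than an instanceof check, and searchFilter comes from types/aliases):

    import org.apache.lucene.index.Term;
    import org.apache.lucene.search.BooleanClause.Occur;
    import org.apache.lucene.search.BooleanQuery;
    import org.apache.lucene.search.ConstantScoreQuery;
    import org.apache.lucene.search.MatchAllDocsQuery;
    import org.apache.lucene.search.Query;
    import org.apache.lucene.search.TermQuery;

    class FilteredQuerySketch {
        // Mirrors the two branches of buildFilteredQuery: a match_all main query collapses
        // to a pure filter, anything else gets the filter as a non-scoring FILTER clause.
        static Query filter(Query main, Query searchFilter) {
            if (searchFilter == null) {
                return main;
            }
            if (main instanceof MatchAllDocsQuery) {
                return new ConstantScoreQuery(searchFilter);
            }
            return new BooleanQuery.Builder()
                .add(main, Occur.MUST)
                .add(searchFilter, Occur.FILTER)
                .build();
        }

        public static void main(String[] args) {
            Query main = new TermQuery(new Term("user", "kimchy"));
            Query typeFilter = new TermQuery(new Term("_type", "tweet")); // stand-in for searchFilter(types)
            System.out.println(filter(main, typeFilter)); // +user:kimchy #_type:tweet
        }
    }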

View File

@@ -95,7 +95,6 @@ import org.elasticsearch.plugins.SearchPlugin.QuerySpec;
 import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec;
 import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec;
 import org.elasticsearch.plugins.SearchPlugin.SearchExtensionSpec;
-import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.search.aggregations.AggregationBuilder;
 import org.elasticsearch.search.aggregations.Aggregator;
 import org.elasticsearch.search.aggregations.AggregatorParsers;
@@ -243,7 +242,6 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
 import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;
 import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;
 import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchPhase;
 import org.elasticsearch.search.fetch.FetchSubPhase;
 import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
@@ -384,7 +382,6 @@ public class SearchModule extends AbstractModule {
 bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry);
 bind(SearchRequestParsers.class).toInstance(searchRequestParsers);
 bind(SearchExtRegistry.class).toInstance(searchExtParserRegistry);
-configureSearch();
 }
 }
@@ -574,13 +571,6 @@ public class SearchModule extends AbstractModule {
 }
 }
-protected void configureSearch() {
-// configure search private classes...
-bind(SearchPhaseController.class).asEagerSingleton();
-bind(FetchPhase.class).toInstance(new FetchPhase(fetchSubPhases));
-bind(SearchTransportService.class).asEagerSingleton();
-}
 private void registerShapes() {
 if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
 ShapeBuilders.register(namedWriteables);
@@ -817,4 +807,8 @@ public class SearchModule extends AbstractModule {
 queryParserRegistry.register(spec.getParser(), spec.getName());
 namedWriteables.add(new Entry(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
 }
+
+public FetchPhase getFetchPhase() {
+return new FetchPhase(fetchSubPhases);
+}
 }

View File

@@ -29,9 +29,7 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.ParseFieldMatcher;
 import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.lucene.Lucene;
-import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
@@ -66,7 +64,6 @@ import org.elasticsearch.search.fetch.ShardFetchRequest;
 import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
 import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField;
 import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
-import org.elasticsearch.search.internal.DefaultSearchContext;
 import org.elasticsearch.search.internal.InternalScrollSearchRequest;
 import org.elasticsearch.search.internal.ScrollContext;
 import org.elasticsearch.search.internal.SearchContext;
@@ -141,10 +138,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
 private final ParseFieldMatcher parseFieldMatcher;
-@Inject
-public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService,
+public SearchService(ClusterService clusterService, IndicesService indicesService,
 ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase) {
-super(settings);
+super(clusterService.getSettings());
 this.parseFieldMatcher = new ParseFieldMatcher(settings);
 this.threadPool = threadPool;
 this.clusterService = clusterService;
@@ -160,7 +156,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
 this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME);
 defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
-clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
+clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
 }
 private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) {
@@ -520,16 +516,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
 }
 final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
-IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-IndexShard indexShard = indexService.getShard(request.shardId().getId());
-SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
-Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
-DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
-indexService,
-indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
-defaultSearchTimeout, fetchPhase);
+DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher);
 SearchContext.setCurrent(context);
 try {
 request.rewrite(context.getQueryShardContext());
@@ -572,6 +560,18 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
 return context;
 }
+
+public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) {
+IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
+IndexShard indexShard = indexService.getShard(request.shardId().getId());
+SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
+Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
+return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
+indexService,
+indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
+timeout, fetchPhase);
+}
+
 private void freeAllContextForIndex(Index index) {
 assert index != null;
 for (SearchContext ctx : activeContexts.values()) {
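The extracted createSearchContext gives callers outside the regular search path a way to build a context with their own timeout. A hedged usage sketch (the request and timeout values are illustrative):

    // Passing a null searcher makes the method acquire one from the shard itself.
    DefaultSearchContext context = searchService.createSearchContext(request, TimeValue.timeValueSeconds(5), null);
    try {
        context.preProcess(true); // rewrite the query against the acquired searcher
    } finally {
        context.close();
    }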

View File

@@ -36,9 +36,6 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
 return;
 }
 SourceLookup source = context.lookup().source();
-if (source.internalSourceRef() == null) {
-return; // source disabled in the mapping
-}
 FetchSourceContext fetchSourceContext = context.fetchSourceContext();
 assert fetchSourceContext.fetchSource();
 if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
@@ -46,6 +43,11 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
 return;
 }
+if (source.internalSourceRef() == null) {
+throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " +
+"for index [" + context.indexShard().shardId().getIndexName() + "]");
+}
 Object value = source.filter(fetchSourceContext.includes(), fetchSourceContext.excludes());
 try {
 final int initialCapacity = Math.min(1024, source.internalSourceRef().length());
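In practice this turns a silent no-op into a hard failure: requesting _source filtering against an index whose mapping disables _source now raises the error above. A hedged sketch of a request that would now fail (the index and field names are illustrative; client is a standard transport Client):

    // Assumes "logs" was created with "_source": {"enabled": false} in its mapping.
    client.prepareSearch("logs")
        .setQuery(QueryBuilders.matchAllQuery())
        .setFetchSource("message", null)   // an include filter forces this sub phase to read _source
        .get();                            // now throws IllegalArgumentException instead of returning nothing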

View File

@@ -78,10 +78,7 @@ public final class CustomQueryScorer extends QueryScorer {
 @Override
 protected void extractUnknownQuery(Query query,
 Map<String, WeightedSpanTerm> terms) throws IOException {
-if (query instanceof FunctionScoreQuery) {
-query = ((FunctionScoreQuery) query).getSubQuery();
-extract(query, 1F, terms);
-} else if (query instanceof FiltersFunctionScoreQuery) {
+if (query instanceof FiltersFunctionScoreQuery) {
 query = ((FiltersFunctionScoreQuery) query).getSubQuery();
 extract(query, 1F, terms);
 } else if (terms.isEmpty()) {
@@ -97,9 +94,11 @@ public final class CustomQueryScorer extends QueryScorer {
 } else if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
 // skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999
 return;
-}
+} else if (query instanceof FunctionScoreQuery) {
+super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms);
+} else {
 super.extract(query, boost, terms);
 }
 }
+}
 }

View File

@@ -100,8 +100,8 @@ public abstract class FilteredSearchContext extends SearchContext {
 }
 @Override
-public void preProcess() {
-in.preProcess();
+public void preProcess(boolean rewrite) {
+in.preProcess(rewrite);
 }
 @Override

View File

@@ -139,8 +139,9 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas
 /**
 * Should be called before executing the main query and after all other parameters have been set.
+* @param rewrite if the set query should be rewritten against the searcher returned from {@link #searcher()}
 */
-public abstract void preProcess();
+public abstract void preProcess(boolean rewrite);
 public abstract Query searchFilter(String[] types);

View File

@@ -81,14 +81,11 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
 this.nowInMillis = nowInMillis;
 }
-public ShardSearchLocalRequest(String[] types, long nowInMillis) {
+public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) {
 this.types = types;
 this.nowInMillis = nowInMillis;
-}
-public ShardSearchLocalRequest(String[] types, long nowInMillis, String[] filteringAliases) {
-this(types, nowInMillis);
 this.filteringAliases = filteringAliases;
+this.shardId = shardId;
 }
 public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
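The two overloads collapse into a single constructor that always carries the shard id. A hedged construction sketch (all values are illustrative):

    ShardSearchLocalRequest request = new ShardSearchLocalRequest(
        new ShardId("index", "_na_", 0),      // shard the request targets
        new String[] { "my_type" },           // types to filter on
        System.currentTimeMillis(),           // nowInMillis
        new String[] { "my_alias" });         // filtering aliases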

View File

@@ -77,7 +77,7 @@ public class SubSearchContext extends FilteredSearchContext {
 }
 @Override
-public void preProcess() {
+public void preProcess(boolean rewrite) {
 }
 @Override

View File

@@ -85,7 +85,7 @@ public class QueryPhase implements SearchPhase {
 @Override
 public void preProcess(SearchContext context) {
-context.preProcess();
+context.preProcess(true);
 }
 @Override

View File

@@ -86,7 +86,7 @@ import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.script.MockScriptPlugin;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
-import org.elasticsearch.search.action.SearchTransportService;
+import org.elasticsearch.action.search.SearchTransportService;
 import org.elasticsearch.tasks.Task;
 import org.elasticsearch.test.ESIntegTestCase;
 import org.elasticsearch.test.ESIntegTestCase.ClusterScope;

View File

@@ -42,8 +42,6 @@ public class MultiGetShardRequestTests extends ESTestCase {
 if (randomBoolean()) {
 multiGetRequest.refresh(true);
 }
-multiGetRequest.ignoreErrorsOnGeneratedFields(randomBoolean());
 MultiGetShardRequest multiGetShardRequest = new MultiGetShardRequest(multiGetRequest, "index", 0);
 int numItems = iterations(10, 30);
 for (int i = 0; i < numItems; i++) {
@@ -79,7 +77,6 @@ public class MultiGetShardRequestTests extends ESTestCase {
 assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference()));
 assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime()));
 assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh()));
-assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
 assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size()));
 for (int i = 0; i < multiGetShardRequest2.items.size(); i++) {
 MultiGetRequest.Item item = multiGetShardRequest.items.get(i);

View File

@@ -17,10 +17,11 @@
 * under the License.
 */
-package org.elasticsearch.search.controller;
+package org.elasticsearch.action.search;
 import org.apache.lucene.search.ScoreDoc;
 import org.apache.lucene.search.TopDocs;
+import org.elasticsearch.action.search.SearchPhaseController;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.text.Text;
 import org.elasticsearch.common.util.BigArrays;

View File

@@ -49,6 +49,10 @@ public class WildcardExpressionResolverTests extends ESTestCase {
 assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
 assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
 assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
+assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY")));
+assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0));
+assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY")));
+assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0));
 }
 public void testConvertWildcardsTests() {
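The new assertions pin down how explicit names combine with +/- wildcard expressions. At the REST layer the same expressions could appear in any multi-index request, for example (illustrative, based only on the assertions above):

    GET /testXXX,-testX*/_search    -> resolves to no concrete indices
    GET /testXXX,+testY*/_search    -> resolves to testXXX plus everything matching testY*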

View File

@@ -0,0 +1,116 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.cluster.routing.allocation;
+
+import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
+import org.elasticsearch.cluster.routing.allocation.decider.Decision;
+import org.elasticsearch.test.ESTestCase;
+
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Unit tests for the {@link UnassignedShardDecision} class.
+ */
+public class UnassignedShardDecisionTests extends ESTestCase {
+
+public void testDecisionNotTaken() {
+UnassignedShardDecision unassignedShardDecision = UnassignedShardDecision.DECISION_NOT_TAKEN;
+assertFalse(unassignedShardDecision.isDecisionTaken());
+assertNull(unassignedShardDecision.getFinalDecision());
+assertNull(unassignedShardDecision.getAllocationStatus());
+assertNull(unassignedShardDecision.getAllocationId());
+assertNull(unassignedShardDecision.getAssignedNodeId());
+assertNull(unassignedShardDecision.getFinalExplanation());
+assertNull(unassignedShardDecision.getNodeDecisions());
+expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalDecisionSafe());
+expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalExplanationSafe());
+}
+
+public void testNoDecision() {
+final AllocationStatus allocationStatus = randomFrom(
+AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA
+);
+UnassignedShardDecision noDecision = UnassignedShardDecision.noDecision(allocationStatus, "something is wrong");
+assertTrue(noDecision.isDecisionTaken());
+assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type());
+assertEquals(allocationStatus, noDecision.getAllocationStatus());
+assertEquals("something is wrong", noDecision.getFinalExplanation());
+assertNull(noDecision.getNodeDecisions());
+assertNull(noDecision.getAssignedNodeId());
+assertNull(noDecision.getAllocationId());
+
+Map<String, Decision> nodeDecisions = new HashMap<>();
+nodeDecisions.put("node1", Decision.NO);
+nodeDecisions.put("node2", Decision.NO);
+noDecision = UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, "something is wrong", nodeDecisions);
+assertTrue(noDecision.isDecisionTaken());
+assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type());
+assertEquals(AllocationStatus.DECIDERS_NO, noDecision.getAllocationStatus());
+assertEquals("something is wrong", noDecision.getFinalExplanation());
+assertEquals(nodeDecisions, noDecision.getNodeDecisions());
+assertNull(noDecision.getAssignedNodeId());
+assertNull(noDecision.getAllocationId());
+
+// test bad values
+expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(null, "a"));
+expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, null));
+}
+
+public void testThrottleDecision() {
+Map<String, Decision> nodeDecisions = new HashMap<>();
+nodeDecisions.put("node1", Decision.NO);
+nodeDecisions.put("node2", Decision.THROTTLE);
+UnassignedShardDecision throttleDecision = UnassignedShardDecision.throttleDecision("too much happening", nodeDecisions);
+assertTrue(throttleDecision.isDecisionTaken());
+assertEquals(Decision.Type.THROTTLE, throttleDecision.getFinalDecision().type());
+assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus());
+assertEquals("too much happening", throttleDecision.getFinalExplanation());
+assertEquals(nodeDecisions, throttleDecision.getNodeDecisions());
+assertNull(throttleDecision.getAssignedNodeId());
+assertNull(throttleDecision.getAllocationId());
+
+// test bad values
+expectThrows(NullPointerException.class, () -> UnassignedShardDecision.throttleDecision(null, Collections.emptyMap()));
+}
+
+public void testYesDecision() {
+Map<String, Decision> nodeDecisions = new HashMap<>();
+nodeDecisions.put("node1", Decision.YES);
+nodeDecisions.put("node2", Decision.NO);
+String allocId = randomBoolean() ? "allocId" : null;
+UnassignedShardDecision yesDecision = UnassignedShardDecision.yesDecision(
+"node was very kind", "node1", allocId, nodeDecisions
+);
+assertTrue(yesDecision.isDecisionTaken());
+assertEquals(Decision.Type.YES, yesDecision.getFinalDecision().type());
+assertNull(yesDecision.getAllocationStatus());
+assertEquals("node was very kind", yesDecision.getFinalExplanation());
+assertEquals(nodeDecisions, yesDecision.getNodeDecisions());
+assertEquals("node1", yesDecision.getAssignedNodeId());
+assertEquals(allocId, yesDecision.getAllocationId());
+
+expectThrows(NullPointerException.class,
+() -> UnassignedShardDecision.yesDecision(null, "a", randomBoolean() ? "a" : null, Collections.emptyMap()));
+expectThrows(NullPointerException.class,
+() -> UnassignedShardDecision.yesDecision("a", null, null, Collections.emptyMap()));
+}
+}
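Read together, the tests outline the intended call pattern for the class under test. A condensed sketch (how the result is surfaced, for example through the allocation explain API, is an assumption and not part of this file):

    Map<String, Decision> nodeDecisions = new HashMap<>();
    nodeDecisions.put("node1", Decision.NO);
    nodeDecisions.put("node2", Decision.THROTTLE);
    UnassignedShardDecision decision =
        UnassignedShardDecision.throttleDecision("allocation throttled", nodeDecisions);
    if (decision.isDecisionTaken()) {
        // getFinalDecisionSafe() only throws when no decision was taken, so it is safe here.
        Decision.Type type = decision.getFinalDecisionSafe().type();
    }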

View File

@@ -20,25 +20,44 @@
 package org.elasticsearch.discovery.zen;
 import org.elasticsearch.Version;
+import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.node.DiscoveryNode.Role;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
+import org.elasticsearch.cluster.service.ClusterService;
+import org.elasticsearch.common.settings.ClusterSettings;
+import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.LocalTransportAddress;
+import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.ping.ZenPing;
+import org.elasticsearch.discovery.zen.ping.ZenPingService;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener;
+import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.MockNode;
 import org.elasticsearch.test.ESTestCase;
+import org.elasticsearch.threadpool.TestThreadPool;
+import org.elasticsearch.threadpool.ThreadPool;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
+import java.util.concurrent.TimeUnit;
 import java.util.stream.Collectors;
 import static java.util.Collections.emptyMap;
 import static java.util.Collections.emptySet;
 import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
+import static org.elasticsearch.discovery.zen.elect.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
+import static org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.createMockNode;
+import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
+import static org.elasticsearch.test.ClusterServiceUtils.setState;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -107,7 +126,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
 ArrayList<DiscoveryNode> masterNodes = new ArrayList<>();
 ArrayList<DiscoveryNode> allNodes = new ArrayList<>();
 for (int i = randomIntBetween(10, 20); i >= 0; i--) {
-Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())));
+Set<Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values())));
 DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
 roles, Version.CURRENT);
 responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean()));
@@ -127,4 +146,80 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
 assertThat(filteredNodes, equalTo(allNodes));
 }
 }
+
+public void testNodesUpdatedAfterClusterStatePublished() throws Exception {
+ThreadPool threadPool = new TestThreadPool(getClass().getName());
+// randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
+int minMasterNodes = randomBoolean() ? 3 : 1;
+Settings settings = Settings.builder()
+.put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build();
+Map<String, MockNode> nodes = new HashMap<>();
+ZenDiscovery zenDiscovery = null;
+ClusterService clusterService = null;
+try {
+Set<DiscoveryNode> expectedFDNodes = null;
+// create master node and its mocked up services
+MockNode master = createMockNode("master", settings, null, threadPool, logger, nodes).setAsMaster();
+ClusterState state = master.clusterState; // initial cluster state
+// build the zen discovery and cluster service
+clusterService = createClusterService(threadPool, master.discoveryNode);
+setState(clusterService, state);
+zenDiscovery = buildZenDiscovery(settings, master, clusterService, threadPool);
+// a new cluster state with a new discovery node (we will test if the cluster state
+// was updated by the presence of this node in NodesFaultDetection)
+MockNode newNode = createMockNode("new_node", settings, null, threadPool, logger, nodes);
+ClusterState newState = ClusterState.builder(state).incrementVersion().nodes(
+DiscoveryNodes.builder(state.nodes()).add(newNode.discoveryNode).masterNodeId(master.discoveryNode.getId())
+).build();
+try {
+// publishing a new cluster state
+ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
+AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
+expectedFDNodes = zenDiscovery.getFaultDetectionNodes();
+zenDiscovery.publish(clusterChangedEvent, listener);
+listener.await(1, TimeUnit.HOURS);
+// publish was a success, update expected FD nodes based on new cluster state
+expectedFDNodes = fdNodesForState(newState, master.discoveryNode);
+} catch (Discovery.FailedToCommitClusterStateException e) {
+// not successful, so expectedFDNodes above should remain what it was originally assigned
+assertEquals(3, minMasterNodes); // ensure min master nodes is the higher value, otherwise we shouldn't fail
+}
+assertEquals(expectedFDNodes, zenDiscovery.getFaultDetectionNodes());
+} finally {
+// clean close of transport service and publish action for each node
+zenDiscovery.close();
+clusterService.close();
+for (MockNode curNode : nodes.values()) {
+curNode.action.close();
+curNode.service.close();
+}
+terminate(threadPool);
+}
+}
+
+private ZenDiscovery buildZenDiscovery(Settings settings, MockNode master, ClusterService clusterService, ThreadPool threadPool) {
+ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
+ZenPingService zenPingService = new ZenPingService(settings, Collections.emptySet());
+ElectMasterService electMasterService = new ElectMasterService(settings);
+ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, master.service, clusterService,
+clusterSettings, zenPingService, electMasterService);
+zenDiscovery.start();
+return zenDiscovery;
+}
+
+private Set<DiscoveryNode> fdNodesForState(ClusterState clusterState, DiscoveryNode localNode) {
+final Set<DiscoveryNode> discoveryNodes = new HashSet<>();
+clusterState.getNodes().getNodes().valuesIt().forEachRemaining(discoveryNode -> {
+// the local node isn't part of the nodes that are pinged (don't ping ourselves)
+if (discoveryNode.getId().equals(localNode.getId()) == false) {
+discoveryNodes.add(discoveryNode);
+}
+});
+return discoveryNodes;
+}
 }

View File

@@ -145,21 +145,22 @@ public class PublishClusterStateActionTests extends ESTestCase {
 }
 public MockNode createMockNode(final String name) throws Exception {
-return createMockNode(name, Settings.EMPTY);
-}
-
-public MockNode createMockNode(String name, Settings settings) throws Exception {
-return createMockNode(name, settings, null);
+return createMockNode(name, Settings.EMPTY, null);
 }
 public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception {
+return createMockNode(name, basSettings, listener, threadPool, logger, nodes);
+}
+
+public static MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener,
+ThreadPool threadPool, Logger logger, Map<String, MockNode> nodes) throws Exception {
 final Settings settings = Settings.builder()
 .put("name", name)
 .put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
 .put(basSettings)
 .build();
-MockTransportService service = buildTransportService(settings);
+MockTransportService service = buildTransportService(settings, threadPool);
 DiscoveryNode discoveryNode = DiscoveryNode.createLocal(settings, service.boundAddress().publishAddress(),
 NodeEnvironment.generateNodeId(settings));
 MockNode node = new MockNode(discoveryNode, service, listener, logger);
@@ -228,14 +229,14 @@ public class PublishClusterStateActionTests extends ESTestCase {
 terminate(threadPool);
 }
-protected MockTransportService buildTransportService(Settings settings) {
-MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
+private static MockTransportService buildTransportService(Settings settings, ThreadPool threadPool) {
+MockTransportService transportService = MockTransportService.local(settings, Version.CURRENT, threadPool);
 transportService.start();
 transportService.acceptIncomingRequests();
 return transportService;
 }
-protected MockPublishAction buildPublishClusterStateAction(
+private static MockPublishAction buildPublishClusterStateAction(
 Settings settings,
 MockTransportService transportService,
 Supplier<ClusterState> clusterStateSupplier,
@@ -253,8 +254,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
 }
 public void testSimpleClusterStatePublishing() throws Exception {
-MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster();
-MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
+MockNode nodeA = createMockNode("nodeA").setAsMaster();
+MockNode nodeB = createMockNode("nodeB");
 // Initial cluster state
 ClusterState clusterState = nodeA.clusterState;
@@ -282,7 +283,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 // Adding new node - this node should get full cluster state while nodeB should still be getting diffs
-MockNode nodeC = createMockNode("nodeC", Settings.EMPTY);
+MockNode nodeC = createMockNode("nodeC");
 // cluster state update 3 - register node C
 previousClusterState = clusterState;
@@ -336,7 +337,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 fail("Shouldn't send cluster state to myself");
 }).setAsMaster();
-MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
+MockNode nodeB = createMockNode("nodeB");
 // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
 DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
@@ -444,7 +445,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 }
 }).setAsMaster();
-MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
+MockNode nodeB = createMockNode("nodeB");
 // Initial cluster state with both states - the second node still shouldn't get diff even though it's present in the previous cluster state
 DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
@@ -495,7 +496,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 final int dataNodes = randomIntBetween(0, 5);
 final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build();
 for (int i = 0; i < dataNodes; i++) {
-discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings).discoveryNode);
+discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings, null).discoveryNode);
 }
 discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
 DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
@@ -521,7 +522,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h")
 .put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing
-MockNode master = createMockNode("master", settings.build());
+MockNode master = createMockNode("master", settings.build(), null);
 // randomize things a bit
 int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes];
@@ -551,7 +552,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
 }
 final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter
 for (int i = 0; i < dataNodes; i++) {
-final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build());
+final MockNode mockNode = createMockNode("data_" + i,
+Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(), null);
 discoveryNodesBuilder.add(mockNode.discoveryNode);
 if (randomBoolean()) {
 // we really don't care - just chaos monkey
@@ -726,8 +728,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
 Settings settings = Settings.builder()
 .put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout
-MockNode master = createMockNode("master", settings);
-MockNode node = createMockNode("node", settings);
+MockNode master = createMockNode("master", settings, null);
+MockNode node = createMockNode("node", settings, null);
 ClusterState state = ClusterState.builder(master.clusterState)
 .nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build();
@@ -843,7 +845,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
 assertFalse(actual.wasReadFromDiff());
 }
-static class MockPublishAction extends PublishClusterStateAction {
+public static class MockPublishAction extends PublishClusterStateAction {
 AtomicBoolean timeoutOnSend = new AtomicBoolean();
 AtomicBoolean errorOnSend = new AtomicBoolean();

View File

@@ -396,49 +396,6 @@ public class NodeEnvironmentTests extends ESTestCase {
 env.close();
 }
-public void testWhetherClusterFolderShouldBeUsed() throws Exception {
-Path tempNoCluster = createTempDir();
-Path tempDataPath = tempNoCluster.toAbsolutePath();
-Path tempPath = tempNoCluster.resolve("foo"); // "foo" is the cluster name
-Path tempClusterPath = tempPath.toAbsolutePath();
-assertFalse("non-existent directory should not be used", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
-Settings settings = Settings.builder()
-.put("cluster.name", "foo")
-.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
-.put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
-try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
-Path nodeDataPath = env.nodeDataPaths()[0];
-assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
-}
-IOUtils.rm(tempNoCluster);
-Files.createDirectories(tempPath);
-assertFalse("empty directory should not be read from", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
-settings = Settings.builder()
-.put("cluster.name", "foo")
-.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
-.put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
-try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
-Path nodeDataPath = env.nodeDataPaths()[0];
-assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
-}
-IOUtils.rm(tempNoCluster);
-// Create a directory for the cluster name
-Files.createDirectories(tempPath.resolve(NodeEnvironment.NODES_FOLDER));
-assertTrue("there is data in the directory", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
-settings = Settings.builder()
-.put("cluster.name", "foo")
-.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
-.put(Environment.PATH_DATA_SETTING.getKey(), tempClusterPath.toString()).build();
-try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
-Path nodeDataPath = env.nodeDataPaths()[0];
-assertEquals(nodeDataPath, tempClusterPath.resolve("nodes").resolve("0"));
-}
-}
 public void testPersistentNodeId() throws IOException {
 String[] paths = tmpPaths();
 NodeEnvironment env = newNodeEnvironment(paths, Settings.builder()

View File

@@ -930,36 +930,30 @@ public class GetActionIT extends ESIntegTestCase {
     private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields, @Nullable String routing) {
         for (String field : fields) {
-            assertGetFieldWorks(index, type, docId, field, false, routing);
-            assertGetFieldWorks(index, type, docId, field, true, routing);
+            assertGetFieldWorks(index, type, docId, field, routing);
+            assertGetFieldWorks(index, type, docId, field, routing);
         }
     }

-    private void assertGetFieldWorks(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
-        GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
+    private void assertGetFieldWorks(String index, String type, String docId, String field, @Nullable String routing) {
+        GetResponse response = getDocument(index, type, docId, field, routing);
         assertThat(response.getId(), equalTo(docId));
         assertTrue(response.isExists());
         assertNotNull(response.getField(field));
-        response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
+        response = multiGetDocument(index, type, docId, field, routing);
         assertThat(response.getId(), equalTo(docId));
         assertTrue(response.isExists());
         assertNotNull(response.getField(field));
     }

-    protected void assertGetFieldsException(String index, String type, String docId, String[] fields) {
-        for (String field : fields) {
-            assertGetFieldException(index, type, docId, field);
-        }
-    }
-
     private void assertGetFieldException(String index, String type, String docId, String field) {
         try {
-            client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(false).get();
+            client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).get();
             fail();
         } catch (ElasticsearchException e) {
             assertTrue(e.getMessage().contains("You can only get this field after refresh() has been called."));
         }
-        MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).setIgnoreErrorsOnGeneratedFields(false).get();
+        MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).get();
         assertNull(multiGetResponse.getResponses()[0].getResponse());
         assertTrue(multiGetResponse.getResponses()[0].getFailure().getMessage().contains("You can only get this field after refresh() has been called."));
     }
@@ -970,7 +964,7 @@ public class GetActionIT extends ESIntegTestCase {
     protected void assertGetFieldsNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
         for (String field : fields) {
-            assertGetFieldNull(index, type, docId, field, true, routing);
+            assertGetFieldNull(index, type, docId, field, routing);
         }
     }
@@ -980,37 +974,37 @@ public class GetActionIT extends ESIntegTestCase {
     protected void assertGetFieldsAlwaysNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
         for (String field : fields) {
-            assertGetFieldNull(index, type, docId, field, true, routing);
-            assertGetFieldNull(index, type, docId, field, false, routing);
+            assertGetFieldNull(index, type, docId, field, routing);
+            assertGetFieldNull(index, type, docId, field, routing);
         }
     }

-    protected void assertGetFieldNull(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+    protected void assertGetFieldNull(String index, String type, String docId, String field, @Nullable String routing) {
         //for get
-        GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
+        GetResponse response = getDocument(index, type, docId, field, routing);
         assertTrue(response.isExists());
         assertNull(response.getField(field));
         assertThat(response.getId(), equalTo(docId));
         //same for multi get
-        response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
+        response = multiGetDocument(index, type, docId, field, routing);
         assertNull(response.getField(field));
         assertThat(response.getId(), equalTo(docId));
         assertTrue(response.isExists());
     }

-    private GetResponse multiGetDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
+    private GetResponse multiGetDocument(String index, String type, String docId, String field, @Nullable String routing) {
         MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).fields(field);
         if (routing != null) {
             getItem.routing(routing);
         }
-        MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
+        MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem);
         MultiGetResponse multiGetResponse = multiGetRequestBuilder.get();
         assertThat(multiGetResponse.getResponses().length, equalTo(1));
         return multiGetResponse.getResponses()[0].getResponse();
     }

-    private GetResponse getDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
-        GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
+    private GetResponse getDocument(String index, String type, String docId, String field, @Nullable String routing) {
+        GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field);
         if (routing != null) {
             getRequestBuilder.setRouting(routing);
         }
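With setIgnoreErrorsOnGeneratedFields gone from both builders, a caller now states only the fields it wants; a sketch of the post-change request shape, assuming an integration test's client() and placeholder index/type/id values:

        GetResponse get = client().prepareGet()
                .setIndex("test").setType("type").setId("1")
                .setFields("field")
                .get();
        MultiGetResponse mget = client().prepareMultiGet()
                .add(new MultiGetRequest.Item("test", "type", "1").fields("field"))
                .get();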


@@ -43,6 +43,8 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicReference;

+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
+import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
 import static org.elasticsearch.index.query.QueryBuilders.termQuery;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -52,13 +54,13 @@ import static org.hamcrest.Matchers.nullValue;
 public class IndexServiceTests extends ESSingleNodeTestCase {
     public void testDetermineShadowEngineShouldBeUsed() {
         Settings regularSettings = Settings.builder()
-            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
-            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(SETTING_NUMBER_OF_SHARDS, 2)
+            .put(SETTING_NUMBER_OF_REPLICAS, 1)
             .build();

         Settings shadowSettings = Settings.builder()
-            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
-            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
+            .put(SETTING_NUMBER_OF_SHARDS, 2)
+            .put(SETTING_NUMBER_OF_REPLICAS, 1)
             .put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
             .build();


@@ -18,15 +18,7 @@
  */
 package org.elasticsearch.index.replication;

-import org.apache.lucene.document.Document;
-import org.apache.lucene.index.IndexNotFoundException;
-import org.apache.lucene.index.LeafReader;
-import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.store.AlreadyClosedException;
-import org.apache.lucene.store.Directory;
-import org.apache.lucene.util.Bits;
-import org.apache.lucene.util.IOUtils;
-import org.elasticsearch.ExceptionsHelper;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.DocWriteResponse;
@@ -41,52 +33,21 @@ import org.elasticsearch.action.support.replication.TransportWriteAction;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
-import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardRoutingHelper;
-import org.elasticsearch.cluster.routing.ShardRoutingState;
-import org.elasticsearch.cluster.routing.TestShardRouting;
 import org.elasticsearch.common.collect.Iterators;
-import org.elasticsearch.common.compress.CompressedXContent;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.LocalTransportAddress;
-import org.elasticsearch.common.unit.ByteSizeUnit;
-import org.elasticsearch.common.util.BigArrays;
-import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.index.Index;
-import org.elasticsearch.index.IndexSettings;
-import org.elasticsearch.index.MapperTestUtils;
-import org.elasticsearch.index.cache.IndexCache;
-import org.elasticsearch.index.cache.query.DisabledQueryCache;
-import org.elasticsearch.index.engine.Engine;
-import org.elasticsearch.index.mapper.MapperService;
 import org.elasticsearch.index.mapper.Uid;
-import org.elasticsearch.index.mapper.UidFieldMapper;
-import org.elasticsearch.index.shard.IndexEventListener;
 import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardState;
+import org.elasticsearch.index.shard.IndexShardTestCase;
 import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.index.shard.ShardPath;
-import org.elasticsearch.index.similarity.SimilarityService;
-import org.elasticsearch.index.store.DirectoryService;
-import org.elasticsearch.index.store.Store;
-import org.elasticsearch.indices.recovery.RecoveryFailedException;
-import org.elasticsearch.indices.recovery.RecoverySourceHandler;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.indices.recovery.RecoveryTarget;
-import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
-import org.elasticsearch.indices.recovery.StartRecoveryRequest;
-import org.elasticsearch.test.DummyShardLock;
-import org.elasticsearch.test.ESTestCase;
-import org.elasticsearch.test.junit.annotations.TestLogging;
-import org.elasticsearch.threadpool.TestThreadPool;
-import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportResponse;

 import java.io.IOException;
-import java.nio.file.Files;
-import java.nio.file.Path;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashSet;
@@ -94,10 +55,8 @@ import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
-import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.Future;
 import java.util.concurrent.FutureTask;
-import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.function.BiFunction;
 import java.util.function.Consumer;
@@ -107,98 +66,24 @@ import java.util.stream.StreamSupport;
 import static org.hamcrest.Matchers.empty;
 import static org.hamcrest.Matchers.equalTo;

-public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
+public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase {

-    protected ThreadPool threadPool;
     protected final Index index = new Index("test", "uuid");
     private final ShardId shardId = new ShardId(index, 0);
     private final Map<String, String> indexMapping = Collections.singletonMap("type", "{ \"type\": {} }");

-    protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
-        @Override
-        public void onRecoveryDone(RecoveryState state) {
-        }
-
-        @Override
-        public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
-            fail(ExceptionsHelper.detailedMessage(e));
-        }
-    };
-
-    @TestLogging("index.shard:TRACE,index.replication:TRACE,indices.recovery:TRACE")
-    public void testIndexingDuringFileRecovery() throws Exception {
-        try (ReplicationGroup shards = createGroup(randomInt(1))) {
-            shards.startAll();
-            int docs = shards.indexDocs(randomInt(50));
-            shards.flush();
-            IndexShard replica = shards.addReplica();
-            final CountDownLatch recoveryBlocked = new CountDownLatch(1);
-            final CountDownLatch releaseRecovery = new CountDownLatch(1);
-            final Future<Void> recoveryFuture = shards.asyncRecoverReplica(replica,
-                new BiFunction<IndexShard, DiscoveryNode, RecoveryTarget>() {
-                    @Override
-                    public RecoveryTarget apply(IndexShard indexShard, DiscoveryNode node) {
-                        return new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) {
-                            @Override
-                            public void renameAllTempFiles() throws IOException {
-                                super.renameAllTempFiles();
-                                recoveryBlocked.countDown();
-                                try {
-                                    releaseRecovery.await();
-                                } catch (InterruptedException e) {
-                                    throw new IOException("terminated by interrupt", e);
-                                }
-                            }
-                        };
-                    }
-                });
-            recoveryBlocked.await();
-            docs += shards.indexDocs(randomInt(20));
-            releaseRecovery.countDown();
-            recoveryFuture.get();
-            shards.assertAllEqual(docs);
-        }
-    }
-
-    @Override
-    public void setUp() throws Exception {
-        super.setUp();
-        threadPool = new TestThreadPool(getClass().getName());
-    }
-
-    @Override
-    public void tearDown() throws Exception {
-        super.tearDown();
-        ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
-    }
-
-    private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
-        final ShardId shardId = shardPath.getShardId();
-        final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
-            @Override
-            public Directory newDirectory() throws IOException {
-                return newFSDirectory(shardPath.resolveIndex());
-            }
-
-            @Override
-            public long throttleTimeInNanos() {
-                return 0;
-            }
-        };
-        return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
-    }
-
     protected ReplicationGroup createGroup(int replicas) throws IOException {
-        final Path homePath = createTempDir();
-        Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
+        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
             .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas)
             .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
             .build();
-        IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(build).primaryTerm(0, 1).build();
-        return new ReplicationGroup(metaData, homePath);
+        IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName())
+            .settings(settings)
+            .primaryTerm(0, 1);
+        for (Map.Entry<String, String> typeMapping: indexMapping.entrySet()) {
+            metaData.putMapping(typeMapping.getKey(), typeMapping.getValue());
+        }
+        return new ReplicationGroup(metaData.build());
     }

     protected DiscoveryNode getDiscoveryNode(String id) {
@@ -206,50 +91,22 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
             Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
     }

-    private IndexShard newShard(boolean primary, DiscoveryNode node, IndexMetaData indexMetaData, Path homePath) throws IOException {
-        // add node name to settings for propper logging
-        final Settings nodeSettings = Settings.builder().put("node.name", node.getName()).build();
-        final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
-        ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, node.getId(), primary, ShardRoutingState.INITIALIZING,
-            primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE);
-        final Path path = Files.createDirectories(homePath.resolve(node.getId()));
-        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path);
-        ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
-        Store store = createStore(indexSettings, shardPath);
-        IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
-        MapperService mapperService = MapperTestUtils.newMapperService(homePath, indexSettings.getSettings());
-        for (Map.Entry<String, String> type : indexMapping.entrySet()) {
-            mapperService.merge(type.getKey(), new CompressedXContent(type.getValue()), MapperService.MergeReason.MAPPING_RECOVERY, true);
-        }
-        SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
-        final IndexEventListener indexEventListener = new IndexEventListener() {
-        };
-        final Engine.Warmer warmer = searcher -> {
-        };
-        return new IndexShard(shardRouting, indexSettings, shardPath, store, indexCache, mapperService, similarityService, null, null,
-            indexEventListener, null, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, Collections.emptyList(),
-            Collections.emptyList());
-    }
-
     protected class ReplicationGroup implements AutoCloseable, Iterable<IndexShard> {
         private final IndexShard primary;
         private final List<IndexShard> replicas;
         private final IndexMetaData indexMetaData;
-        private final Path homePath;
         private final AtomicInteger replicaId = new AtomicInteger();
         private final AtomicInteger docId = new AtomicInteger();
         boolean closed = false;

-        ReplicationGroup(final IndexMetaData indexMetaData, Path homePath) throws IOException {
-            primary = newShard(true, getDiscoveryNode("s0"), indexMetaData, homePath);
+        ReplicationGroup(final IndexMetaData indexMetaData) throws IOException {
+            primary = newShard(shardId, true, "s0", indexMetaData, null);
             replicas = new ArrayList<>();
             this.indexMetaData = indexMetaData;
-            this.homePath = homePath;
             for (int i = 0; i < indexMetaData.getNumberOfReplicas(); i++) {
                 addReplica();
             }
         }

         public int indexDocs(final int numOfDoc) throws Exception {
@@ -289,7 +146,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
         }

         public synchronized IndexShard addReplica() throws IOException {
-            final IndexShard replica = newShard(false, getDiscoveryNode("s" + replicaId.incrementAndGet()), indexMetaData, homePath);
+            final IndexShard replica = newShard(shardId, false,"s" + replicaId.incrementAndGet(), indexMetaData, null);
             replicas.add(replica);
             return replica;
         }
@@ -304,39 +161,8 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
         }

         public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
-                                   boolean markAsRecovering)
-            throws IOException {
-            final DiscoveryNode pNode = getPrimaryNode();
-            final DiscoveryNode rNode = getDiscoveryNode(replica.routingEntry().currentNodeId());
-            if (markAsRecovering) {
-                replica.markAsRecovering("remote",
-                    new RecoveryState(replica.routingEntry(), pNode, rNode));
-            } else {
-                assertEquals(replica.state(), IndexShardState.RECOVERING);
-            }
-            replica.prepareForIndexRecovery();
-            RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
-            StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
-                getMetadataSnapshotOrEmpty(replica), false, 0);
-            RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {},
-                (int) ByteSizeUnit.MB.toKB(1), logger);
-            recovery.recoverToTarget();
-            recoveryTarget.markAsDone();
-            replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
-        }
-
-        private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
-            Store.MetadataSnapshot result;
-            try {
-                result = replica.snapshotStoreMetadata();
-            } catch (IndexNotFoundException e) {
-                // OK!
-                result = Store.MetadataSnapshot.EMPTY;
-            } catch (IOException e) {
-                logger.warn("failed read store, treating as empty", e);
-                result = Store.MetadataSnapshot.EMPTY;
-            }
-            return result;
+                                   boolean markAsRecovering) throws IOException {
+            ESIndexLevelReplicationTestCase.this.recoverReplica(replica, primary, targetSupplier, markAsRecovering);
         }

         public synchronized DiscoveryNode getPrimaryNode() {
@@ -367,24 +193,6 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
             }
         }

-        private Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
-            shard.refresh("get_uids");
-
-            try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
-                Set<Uid> ids = new HashSet<>();
-                for (LeafReaderContext leafContext : searcher.reader().leaves()) {
-                    LeafReader reader = leafContext.reader();
-                    Bits liveDocs = reader.getLiveDocs();
-                    for (int i = 0; i < reader.maxDoc(); i++) {
-                        if (liveDocs == null || liveDocs.get(i)) {
-                            Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME));
-                            ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME)));
-                        }
-                    }
-                }
-                return ids;
-            }
-        }
-
         public synchronized void refresh(String source) {
             for (IndexShard shard : this) {
                 shard.refresh(source);
@@ -406,10 +214,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
         public synchronized void close() throws Exception {
             if (closed == false) {
                 closed = true;
-                for (IndexShard shard : this) {
-                    shard.close("eol", false);
-                    IOUtils.close(shard.store());
-                }
+                closeShards(this);
             } else {
                 throw new AlreadyClosedException("too bad");
             }
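A concrete subclass now drives a replication group purely through this scaffolding; a sketch of the typical lifecycle, using method names taken from the test removed above (startAll, indexDocs and assertAllEqual are assumed to remain on ReplicationGroup):

    public void testDocsReplicate() throws Exception {
        try (ReplicationGroup shards = createGroup(1)) { // one primary plus one replica
            shards.startAll();
            int docs = shards.indexDocs(10);
            shards.assertAllEqual(docs); // every shard ends up with the same documents
        } // close() now delegates to IndexShardTestCase.closeShards(...)
    }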


@@ -0,0 +1,476 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.Term;
import org.apache.lucene.store.LockObtainFailedException;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.stats.IndexStats;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.InternalClusterInfoService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.env.Environment;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.env.ShardLock;
import org.elasticsearch.index.Index;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.mapper.Mapping;
import org.elasticsearch.index.mapper.ParseContext;
import org.elasticsearch.index.mapper.ParsedDocument;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.translog.Translog;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.test.DummyShardLock;
import org.elasticsearch.test.ESSingleNodeTestCase;
import org.elasticsearch.test.IndexSettingsModule;
import org.elasticsearch.test.InternalSettingsPlugin;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.BrokenBarrierException;
import java.util.concurrent.CyclicBarrier;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReference;
import static java.util.Collections.emptyMap;
import static java.util.Collections.emptySet;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
import static org.hamcrest.Matchers.equalTo;
public class IndexShardIT extends ESSingleNodeTestCase {
@Override
protected Collection<Class<? extends Plugin>> getPlugins() {
return pluginList(InternalSettingsPlugin.class);
}
private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl,
ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
Field versionField = new NumericDocValuesField("_version", 0);
document.add(uidField);
document.add(versionField);
return new ParsedDocument(versionField, id, type, routing, timestamp, ttl, Collections.singletonList(document), source,
mappingUpdate);
}
public void testLockTryingToDelete() throws Exception {
createIndex("test");
ensureGreen();
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
ClusterService cs = getInstanceFromNode(ClusterService.class);
final Index index = cs.state().metaData().index("test").getIndex();
Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
logger.info("--> paths: [{}]", (Object)shardPaths);
// Should not be able to acquire the lock because it's already open
try {
NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths);
fail("should not have been able to acquire the lock");
} catch (LockObtainFailedException e) {
assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
}
// Test without the regular shard lock to assume we can acquire it
// (worst case, meaning that the shard lock could be acquired and
// we're green to delete the shard's directory)
ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
try {
env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
fail("should not have been able to delete the directory");
} catch (LockObtainFailedException e) {
assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
}
}
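The try/fail/catch blocks above could be written more compactly with the expectThrows helper used elsewhere in this commit (see FetchSourceSubPhaseTests below); a sketch of the first assertion in that style:

        LockObtainFailedException ex = expectThrows(LockObtainFailedException.class, () ->
            NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths));
        assertTrue("msg: " + ex.getMessage(), ex.getMessage().contains("unable to acquire write.lock"));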
public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
client().prepareIndex("test", "test").setSource("{}").get();
ensureGreen("test");
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
assertBusy(() -> {
IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
}
);
IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
}
public void testDurableFlagHasEffect() {
createIndex("test");
ensureGreen();
client().prepareIndex("test", "bar", "1").setSource("{}").get();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
setDurability(shard, Translog.Durability.REQUEST);
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.ASYNC);
client().prepareIndex("test", "bar", "2").setSource("{}").get();
assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.REQUEST);
client().prepareDelete("test", "bar", "1").get();
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.ASYNC);
client().prepareDelete("test", "bar", "2").get();
assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.REQUEST);
assertNoFailures(client().prepareBulk()
.add(client().prepareIndex("test", "bar", "3").setSource("{}"))
.add(client().prepareDelete("test", "bar", "1")).get());
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
setDurability(shard, Translog.Durability.ASYNC);
assertNoFailures(client().prepareBulk()
.add(client().prepareIndex("test", "bar", "4").setSource("{}"))
.add(client().prepareDelete("test", "bar", "3")).get());
setDurability(shard, Translog.Durability.REQUEST);
assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
}
private void setDurability(IndexShard shard, Translog.Durability durability) {
client().admin().indices().prepareUpdateSettings(shard.shardId().getIndexName()).setSettings(
Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get();
assertEquals(durability, shard.getTranslogDurability());
}
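The flip-flopping above is the point of the two modes: with Durability.REQUEST the translog is fsynced before the call returns, so syncNeeded() is false, while Durability.ASYNC defers the sync to a background interval, so it stays true right after a write. A sketch of asserting that difference in isolation, using only helpers visible in this file plus the getShardEngine accessor added to ShardUtilsTests later in this commit:

        setDurability(shard, Translog.Durability.ASYNC);
        client().prepareIndex("test", "bar", "5").setSource("{}").get();
        // ASYNC leaves the translog dirty until the background sync runs
        assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());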
public void testUpdatePriority() {
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(IndexMetaData.SETTING_PRIORITY, 200));
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
.build()).get();
assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
}
public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
Environment env = getInstanceFromNode(Environment.class);
Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
logger.info("--> idxPath: [{}]", idxPath);
Settings idxSettings = Settings.builder()
.put(IndexMetaData.SETTING_DATA_PATH, idxPath)
.build();
createIndex("test", idxSettings);
ensureGreen("test");
client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
SearchResponse response = client().prepareSearch("test").get();
assertHitCount(response, 1L);
client().admin().indices().prepareDelete("test").get();
assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
assertPathHasBeenCleared(idxPath);
}
public void testExpectedShardSizeIsPresent() throws InterruptedException {
assertAcked(client().admin().indices().prepareCreate("test")
.setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
for (int i = 0; i < 50; i++) {
client().prepareIndex("test", "test").setSource("{}").get();
}
ensureGreen("test");
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
clusterInfoService.refresh();
ClusterState state = getInstanceFromNode(ClusterService.class).state();
Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test")
.getShards().get(0).primaryShard());
assertNotNull(test);
assertTrue(test > 0);
}
public void testIndexCanChangeCustomDataPath() throws Exception {
Environment env = getInstanceFromNode(Environment.class);
Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
final String INDEX = "idx";
Path startDir = idxPath.resolve("start-" + randomAsciiOfLength(10));
Path endDir = idxPath.resolve("end-" + randomAsciiOfLength(10));
logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString());
logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString());
// temp dirs are automatically created, but the end dir is what
// startDir is going to be renamed as, so it needs to be deleted
// otherwise we get all sorts of errors about the directory
// already existing
IOUtils.rm(endDir);
Settings sb = Settings.builder()
.put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
.build();
Settings sb2 = Settings.builder()
.put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
.build();
logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
createIndex(INDEX, sb);
ensureGreen(INDEX);
client().prepareIndex(INDEX, "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
logger.info("--> closing the index [{}]", INDEX);
client().admin().indices().prepareClose(INDEX).get();
logger.info("--> index closed, re-opening...");
client().admin().indices().prepareOpen(INDEX).get();
logger.info("--> index re-opened");
ensureGreen(INDEX);
resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
// Now, try closing and changing the settings
logger.info("--> closing the index [{}]", INDEX);
client().admin().indices().prepareClose(INDEX).get();
logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName());
assert Files.exists(endDir) == false : "end directory should not exist!";
Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING);
logger.info("--> updating settings...");
client().admin().indices().prepareUpdateSettings(INDEX)
.setSettings(sb2)
.setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
.get();
assert Files.exists(startDir) == false : "start dir shouldn't exist";
logger.info("--> settings updated and files moved, re-opening index");
client().admin().indices().prepareOpen(INDEX).get();
logger.info("--> index re-opened");
ensureGreen(INDEX);
resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
assertAcked(client().admin().indices().prepareDelete(INDEX));
assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
assertPathHasBeenCleared(startDir.toAbsolutePath());
assertPathHasBeenCleared(endDir.toAbsolutePath());
}
public void testMaybeFlush() throws Exception {
createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)
.build());
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
new ByteSizeValue(133 /* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldFlush());
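        // Build one more operation and apply it directly to the shard: with the
        // threshold set to a single op's footprint, this second translog entry
        // is what pushes the size over the limit below.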
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(),
new BytesArray(new byte[]{1}), null);
Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
shard.index(index);
assertTrue(shard.shouldFlush());
assertEquals(2, shard.getEngine().getTranslog().totalOperations());
client().prepareIndex("test", "test", "2").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertBusy(() -> { // this is async
assertFalse(shard.shouldFlush());
});
assertEquals(0, shard.getEngine().getTranslog().totalOperations());
shard.getEngine().getTranslog().sync();
long size = shard.getEngine().getTranslog().sizeInBytes();
logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
.build()).get();
client().prepareDelete("test", "test", "2").get();
logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
assertBusy(() -> { // this is async
logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
assertFalse(shard.shouldFlush());
});
assertEquals(0, shard.getEngine().getTranslog().totalOperations());
}
public void testStressMaybeFlush() throws Exception {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService test = indicesService.indexService(resolveIndex("test"));
final IndexShard shard = test.getShardOrNull(0);
assertFalse(shard.shouldFlush());
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
new ByteSizeValue(133/* size of the operation + header&footer*/, ByteSizeUnit.BYTES)).build()).get();
client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
assertFalse(shard.shouldFlush());
final AtomicBoolean running = new AtomicBoolean(true);
final int numThreads = randomIntBetween(2, 4);
Thread[] threads = new Thread[numThreads];
CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
for (int i = 0; i < threads.length; i++) {
threads[i] = new Thread() {
@Override
public void run() {
try {
barrier.await();
} catch (InterruptedException | BrokenBarrierException e) {
throw new RuntimeException(e);
}
while (running.get()) {
shard.maybeFlush();
}
}
};
threads[i].start();
}
barrier.await();
FlushStats flushStats = shard.flushStats();
long total = flushStats.getTotal();
client().prepareIndex("test", "test", "1").setSource("{}").get();
assertBusy(() -> assertEquals(total + 1, shard.flushStats().getTotal()));
running.set(false);
for (int i = 0; i < threads.length; i++) {
threads[i].join();
}
assertEquals(total + 1, shard.flushStats().getTotal());
}
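The barrier here is a start gate: numThreads + 1 parties, with the test thread arriving last so that every flusher loop begins at the same instant. A self-contained sketch of the same pattern in plain Java, with no Elasticsearch types:

    public static void startGateDemo() throws Exception {
        final int workers = 3;
        // workers + 1 parties: the caller arrives last and releases everyone at once
        final java.util.concurrent.CyclicBarrier gate = new java.util.concurrent.CyclicBarrier(workers + 1);
        for (int i = 0; i < workers; i++) {
            final int id = i;
            new Thread(() -> {
                try {
                    gate.await(); // park until every party has arrived
                } catch (Exception e) {
                    throw new RuntimeException(e);
                }
                System.out.println("worker " + id + " released");
            }).start();
        }
        gate.await(); // trips the barrier; all workers start together
    }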
public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable {
createIndex("test");
ensureGreen();
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
IndexService indexService = indicesService.indexService(resolveIndex("test"));
IndexShard shard = indexService.getShardOrNull(0);
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get();
client().prepareDelete("test", "test", "0").get();
client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
shard.close("simon says", false);
AtomicReference<IndexShard> shardRef = new AtomicReference<>();
List<Exception> failures = new ArrayList<>();
IndexingOperationListener listener = new IndexingOperationListener() {
@Override
public void postIndex(Engine.Index index, boolean created) {
try {
assertNotNull(shardRef.get());
// this is all IMC needs to do - check current memory and refresh
assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
shardRef.get().refresh("test");
} catch (Exception e) {
failures.add(e);
throw e;
}
}
@Override
public void postDelete(Engine.Delete delete) {
try {
assertNotNull(shardRef.get());
// this is all IMC needs to do - check current memory and refresh
assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
shardRef.get().refresh("test");
} catch (Exception e) {
failures.add(e);
throw e;
}
}
};
final IndexShard newShard = newIndexShard(indexService, shard, wrapper, listener);
shardRef.set(newShard);
recoverShard(newShard);
try {
ExceptionsHelper.rethrowAndSuppress(failures);
} finally {
newShard.close("just do it", randomBoolean());
}
}
public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
assertTrue(newShard.recoverFromStore());
newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
return newShard;
}
public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper,
IndexingOperationListener... listeners) throws IOException {
ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(),
shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(),
indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners));
return newShard;
}
private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) {
ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(),
existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING,
existingShardRouting.allocationId());
shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"),
RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE);
return shardRouting;
}
}
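recoverShard and newIndexShard are public static precisely so other tests can rebuild a shard with extra listeners wired in, which is how IndexingMemoryControllerTests uses them below. A sketch of that reuse, assuming a started single-node test with an existing shard and its IndexService (the empty listener is a placeholder):

        IndexingOperationListener listener = new IndexingOperationListener() {}; // placeholder
        shard.close("rebuild with listener", false);
        IndexShard rebuilt = IndexShardIT.newIndexShard(indexService, shard, new IndexSearcherWrapper() {}, listener);
        IndexShardIT.recoverShard(rebuilt); // store recovery, then routing moves to started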


@@ -28,6 +28,7 @@ import org.apache.lucene.index.LeafReaderContext;
 import org.apache.lucene.store.BaseDirectoryWrapper;
 import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
+import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.test.ESTestCase;

 import java.io.IOException;
@@ -64,4 +65,7 @@ public class ShardUtilsTests extends ESTestCase {
         IOUtils.close(writer, dir);
     }

+    public static Engine getShardEngine(IndexShard shard) {
+        return shard.getEngine();
+    }
 }


@@ -21,7 +21,6 @@ package org.elasticsearch.indices;
 import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
 import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.cluster.routing.RecoverySource;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.LocalTransportAddress;
@@ -31,7 +30,7 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.engine.Engine;
 import org.elasticsearch.index.shard.IndexSearcherWrapper;
 import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.IndexShardTests;
+import org.elasticsearch.index.shard.IndexShardIT;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -443,7 +442,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
                 shard.writeIndexingBuffer();
             }
         };
-        final IndexShard newShard = IndexShardTests.newIndexShard(indexService, shard, wrapper, imc);
+        final IndexShard newShard = IndexShardIT.newIndexShard(indexService, shard, wrapper, imc);
         shardRef.set(newShard);
         try {
             assertEquals(0, imc.availableShards().size());


@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.search.internal;
+package org.elasticsearch.search;

 import org.apache.lucene.queries.TermsQuery;
 import org.apache.lucene.search.BooleanQuery;
@@ -26,6 +26,7 @@ import org.apache.lucene.search.Query;
 import org.apache.lucene.util.BytesRef;
 import org.elasticsearch.common.lucene.search.Queries;
 import org.elasticsearch.index.mapper.TypeFieldMapper;
+import org.elasticsearch.search.DefaultSearchContext;
 import org.elasticsearch.test.ESTestCase;

 import static org.apache.lucene.search.BooleanClause.Occur.FILTER;


@@ -56,12 +56,7 @@ public class SearchRequestTests extends ESTestCase {
             }
         };
         SearchModule searchModule = new SearchModule(Settings.EMPTY, false,
-                Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) {
-            @Override
-            protected void configureSearch() {
-                // Skip me
-            }
-        };
+                Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
         List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
         entries.addAll(indicesModule.getNamedWriteables());
         entries.addAll(searchModule.getNamedWriteables());
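The anonymous subclass existed only to skip configureSearch; with that hook gone from SearchModule, plain construction suffices and the named writeables are collected by hand. A sketch of the resulting setup (constructor shapes as they appear in this diff; the NamedWriteableRegistry constructor taking the entry list is an assumption):

        SearchModule searchModule = new SearchModule(Settings.EMPTY, false, emptyList());
        List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
        entries.addAll(indicesModule.getNamedWriteables());
        entries.addAll(searchModule.getNamedWriteables());
        NamedWriteableRegistry registry = new NamedWriteableRegistry(entries); // assumed constructor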


@@ -119,12 +119,7 @@ public class AggregatorParsingTests extends ESTestCase {
                 bindMapperExtension();
             }
         };
-        SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
-            @Override
-            protected void configureSearch() {
-                // Skip me
-            }
-        };
+        SearchModule searchModule = new SearchModule(settings, false, emptyList());
         List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
         entries.addAll(indicesModule.getNamedWriteables());
         entries.addAll(searchModule.getNamedWriteables());


@@ -143,12 +143,7 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
                 bindMapperExtension();
             }
         };
-        SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
-            @Override
-            protected void configureSearch() {
-                // Skip me
-            }
-        };
+        SearchModule searchModule = new SearchModule(settings, false, emptyList());
         List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
         entries.addAll(indicesModule.getNamedWriteables());
         entries.addAll(searchModule.getNamedWriteables());


@@ -133,12 +133,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
             }
         };
         SearchModule searchModule = new SearchModule(settings, false,
-                Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) {
-            @Override
-            protected void configureSearch() {
-                // Skip me
-            }
-        };
+                Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
         List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
         entries.addAll(indicesModule.getNamedWriteables());
         entries.addAll(searchModule.getNamedWriteables());


@@ -23,6 +23,8 @@ import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;
+import org.elasticsearch.index.shard.IndexShard;
+import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.search.fetch.FetchSubPhase;
 import org.elasticsearch.search.internal.InternalSearchHit;
 import org.elasticsearch.search.internal.SearchContext;
@@ -33,37 +35,11 @@ import org.elasticsearch.test.TestSearchContext;
 import java.io.IOException;
 import java.util.Collections;

+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;

 public class FetchSourceSubPhaseTests extends ESTestCase {

-    static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext {
-
-        FetchSourceContext context;
-        BytesReference source;
-
-        FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) {
-            super(null);
-            this.context = context;
-            this.source = source;
-        }
-
-        @Override
-        public boolean sourceRequested() {
-            return context != null && context.fetchSource();
-        }
-
-        @Override
-        public FetchSourceContext fetchSourceContext() {
-            return context;
-        }
-
-        @Override
-        public SearchLookup lookup() {
-            SearchLookup lookup = super.lookup();
-            lookup.source().setSource(source);
-            return lookup;
-        }
-    }
-
     public void testFetchSource() throws IOException {
         XContentBuilder source = XContentFactory.jsonBuilder().startObject()
             .field("field", "value")
@@ -109,11 +85,14 @@ public class FetchSourceSubPhaseTests extends ESTestCase {
         hitContext = hitExecute(null, false, null, null);
         assertNull(hitContext.hit().sourceAsMap());

-        hitContext = hitExecute(null, true, "field1", null);
-        assertNull(hitContext.hit().sourceAsMap());
+        IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> hitExecute(null, true, "field1", null));
+        assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " +
+            "for index [index]", exception.getMessage());

-        hitContext = hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"});
-        assertNull(hitContext.hit().sourceAsMap());
+        exception = expectThrows(IllegalArgumentException.class,
+            () -> hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"}));
+        assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " +
+            "for index [index]", exception.getMessage());
     }

     private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) {
@@ -131,4 +110,40 @@ public class FetchSourceSubPhaseTests extends ESTestCase {
         phase.hitExecute(searchContext, hitContext);
         return hitContext;
     }

+    private static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext {
+        final FetchSourceContext context;
+        final BytesReference source;
+        final IndexShard indexShard;
+
+        FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) {
+            super(null);
+            this.context = context;
+            this.source = source;
+            this.indexShard = mock(IndexShard.class);
+            when(indexShard.shardId()).thenReturn(new ShardId("index", "index", 1));
+        }
+
+        @Override
+        public boolean sourceRequested() {
+            return context != null && context.fetchSource();
+        }
+
+        @Override
+        public FetchSourceContext fetchSourceContext() {
+            return context;
+        }
+
+        @Override
+        public SearchLookup lookup() {
+            SearchLookup lookup = super.lookup();
+            lookup.source().setSource(source);
+            return lookup;
+        }
+
+        @Override
+        public IndexShard indexShard() {
+            return indexShard;
+        }
+    }
 }

View File

@ -19,7 +19,6 @@
package org.elasticsearch.search.fetch.subphase.highlight; package org.elasticsearch.search.fetch.subphase.highlight;
import com.carrotsearch.randomizedtesting.generators.RandomPicks; import com.carrotsearch.randomizedtesting.generators.RandomPicks;
import org.apache.lucene.search.join.ScoreMode; import org.apache.lucene.search.join.ScoreMode;
import org.elasticsearch.Version; import org.elasticsearch.Version;
import org.elasticsearch.action.index.IndexRequestBuilder; import org.elasticsearch.action.index.IndexRequestBuilder;
@ -38,6 +37,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder;
import org.elasticsearch.index.query.Operator; import org.elasticsearch.index.query.Operator;
import org.elasticsearch.index.query.QueryBuilder; import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders; import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
import org.elasticsearch.index.search.MatchQuery; import org.elasticsearch.index.search.MatchQuery;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.rest.RestStatus; import org.elasticsearch.rest.RestStatus;
@ -50,8 +50,8 @@ import org.hamcrest.Matcher;
import org.hamcrest.Matchers; import org.hamcrest.Matchers;
import java.io.IOException; import java.io.IOException;
import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.Collections;
import java.util.HashMap; import java.util.HashMap;
import java.util.Map; import java.util.Map;
@ -96,7 +96,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
@Override @Override
protected Collection<Class<? extends Plugin>> nodePlugins() { protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(InternalSettingsPlugin.class); return Collections.singletonList(InternalSettingsPlugin.class);
} }
public void testHighlightingWithWildcardName() throws IOException { public void testHighlightingWithWildcardName() throws IOException {
@ -2851,4 +2851,21 @@ public class HighlighterSearchIT extends ESIntegTestCase {
assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>")); assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>"));
assertThat(field.getFragments()[1].string(), equalTo("<em>cow</em>")); assertThat(field.getFragments()[1].string(), equalTo("<em>cow</em>"));
} }
public void testFunctionScoreQueryHighlight() throws Exception {
client().prepareIndex("test", "type", "1")
.setSource(jsonBuilder().startObject().field("text", "brown").endObject())
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
.get();
SearchResponse searchResponse = client().prepareSearch()
.setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro")))
.highlighter(new HighlightBuilder()
.field(new Field("text")))
.get();
assertHitCount(searchResponse, 1);
HighlightField field = searchResponse.getHits().getAt(0).highlightFields().get("text");
assertThat(field.getFragments().length, equalTo(1));
assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>"));
}
} }

View File

@ -80,7 +80,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
@Override @Override
protected Collection<Class<? extends Plugin>> nodePlugins() { protected Collection<Class<? extends Plugin>> nodePlugins() {
return Arrays.asList(CustomScriptPlugin.class); return Collections.singletonList(CustomScriptPlugin.class);
} }
public static class CustomScriptPlugin extends MockScriptPlugin { public static class CustomScriptPlugin extends MockScriptPlugin {

View File

@ -59,12 +59,7 @@ public class ShardSearchTransportRequestTests extends ESTestCase {
} }
}; };
SearchModule searchModule = new SearchModule(Settings.EMPTY, false, SearchModule searchModule = new SearchModule(Settings.EMPTY, false,
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) { Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
@Override
protected void configureSearch() {
// Skip me
}
};
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>(); List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
entries.addAll(indicesModule.getNamedWriteables()); entries.addAll(indicesModule.getNamedWriteables());
entries.addAll(searchModule.getNamedWriteables()); entries.addAll(searchModule.getNamedWriteables());

View File

@ -24,6 +24,9 @@ integTest {
setting 'script.inline', 'true' setting 'script.inline', 'true'
setting 'script.stored', 'true' setting 'script.stored', 'true'
setting 'script.max_compilations_per_minute', '1000' setting 'script.max_compilations_per_minute', '1000'
/* Enable regexes in painless so our tests don't complain about example
* snippets that use them. */
setting 'script.painless.regex.enabled', 'true'
Closure configFile = { Closure configFile = {
extraConfigFile it, "src/test/cluster/config/$it" extraConfigFile it, "src/test/cluster/config/$it"
} }

View File

@ -27,6 +27,8 @@ way to reindex old indices is to use the `reindex` API.
* <<breaking_60_mapping_changes>> * <<breaking_60_mapping_changes>>
* <<breaking_60_rest_changes>> * <<breaking_60_rest_changes>>
* <<breaking_60_search_changes>> * <<breaking_60_search_changes>>
* <<breaking_60_docs_changes>>
* <<breaking_60_cluster_changes>>
include::migrate_6_0/mapping.asciidoc[] include::migrate_6_0/mapping.asciidoc[]
@ -35,3 +37,5 @@ include::migrate_6_0/rest.asciidoc[]
include::migrate_6_0/search.asciidoc[] include::migrate_6_0/search.asciidoc[]
include::migrate_6_0/docs.asciidoc[] include::migrate_6_0/docs.asciidoc[]
include::migrate_6_0/cluster.asciidoc[]

View File

@ -0,0 +1,27 @@
[[breaking_60_cluster_changes]]
=== Cluster changes
==== Cluster name no longer allowed in path.data
Previously the cluster name could be used in the `path.data` setting with a
warning. This is now no longer allowed. For instance, in the previous version
this was valid:
[source,sh]
--------------------------------------------------
# Assuming path.data is /tmp/mydata
# No longer supported:
$ tree /tmp/mydata
/tmp/mydata
├── <cluster_name>
│   └── nodes
│   └── 0
│   └── <etc>
# Should be changed to:
$ tree /tmp/mydata
/tmp/mydata
├── nodes
│   └── 0
│   └── <etc>
--------------------------------------------------
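The upgrade itself amounts to moving the `nodes` directory up one level. Below is a minimal, illustrative sketch of that move in Java, assuming the layout shown above; the data path and cluster name are placeholders for your own values, and the node must be stopped before moving anything:

[source,java]
--------------------------------------------------
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;

public class MoveNodesFolder {
    public static void main(String[] args) throws IOException {
        Path dataDir = Paths.get("/tmp/mydata");                        // path.data
        Path oldNodes = dataDir.resolve("my_cluster").resolve("nodes"); // old layout
        Path newNodes = dataDir.resolve("nodes");                       // new layout
        // Only move if the old layout exists and the new one doesn't yet.
        if (Files.exists(oldNodes) && Files.exists(newNodes) == false) {
            Files.move(oldNodes, newNodes);
        }
    }
}
--------------------------------------------------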

View File

@ -1,4 +1,4 @@
[[breaking_60_document_api_changes]] [[breaking_60_docs_changes]]
=== Document API changes === Document API changes
==== version type 'force' removed ==== version type 'force' removed

View File

@ -196,6 +196,15 @@ POST hockey/player/1/_update
[[modules-scripting-painless-regex]] [[modules-scripting-painless-regex]]
=== Regular expressions === Regular expressions
NOTE: Regexes are disabled by default because they circumvent Painless's
protection against long-running and memory-hungry scripts. To make matters
worse, even innocuous-looking regexes can have staggering performance and stack
depth behavior. They remain an amazingly powerful tool but are too scary to enable
by default. To enable them yourself, set `script.painless.regex.enabled: true` in
`elasticsearch.yml`. We'd very much like to have a safe alternative
implementation that can be enabled by default, so check this space for later
developments!
Painless's native support for regular expressions has syntax constructs: Painless's native support for regular expressions has syntax constructs:
* `/pattern/`: Pattern literals create patterns. This is the only way to create * `/pattern/`: Pattern literals create patterns. This is the only way to create

View File

@ -19,10 +19,18 @@
package org.elasticsearch.painless; package org.elasticsearch.painless;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
/** /**
* Settings to use when compiling a script. * Settings to use when compiling a script.
*/ */
public final class CompilerSettings { public final class CompilerSettings {
/**
* Are regexes enabled? This is a node level setting because regexes break out of painless's lovely sandbox and can cause stack
* overflows and we can't analyze the regex to be sure it won't.
*/
public static final Setting<Boolean> REGEX_ENABLED = Setting.boolSetting("script.painless.regex.enabled", false, Property.NodeScope);
/** /**
* Constant to be used when specifying the maximum loop counter when compiling a script. * Constant to be used when specifying the maximum loop counter when compiling a script.
@ -55,6 +63,12 @@ public final class CompilerSettings {
*/ */
private int initialCallSiteDepth = 0; private int initialCallSiteDepth = 0;
/**
* Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
* <strong>looking</strong> regexes can cause stack overflows.
*/
private boolean regexesEnabled = false;
/** /**
* Returns the value for the cumulative total number of statements that can be made in all loops * Returns the value for the cumulative total number of statements that can be made in all loops
* in a script before an exception is thrown. This attempts to prevent infinite loops. Note if * in a script before an exception is thrown. This attempts to prevent infinite loops. Note if
@ -104,4 +118,20 @@ public final class CompilerSettings {
public void setInitialCallSiteDepth(int depth) { public void setInitialCallSiteDepth(int depth) {
this.initialCallSiteDepth = depth; this.initialCallSiteDepth = depth;
} }
/**
* Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
* <strong>looking</strong> regexes can cause stack overflows.
*/
public boolean areRegexesEnabled() {
return regexesEnabled;
}
/**
* Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
* <strong>looking</strong> regexes can cause stack overflows.
*/
public void setRegexesEnabled(boolean regexesEnabled) {
this.regexesEnabled = regexesEnabled;
}
} }
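To see how the node-level setting reaches a compilation, here is a minimal sketch, assuming the same package as `CompilerSettings`; the wiring mirrors what `PainlessScriptEngineService` does in the next file:

[source,java]
--------------------------------------------------
import org.elasticsearch.common.settings.Settings;

public class RegexSettingSketch {
    public static void main(String[] args) {
        // Node-level settings, as if parsed from elasticsearch.yml.
        Settings nodeSettings = Settings.builder()
                .put(CompilerSettings.REGEX_ENABLED.getKey(), true)
                .build();
        // Copy the node-level flag onto the per-script compiler settings.
        CompilerSettings compilerSettings = new CompilerSettings();
        compilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(nodeSettings));
        System.out.println(compilerSettings.areRegexesEnabled()); // true
    }
}
--------------------------------------------------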

View File

@ -20,19 +20,21 @@
package org.elasticsearch.painless; package org.elasticsearch.painless;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.ScriptPlugin; import org.elasticsearch.plugins.ScriptPlugin;
import org.elasticsearch.script.ScriptEngineRegistry;
import org.elasticsearch.script.ScriptEngineService; import org.elasticsearch.script.ScriptEngineService;
import org.elasticsearch.script.ScriptModule;
import java.util.Arrays;
import java.util.List;
/** /**
* Registers Painless as a plugin. * Registers Painless as a plugin.
*/ */
public final class PainlessPlugin extends Plugin implements ScriptPlugin { public final class PainlessPlugin extends Plugin implements ScriptPlugin {
// force to pare our definition at startup (not on the user's first script) // force to parse our definition at startup (not on the user's first script)
static { static {
Definition.VOID_TYPE.hashCode(); Definition.VOID_TYPE.hashCode();
} }
@ -41,4 +43,9 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin {
public ScriptEngineService getScriptEngineService(Settings settings) { public ScriptEngineService getScriptEngineService(Settings settings) {
return new PainlessScriptEngineService(settings); return new PainlessScriptEngineService(settings);
} }
@Override
public List<Setting<?>> getSettings() {
return Arrays.asList(CompilerSettings.REGEX_ENABLED);
}
} }

View File

@ -53,11 +53,6 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
*/ */
public static final String NAME = "painless"; public static final String NAME = "painless";
/**
* Default compiler settings to be used.
*/
private static final CompilerSettings DEFAULT_COMPILER_SETTINGS = new CompilerSettings();
/** /**
* Permissions context used during compilation. * Permissions context used during compilation.
*/ */
@ -74,12 +69,19 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
}); });
} }
/**
* Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside
* of {@link PainlessScriptEngineService#PainlessScriptEngineService(Settings)}.
*/
private final CompilerSettings defaultCompilerSettings = new CompilerSettings();
/** /**
* Constructor. * Constructor.
* @param settings The settings to initialize the engine with. * @param settings The settings to initialize the engine with.
*/ */
public PainlessScriptEngineService(final Settings settings) { public PainlessScriptEngineService(final Settings settings) {
super(settings); super(settings);
defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings));
} }
/** /**
@ -111,29 +113,36 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
if (params.isEmpty()) { if (params.isEmpty()) {
// Use the default settings. // Use the default settings.
compilerSettings = DEFAULT_COMPILER_SETTINGS; compilerSettings = defaultCompilerSettings;
} else { } else {
// Use custom settings specified by params. // Use custom settings specified by params.
compilerSettings = new CompilerSettings(); compilerSettings = new CompilerSettings();
Map<String, String> copy = new HashMap<>(params);
String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER);
// Except regexes enabled - this is a node level setting and can't be changed in the request.
compilerSettings.setRegexesEnabled(defaultCompilerSettings.areRegexesEnabled());
Map<String, String> copy = new HashMap<>(params);
String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER);
if (value != null) { if (value != null) {
compilerSettings.setMaxLoopCounter(Integer.parseInt(value)); compilerSettings.setMaxLoopCounter(Integer.parseInt(value));
} }
value = copy.remove(CompilerSettings.PICKY); value = copy.remove(CompilerSettings.PICKY);
if (value != null) { if (value != null) {
compilerSettings.setPicky(Boolean.parseBoolean(value)); compilerSettings.setPicky(Boolean.parseBoolean(value));
} }
value = copy.remove(CompilerSettings.INITIAL_CALL_SITE_DEPTH); value = copy.remove(CompilerSettings.INITIAL_CALL_SITE_DEPTH);
if (value != null) { if (value != null) {
compilerSettings.setInitialCallSiteDepth(Integer.parseInt(value)); compilerSettings.setInitialCallSiteDepth(Integer.parseInt(value));
} }
value = copy.remove(CompilerSettings.REGEX_ENABLED.getKey());
if (value != null) {
throw new IllegalArgumentException("[painless.regex.enabled] can only be set on node startup.");
}
if (!copy.isEmpty()) { if (!copy.isEmpty()) {
throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy); throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy);
} }
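So the loop counter, pickiness, and call-site depth remain per-request compile parameters, while the regex flag is node-only. A small illustrative sketch of a parameter map a caller might build (same package as `CompilerSettings` assumed):

[source,java]
--------------------------------------------------
import java.util.HashMap;
import java.util.Map;

public class CompileParamsSketch {
    public static void main(String[] args) {
        Map<String, String> params = new HashMap<>();
        params.put(CompilerSettings.MAX_LOOP_COUNTER, "5000");          // fine per request
        params.put(CompilerSettings.PICKY, "true");                     // fine per request
        // This would be rejected at compile time with
        // "[script.painless.regex.enabled] can only be set on node startup.":
        // params.put(CompilerSettings.REGEX_ENABLED.getKey(), "true");
        System.out.println(params);
    }
}
--------------------------------------------------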

View File

@ -796,6 +796,11 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
@Override @Override
public ANode visitRegex(RegexContext ctx) { public ANode visitRegex(RegexContext ctx) {
if (false == settings.areRegexesEnabled()) {
throw location(ctx).createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] "
+ "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep "
+ "recursion and long loops."));
}
String text = ctx.REGEX().getText(); String text = ctx.REGEX().getText();
int lastSlash = text.lastIndexOf('/'); int lastSlash = text.lastIndexOf('/');
String pattern = text.substring(1, lastSlash); String pattern = text.substring(1, lastSlash);
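The literal is sliced between its first and last slash; anything after the last slash would be flag characters. A tiny sketch of that parse — the flag suffix is an assumption here, as this hunk only shows the pattern extraction:

[source,java]
--------------------------------------------------
public class RegexLiteralSketch {
    public static void main(String[] args) {
        String text = "/foo/i";                         // as returned by ctx.REGEX().getText()
        int lastSlash = text.lastIndexOf('/');
        String pattern = text.substring(1, lastSlash);  // "foo"
        String flags = text.substring(lastSlash + 1);   // "i" (assumed flag suffix)
        System.out.println(pattern + " with flags " + flags);
    }
}
--------------------------------------------------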

View File

@ -19,17 +19,26 @@
package org.elasticsearch.painless; package org.elasticsearch.painless;
import org.elasticsearch.common.settings.Settings;
import java.nio.CharBuffer; import java.nio.CharBuffer;
import java.util.Arrays; import java.util.Arrays;
import java.util.HashSet; import java.util.HashSet;
import java.util.regex.Pattern; import java.util.regex.Pattern;
import java.util.regex.PatternSyntaxException; import java.util.regex.PatternSyntaxException;
import static java.util.Collections.emptyMap;
import static java.util.Collections.singletonMap; import static java.util.Collections.singletonMap;
import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.containsString;
public class RegexTests extends ScriptTestCase { public class RegexTests extends ScriptTestCase {
@Override
protected Settings scriptEngineSettings() {
// Enable regexes just for this test. They are disabled by default.
return Settings.builder()
.put(CompilerSettings.REGEX_ENABLED.getKey(), true)
.build();
}
public void testPatternAfterReturn() { public void testPatternAfterReturn() {
assertEquals(true, exec("return 'foo' ==~ /foo/")); assertEquals(true, exec("return 'foo' ==~ /foo/"));
assertEquals(false, exec("return 'bar' ==~ /foo/")); assertEquals(false, exec("return 'bar' ==~ /foo/"));

View File

@ -45,7 +45,14 @@ public abstract class ScriptTestCase extends ESTestCase {
@Before @Before
public void setup() { public void setup() {
scriptEngine = new PainlessScriptEngineService(Settings.EMPTY); scriptEngine = new PainlessScriptEngineService(scriptEngineSettings());
}
/**
* Settings used to build the script engine. Override to customize settings like {@link RegexTests} does to enable regexes.
*/
protected Settings scriptEngineSettings() {
return Settings.EMPTY;
} }
/** Compiles and returns the result of {@code script} */ /** Compiles and returns the result of {@code script} */
@ -71,6 +78,7 @@ public abstract class ScriptTestCase extends ESTestCase {
if (picky) { if (picky) {
CompilerSettings pickySettings = new CompilerSettings(); CompilerSettings pickySettings = new CompilerSettings();
pickySettings.setPicky(true); pickySettings.setPicky(true);
pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings()));
Walker.buildPainlessTree(getTestName(), script, pickySettings, null); Walker.buildPainlessTree(getTestName(), script, pickySettings, null);
} }
// test actual script execution // test actual script execution

View File

@ -20,14 +20,13 @@
package org.elasticsearch.painless; package org.elasticsearch.painless;
import org.apache.lucene.util.Constants; import org.apache.lucene.util.Constants;
import org.elasticsearch.script.ScriptException;
import java.lang.invoke.WrongMethodTypeException; import java.lang.invoke.WrongMethodTypeException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import static java.util.Collections.emptyMap; import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.containsString; import static java.util.Collections.singletonMap;
public class WhenThingsGoWrongTests extends ScriptTestCase { public class WhenThingsGoWrongTests extends ScriptTestCase {
public void testNullPointer() { public void testNullPointer() {
@ -234,4 +233,16 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
exec("void recurse(int x, int y) {recurse(x, y)} recurse(1, 2);"); exec("void recurse(int x, int y) {recurse(x, y)} recurse(1, 2);");
}); });
} }
public void testRegexDisabledByDefault() {
IllegalStateException e = expectThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/"));
assertEquals("Regexes are disabled. Set [script.painless.regex.enabled] to [true] in elasticsearch.yaml to allow them. "
+ "Be careful though, regexes break out of Painless's protection against deep recursion and long loops.", e.getMessage());
}
public void testCanNotOverrideRegexEnabled() {
IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
() -> exec("", null, singletonMap(CompilerSettings.REGEX_ENABLED.getKey(), "true"), null, false));
assertEquals("[painless.regex.enabled] can only be set on node startup.", e.getMessage());
}
} }

View File

@ -0,0 +1,33 @@
---
"Regex in update fails":
- do:
index:
index: test_1
type: test
id: 1
body:
foo: bar
count: 1
- do:
catch: /Regexes are disabled. Set \[script.painless.regex.enabled\] to \[true\] in elasticsearch.yml to allow them. Be careful though, regexes break out of Painless's protection against deep recursion and long loops./
update:
index: test_1
type: test
id: 1
body:
script:
lang: painless
inline: "ctx._source.foo = params.bar ==~ /cat/"
params: { bar: 'xxx' }
---
"Regex enabled is not a dynamic setting":
- do:
catch: /setting \[script.painless.regex.enabled\], not dynamically updateable/
cluster.put_settings:
body:
transient:
script.painless.regex.enabled: true

View File

@ -143,15 +143,14 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase {
} }
private void setupNode() throws Exception { private void setupNode() throws Exception {
Path dataDir = createTempDir(); Path clusterDir = createTempDir();
Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName()));
try (InputStream stream = PercolatorBackwardsCompatibilityTests.class. try (InputStream stream = PercolatorBackwardsCompatibilityTests.class.
getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) { getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) {
TestUtil.unzip(stream, clusterDir); TestUtil.unzip(stream, clusterDir);
} }
Settings.Builder nodeSettings = Settings.builder() Settings.Builder nodeSettings = Settings.builder()
.put(Environment.PATH_DATA_SETTING.getKey(), dataDir); .put(Environment.PATH_DATA_SETTING.getKey(), clusterDir);
internalCluster().startNode(nodeSettings.build()); internalCluster().startNode(nodeSettings.build());
ensureGreen(INDEX_NAME); ensureGreen(INDEX_NAME);
} }

View File

@ -74,11 +74,11 @@ public class ListPluginsCommandTests extends ESTestCase {
return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n")); return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n"));
} }
static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException { static void buildFakePlugin(Environment env, String description, String name, String classname, String version) throws IOException {
PluginTestUtil.writeProperties(env.pluginsFile().resolve(name), PluginTestUtil.writeProperties(env.pluginsFile().resolve(name),
"description", description, "description", description,
"name", name, "name", name,
"version", "1.0", "version", version,
"elasticsearch.version", Version.CURRENT.toString(), "elasticsearch.version", Version.CURRENT.toString(),
"java.version", System.getProperty("java.specification.version"), "java.version", System.getProperty("java.specification.version"),
"classname", classname); "classname", classname);
@ -97,43 +97,43 @@ public class ListPluginsCommandTests extends ESTestCase {
} }
public void testOnePlugin() throws Exception { public void testOnePlugin() throws Exception {
buildFakePlugin(env, "fake desc", "fake", "org.fake"); buildFakePlugin(env, "fake desc", "fake", "org.fake", "1.0.0");
MockTerminal terminal = listPlugins(home); MockTerminal terminal = listPlugins(home);
assertEquals(terminal.getOutput(), buildMultiline("fake")); assertEquals(terminal.getOutput(), buildMultiline("fake@1.0.0"));
} }
public void testTwoPlugins() throws Exception { public void testTwoPlugins() throws Exception {
buildFakePlugin(env, "fake desc", "fake1", "org.fake"); buildFakePlugin(env, "fake desc", "fake1", "org.fake", "1.2.3");
buildFakePlugin(env, "fake desc 2", "fake2", "org.fake"); buildFakePlugin(env, "fake desc 2", "fake2", "org.fake", "6.5.4");
MockTerminal terminal = listPlugins(home); MockTerminal terminal = listPlugins(home);
assertEquals(terminal.getOutput(), buildMultiline("fake1", "fake2")); assertEquals(terminal.getOutput(), buildMultiline("fake1@1.2.3", "fake2@6.5.4"));
} }
public void testPluginWithVerbose() throws Exception { public void testPluginWithVerbose() throws Exception {
buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake"); buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake", "1.0.0");
String[] params = { "-v" }; String[] params = { "-v" };
MockTerminal terminal = listPlugins(home, params); MockTerminal terminal = listPlugins(home, params);
assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin", assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin@1.0.0",
"- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0", " * Classname: org.fake")); "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0.0", " * Classname: org.fake"));
} }
public void testPluginWithVerboseMultiplePlugins() throws Exception { public void testPluginWithVerboseMultiplePlugins() throws Exception {
buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.2.3");
buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "6.5.4");
String[] params = { "-v" }; String[] params = { "-v" };
MockTerminal terminal = listPlugins(home, params); MockTerminal terminal = listPlugins(home, params);
assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(),
"fake_plugin1", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0", "fake_plugin1@1.2.3", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.2.3",
" * Classname: org.fake", "fake_plugin2", "- Plugin information:", "Name: fake_plugin2", " * Classname: org.fake", "fake_plugin2@6.5.4", "- Plugin information:", "Name: fake_plugin2",
"Description: fake desc 2", "Version: 1.0", " * Classname: org.fake2")); "Description: fake desc 2", "Version: 6.5.4", " * Classname: org.fake2"));
} }
public void testPluginWithoutVerboseMultiplePlugins() throws Exception { public void testPluginWithoutVerboseMultiplePlugins() throws Exception {
buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake"); buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.0.0");
buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2"); buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "1.0.0");
MockTerminal terminal = listPlugins(home, new String[0]); MockTerminal terminal = listPlugins(home, new String[0]);
String output = terminal.getOutput(); String output = terminal.getOutput();
assertEquals(output, buildMultiline("fake_plugin1", "fake_plugin2")); assertEquals(output, buildMultiline("fake_plugin1@1.0.0", "fake_plugin2@1.0.0"));
} }
public void testPluginWithoutDescriptorFile() throws Exception{ public void testPluginWithoutDescriptorFile() throws Exception{

View File

@ -37,7 +37,8 @@ List<String> availableBoxes = [
'sles-12', 'sles-12',
'ubuntu-1204', 'ubuntu-1204',
'ubuntu-1404', 'ubuntu-1404',
'ubuntu-1504' 'ubuntu-1504',
'ubuntu-1604'
] ]
String vagrantBoxes = getProperties().get('vagrant.boxes', 'sample') String vagrantBoxes = getProperties().get('vagrant.boxes', 'sample')
@ -122,7 +123,7 @@ task stop {
Set<String> getVersions() { Set<String> getVersions() {
Node xml Node xml
new URL('http://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s -> new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
xml = new XmlParser().parse(s) xml = new XmlParser().parse(s)
} }
return new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /2\.\d\.\d/ }) return new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /2\.\d\.\d/ })

View File

@ -300,7 +300,7 @@ fi
} }
@test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" { @test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" {
"$ESHOME/bin/elasticsearch-plugin" list > /tmp/installed "$ESHOME/bin/elasticsearch-plugin" list | cut -d'@' -f1 > /tmp/installed
compare_plugins_list "/tmp/installed" "'plugins list'" compare_plugins_list "/tmp/installed" "'plugins list'"
} }

View File

@ -22,8 +22,8 @@
- is_true: nodes.os.mem.total_in_bytes - is_true: nodes.os.mem.total_in_bytes
- is_true: nodes.os.mem.free_in_bytes - is_true: nodes.os.mem.free_in_bytes
- is_true: nodes.os.mem.used_in_bytes - is_true: nodes.os.mem.used_in_bytes
- is_true: nodes.os.mem.free_percent - gte: { nodes.os.mem.free_percent: 0 }
- is_true: nodes.os.mem.used_percent - gte: { nodes.os.mem.used_percent: 0 }
- is_true: nodes.process - is_true: nodes.process
- is_true: nodes.jvm - is_true: nodes.jvm
- is_true: nodes.fs - is_true: nodes.fs

View File

@ -38,10 +38,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationD
import org.elasticsearch.common.settings.ClusterSettings; import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress; import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.gateway.AsyncShardFetch;
import org.elasticsearch.gateway.GatewayAllocator; import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.ReplicaShardAllocator;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.test.ESTestCase; import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.gateway.NoopGatewayAllocator; import org.elasticsearch.test.gateway.NoopGatewayAllocator;
@ -209,14 +206,6 @@ public abstract class ESAllocationTestCase extends ESTestCase {
* Mocks behavior in ReplicaShardAllocator to remove delayed shards from list of unassigned shards so they don't get reassigned yet. * Mocks behavior in ReplicaShardAllocator to remove delayed shards from list of unassigned shards so they don't get reassigned yet.
*/ */
protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator { protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator {
private final ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) {
@Override
protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>
fetchData(ShardRouting shard, RoutingAllocation allocation) {
return new AsyncShardFetch.FetchResult<>(shard.shardId(), null, Collections.emptySet(), Collections.emptySet());
}
};
public DelayedShardsMockGatewayAllocator() { public DelayedShardsMockGatewayAllocator() {
super(Settings.EMPTY, null, null); super(Settings.EMPTY, null, null);
@ -236,7 +225,9 @@ public abstract class ESAllocationTestCase extends ESTestCase {
if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) { if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
continue; continue;
} }
replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes()); if (shard.unassignedInfo().isDelayed()) {
unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes());
}
} }
} }
} }

View File

@ -0,0 +1,477 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.index.shard;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.query.DisabledQueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoverySourceHandler;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.indices.recovery.StartRecoveryRequest;
import org.elasticsearch.test.DummyShardLock;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.hasSize;
/**
* A base class for unit tests that need to create and shut down {@link IndexShard} instances easily,
* containing utilities for shard creation and recoveries. See {@link #newShard(boolean)} and
* {@link #newStartedShard()} for good starting points.
*/
public abstract class IndexShardTestCase extends ESTestCase {
protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
@Override
public void onRecoveryDone(RecoveryState state) {
}
@Override
public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
throw new AssertionError(e);
}
};
protected ThreadPool threadPool;
@Override
public void setUp() throws Exception {
super.setUp();
threadPool = new TestThreadPool(getClass().getName());
}
@Override
public void tearDown() throws Exception {
try {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
} finally {
super.tearDown();
}
}
private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
final ShardId shardId = shardPath.getShardId();
final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
@Override
public Directory newDirectory() throws IOException {
return newFSDirectory(shardPath.resolveIndex());
}
@Override
public long throttleTimeInNanos() {
return 0;
}
};
return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
}
/**
* creates a new initializing shard. The shard will have its own unique data path.
*
* @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
* (ready to recover from another shard)
*/
protected IndexShard newShard(boolean primary) throws IOException {
ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), "n1", primary,
ShardRoutingState.INITIALIZING,
primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
return newShard(shardRouting);
}
/**
* creates a new initializing shard. The shard will have its own unique data path.
*
* @param shardRouting the {@link ShardRouting} to use for this shard
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting shardRouting, IndexingOperationListener... listeners) throws IOException {
assert shardRouting.initializing() : shardRouting;
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
.build();
IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName())
.settings(settings)
.primaryTerm(0, 1);
return newShard(shardRouting, metaData.build(), listeners);
}
/**
* creates a new initializing shard. The shard will have its own unique data path.
*
* @param shardId the shard id to use
* @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
* (ready to recover from another shard)
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperationListener... listeners) throws IOException {
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAsciiOfLength(5), primary,
ShardRoutingState.INITIALIZING,
primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
return newShard(shardRouting, listeners);
}
/**
* creates a new initializing shard. The shard will be put in its proper path under the
* supplied node id.
*
* @param shardId the shard id to use
* @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
* (ready to recover from another shard)
*/
protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData,
@Nullable IndexSearcherWrapper searcherWrapper) throws IOException {
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING,
primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
return newShard(shardRouting, indexMetaData, searcherWrapper);
}
/**
* creates a new initializing shard. The shard will be put in its proper path under the
* current node id the shard is assigned to.
*
* @param routing shard routing to use
* @param indexMetaData indexMetaData for the shard, including any mapping
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, IndexingOperationListener... listeners)
throws IOException {
return newShard(routing, indexMetaData, null, listeners);
}
/**
* creates a new initializing shard. The shard will be put in its proper path under the
* current node id the shard is assigned to.
*
* @param routing shard routing to use
* @param indexMetaData indexMetaData for the shard, including any mapping
* @param indexSearcherWrapper an optional wrapper to be applied to the shard's searchers
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData,
@Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingOperationListener... listeners)
throws IOException {
// add node id as name to settings for proper logging
final ShardId shardId = routing.shardId();
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, listeners);
}
/**
* creates a new initializing shard.
*
* @param routing shard routing to use
* @param shardPath path to use for shard data
* @param indexMetaData indexMetaData for the shard, including any mapping
* @param indexSearcherWrapper an optional wrapper to be applied to the shard's searchers
* @param listeners an optional set of listeners to add to the shard
*/
protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData,
@Nullable IndexSearcherWrapper indexSearcherWrapper,
IndexingOperationListener... listeners) throws IOException {
final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
final IndexShard indexShard;
final Store store = createStore(indexSettings, shardPath);
boolean success = false;
try {
IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), indexSettings.getSettings());
for (ObjectObjectCursor<String, MappingMetaData> typeMapping : indexMetaData.getMappings()) {
mapperService.merge(typeMapping.key, typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
}
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
final IndexEventListener indexEventListener = new IndexEventListener() {
};
final Engine.Warmer warmer = searcher -> {
};
IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
});
IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache,
new NoneCircuitBreakerService(), mapperService);
indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService,
indexFieldDataService, null, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer,
Collections.emptyList(), Arrays.asList(listeners));
success = true;
} finally {
if (success == false) {
IOUtils.close(store);
}
}
return indexShard;
}
/**
* Takes an existing shard, closes it and starts a new initializing shard at the same location
*
* @param listeners new listeners to use for the newly created shard
*/
protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException {
final ShardRouting shardRouting = current.routingEntry();
return reinitShard(current, ShardRoutingHelper.initWithSameId(shardRouting,
shardRouting.primary() ? RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
), listeners);
}
/**
* Takes an existing shard, closes it and starts a new initializing shard at the same location
*
* @param routing the shard routing to use for the newly created shard.
* @param listeners new listeners to use for the newly created shard
*/
protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException {
closeShards(current);
return newShard(routing, current.shardPath(), current.indexSettings().getIndexMetaData(), null, listeners);
}
/**
* creates a new empty shard and starts it. The shard will be either a replica or a primary.
*/
protected IndexShard newStartedShard() throws IOException {
return newStartedShard(randomBoolean());
}
/**
* creates a new empty shard and starts it.
*
* @param primary controls whether the shard will be a primary or a replica.
*/
protected IndexShard newStartedShard(boolean primary) throws IOException {
IndexShard shard = newShard(primary);
if (primary) {
recoveryShardFromStore(shard);
} else {
recoveryEmptyReplica(shard);
}
return shard;
}
protected void closeShards(IndexShard... shards) throws IOException {
closeShards(Arrays.asList(shards));
}
protected void closeShards(Iterable<IndexShard> shards) throws IOException {
for (IndexShard shard : shards) {
if (shard != null) {
try {
shard.close("test", false);
} finally {
IOUtils.close(shard.store());
}
}
}
}
protected void recoveryShardFromStore(IndexShard primary) throws IOException {
primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(),
getFakeDiscoNode(primary.routingEntry().currentNodeId()),
null));
primary.recoverFromStore();
primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry()));
}
protected void recoveryEmptyReplica(IndexShard replica) throws IOException {
IndexShard primary = null;
try {
primary = newStartedShard(true);
recoverReplica(replica, primary);
} finally {
closeShards(primary);
}
}
private DiscoveryNode getFakeDiscoNode(String id) {
return new DiscoveryNode(id, new LocalTransportAddress("_fake_" + id), Version.CURRENT);
}
/** recovers a replica from the given primary **/
protected void recoverReplica(IndexShard replica, IndexShard primary) throws IOException {
recoverReplica(replica, primary,
(r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> {
}),
true);
}
/**
* Recovers a replica from the given primary, allowing the user to supply a custom recovery target.
* A typical usage of a custom recovery target is to assert things in the various stages of recovery
*
* @param markAsRecovering set to false if you have already marked the replica as recovering
*/
protected void recoverReplica(IndexShard replica, IndexShard primary,
BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
boolean markAsRecovering)
throws IOException {
final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId());
final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId());
if (markAsRecovering) {
replica.markAsRecovering("remote",
new RecoveryState(replica.routingEntry(), pNode, rNode));
} else {
assertEquals(replica.state(), IndexShardState.RECOVERING);
}
replica.prepareForIndexRecovery();
RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
getMetadataSnapshotOrEmpty(replica), false, 0);
RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {
},
(int) ByteSizeUnit.MB.toKB(1), logger);
recovery.recoverToTarget();
recoveryTarget.markAsDone();
replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
}
private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
Store.MetadataSnapshot result;
try {
result = replica.snapshotStoreMetadata();
} catch (IndexNotFoundException e) {
// OK!
result = Store.MetadataSnapshot.EMPTY;
} catch (IOException e) {
logger.warn("failed read store, treating as empty", e);
result = Store.MetadataSnapshot.EMPTY;
}
return result;
}
protected Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
shard.refresh("get_uids");
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
Set<Uid> ids = new HashSet<>();
for (LeafReaderContext leafContext : searcher.reader().leaves()) {
LeafReader reader = leafContext.reader();
Bits liveDocs = reader.getLiveDocs();
for (int i = 0; i < reader.maxDoc(); i++) {
if (liveDocs == null || liveDocs.get(i)) {
Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME));
ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME)));
}
}
}
return ids;
}
}
protected void assertDocCount(IndexShard shard, int docCount) throws IOException {
assertThat(getShardDocUIDs(shard), hasSize(docCount));
}
protected void assertDocs(IndexShard shard, Uid... uids) throws IOException {
final Set<Uid> shardDocUIDs = getShardDocUIDs(shard);
assertThat(shardDocUIDs, contains(uids));
assertThat(shardDocUIDs, hasSize(uids.length));
}
protected Engine.Index indexDoc(IndexShard shard, String type, String id) {
return indexDoc(shard, type, id, "{}");
}
protected Engine.Index indexDoc(IndexShard shard, String type, String id, String source) {
final Engine.Index index;
if (shard.routingEntry().primary()) {
index = shard.prepareIndexOnPrimary(
SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)),
Versions.MATCH_ANY, VersionType.INTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
} else {
index = shard.prepareIndexOnReplica(
SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)),
1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
}
shard.index(index);
return index;
}
protected Engine.Delete deleteDoc(IndexShard shard, String type, String id) {
final Engine.Delete delete;
if (shard.routingEntry().primary()) {
delete = shard.prepareDeleteOnPrimary(type, id, Versions.MATCH_ANY, VersionType.INTERNAL);
} else {
delete = shard.prepareDeleteOnPrimary(type, id, 1, VersionType.EXTERNAL);
}
shard.delete(delete);
return delete;
}
protected void flushShard(IndexShard shard) {
flushShard(shard, false);
}
protected void flushShard(IndexShard shard, boolean force) {
shard.flush(new FlushRequest(shard.shardId().getIndexName()).force(force));
}
}
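To illustrate the intended workflow end to end, here is a minimal sketch of a unit test built on these helpers; the test class name and document content are hypothetical:

[source,java]
--------------------------------------------------
public class MyShardTests extends IndexShardTestCase {
    public void testIndexAndDelete() throws Exception {
        IndexShard shard = newStartedShard(true);  // empty primary recovered from store
        try {
            indexDoc(shard, "type", "1", "{\"foo\": \"bar\"}");
            flushShard(shard);
            assertDocCount(shard, 1);
            deleteDoc(shard, "type", "1");
            assertDocCount(shard, 0);
        } finally {
            closeShards(shard);                    // always release the shard and its store
        }
    }
}
--------------------------------------------------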

View File

@ -19,18 +19,21 @@
package org.elasticsearch.node; package org.elasticsearch.node;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.MockBigArrays; import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService; import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.internal.InternalSettingsPreparer; import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin; import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin; import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.MockSearchService; import org.elasticsearch.search.MockSearchService;
import org.elasticsearch.search.SearchService; import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.threadpool.ThreadPool;
import java.util.Collection; import java.util.Collection;
import java.util.List;
/** /**
* A node for testing which allows: * A node for testing which allows:
@ -62,11 +65,15 @@ public class MockNode extends Node {
return new MockBigArrays(settings, circuitBreakerService); return new MockBigArrays(settings, circuitBreakerService);
} }
@Override @Override
protected Class<? extends SearchService> pickSearchServiceImplementation() { protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
FetchPhase fetchPhase) {
if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) { if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) {
return super.pickSearchServiceImplementation(); return super.newSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
} }
return MockSearchService.class; return new MockSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
} }
} }
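Since `SearchService` construction is now a plain factory method instead of a Guice binding, other test infrastructure can follow the same pattern; a hypothetical subclass:

[source,java]
--------------------------------------------------
// Hypothetical node that swaps in its own SearchService; MySearchService is assumed
// to take the same constructor arguments as SearchService.
class MySearchTestNode extends Node {
    MySearchTestNode(Settings settings) {
        super(settings);
    }

    @Override
    protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
                                             ThreadPool threadPool, ScriptService scriptService,
                                             BigArrays bigArrays, FetchPhase fetchPhase) {
        return new MySearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
    }
}
--------------------------------------------------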

View File

@ -20,9 +20,6 @@
package org.elasticsearch.search; package org.elasticsearch.search;
import org.elasticsearch.cluster.service.ClusterService; import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays; import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.IndicesService; import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.MockNode; import org.elasticsearch.node.MockNode;
@ -69,11 +66,10 @@ public class MockSearchService extends SearchService {
ACTIVE_SEARCH_CONTEXTS.remove(context); ACTIVE_SEARCH_CONTEXTS.remove(context);
} }
@Inject public MockSearchService(ClusterService clusterService,
public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService,
IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService, IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService,
BigArrays bigArrays, FetchPhase fetchPhase) { BigArrays bigArrays, FetchPhase fetchPhase) {
super(settings, clusterSettings, clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase); super(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
} }
@Override @Override

View File

@ -1045,12 +1045,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
scriptSettings.addAll(pluginsService.getPluginSettings()); scriptSettings.addAll(pluginsService.getPluginSettings());
scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED); scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
SettingsModule settingsModule = new SettingsModule(nodeSettings, scriptSettings, pluginsService.getPluginSettingsFilter()); SettingsModule settingsModule = new SettingsModule(nodeSettings, scriptSettings, pluginsService.getPluginSettingsFilter());
searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)) { searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
@Override
protected void configureSearch() {
// Skip me
}
};
IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)) { IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)) {
@Override @Override
public void configure() { public void configure() {

View File

@ -42,11 +42,16 @@ import static junit.framework.TestCase.fail;
public class ClusterServiceUtils { public class ClusterServiceUtils {
public static ClusterService createClusterService(ThreadPool threadPool) { public static ClusterService createClusterService(ThreadPool threadPool) {
DiscoveryNode discoveryNode = new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT);
return createClusterService(threadPool, discoveryNode);
}
public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode) {
ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(), ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(),
new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS), new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
threadPool); threadPool);
clusterService.setLocalNode(new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(), clusterService.setLocalNode(localNode);
new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())),Version.CURRENT));
clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) { clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
@Override @Override
public void connectToAddedNodes(ClusterChangedEvent event) { public void connectToAddedNodes(ClusterChangedEvent event) {
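With the new overload, a test that cares about the local node's identity supplies its own `DiscoveryNode`; a brief, illustrative usage sketch:

[source,java]
--------------------------------------------------
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.concurrent.TimeUnit;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.test.ClusterServiceUtils;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;

public class ClusterServiceUtilsSketch {
    public static void main(String[] args) {
        ThreadPool threadPool = new TestThreadPool("sketch");
        DiscoveryNode localNode = new DiscoveryNode("test_node", LocalTransportAddress.buildUnique(),
                Collections.emptyMap(), new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
        ClusterService clusterService = ClusterServiceUtils.createClusterService(threadPool, localNode);
        // ... exercise the cluster service in the test ...
        clusterService.close();
        ThreadPool.terminate(threadPool, 10, TimeUnit.SECONDS);
    }
}
--------------------------------------------------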

View File

@ -2064,8 +2064,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
} }
throw new IllegalStateException(builder.toString()); throw new IllegalStateException(builder.toString());
} }
Path src = list[0]; Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER);
Path dest = dataDir.resolve(internalCluster().getClusterName()); Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER);
assertTrue(Files.exists(src)); assertTrue(Files.exists(src));
Files.move(src, dest); Files.move(src, dest);
assertFalse(Files.exists(src)); assertFalse(Files.exists(src));

Some files were not shown because too many files have changed in this diff.