Merge branch 'master' into feature/rank-eval
commit 9e2ec53458
@@ -364,6 +364,7 @@ These are the linux flavors the Vagrantfile currently supports:

* ubuntu-1204 aka precise
* ubuntu-1404 aka trusty
* ubuntu-1504 aka vivid
* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
* centos-6
* centos-7
@@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
    SHELL
  end
  config.vm.define "ubuntu-1604" do |config|
    config.vm.box = "elastic/ubuntu-16.04-x86_64"
    ubuntu_common config, extra: <<-SHELL
      # Install Jayatana so we can work around it being present.
      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
    SHELL
  end
  # Wheezy's backports don't contain Openjdk 8 and the backflips required to
  # get the sun jdk on there just aren't worth it. We have jessie for testing
  # debian and it works fine.
@@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
    private static String findJavaHome() {
        String javaHome = System.getenv('JAVA_HOME')
        if (javaHome == null) {
            if (System.getProperty("idea.active") != null) {
            if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
                // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
                javaHome = Jvm.current().javaHome
            } else {
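The change above broadens the IDE fallback in findJavaHome() so that Eclipse launches are treated like IntelliJ launches. Below is a minimal Java sketch of that fallback logic, separate from the Groovy build code; the class and method names are illustrative only.

// Illustrative sketch only (the build itself does this in Groovy): prefer JAVA_HOME,
// and when running inside IntelliJ or Eclipse, which may not set it, fall back to
// the JVM the current process runs on.
final class JavaHomeResolver {
    static String resolveJavaHome() {
        String javaHome = System.getenv("JAVA_HOME");
        boolean launchedFromIde = System.getProperty("idea.active") != null
                || System.getProperty("eclipse.launcher") != null;
        if (javaHome == null && launchedFromIde) {
            // "java.home" is the running JVM, roughly what Jvm.current().javaHome reports in Gradle
            javaHome = System.getProperty("java.home");
        }
        return javaHome;
    }
}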
@@ -405,9 +405,9 @@ class BuildPlugin implements Plugin<Project> {
            //options.incremental = true

            if (project.javaVersion == JavaVersion.VERSION_1_9) {
                // hack until gradle supports java 9's new "-release" arg
                // hack until gradle supports java 9's new "--release" arg
                assert minimumJava == JavaVersion.VERSION_1_8
                options.compilerArgs << '-release' << '8'
                options.compilerArgs << '--release' << '8'
                project.sourceCompatibility = null
                project.targetCompatibility = null
            }
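The fix above switches the compiler argument to the double-dash form that javac 9 documents, --release. A small, self-contained sketch of invoking javac with that flag from Java follows; the source path is a placeholder.

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

// Minimal sketch: ask javac (JDK 9+) to compile against the JDK 8 API, which is what the
// corrected "--release" compiler argument requests. "src/Example.java" is a placeholder path.
public class ReleaseFlagDemo {
    public static void main(String[] args) {
        JavaCompiler javac = ToolProvider.getSystemJavaCompiler();
        int exitCode = javac.run(null, null, null, "--release", "8", "src/Example.java");
        System.out.println("javac exit code: " + exitCode);
    }
}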
@@ -38,17 +38,11 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.tasks.Task;
@@ -67,25 +61,15 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
 */
public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {

    private final IndicesService indicesService;

    private final ScriptService scriptService;

    private final BigArrays bigArrays;

    private final FetchPhase fetchPhase;
    private final SearchService searchService;

    @Inject
    public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                        TransportService transportService, IndicesService indicesService, ScriptService scriptService,
                                        BigArrays bigArrays, ActionFilters actionFilters,
                                        IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
                                        TransportService transportService, SearchService searchService, ActionFilters actionFilters,
                                        IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
            indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
        this.indicesService = indicesService;
        this.scriptService = scriptService;
        this.bigArrays = bigArrays;
        this.fetchPhase = fetchPhase;
        this.searchService = searchService;
    }

    @Override
@@ -161,29 +145,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid

    @Override
    protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().id());

        boolean valid;
        String explanation = null;
        String error = null;
        Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");

        DefaultSearchContext searchContext = new DefaultSearchContext(0,
            new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
            indexService, indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(),
            parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
            request.nowInMillis(), request.filteringAliases());
        SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
        SearchContext.setCurrent(searchContext);
        try {
            searchContext.parsedQuery(searchContext.getQueryShardContext().toQuery(request.query()));
            searchContext.preProcess();

            ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
            searchContext.parsedQuery(parsedQuery);
            searchContext.preProcess(request.rewrite());
            valid = true;
            if (request.rewrite()) {
                explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
            } else if (request.explain()) {
                explanation = searchContext.filteredQuery().query().toString();
            }
            explanation = explain(searchContext, request.rewrite());
        } catch (QueryShardException|ParsingException e) {
            valid = false;
            error = e.getDetailedMessage();
@@ -191,19 +166,18 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
            valid = false;
            error = e.getMessage();
        } finally {
            searchContext.close();
            SearchContext.removeCurrent();
            Releasables.close(searchContext, () -> SearchContext.removeCurrent());
        }

        return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
    }

    private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException {
        Query queryRewrite = searcher.rewrite(query);
        if (queryRewrite instanceof MatchNoDocsQuery) {
            return query.toString();
    private String explain(SearchContext context, boolean rewritten) throws IOException {
        Query query = context.query();
        if (rewritten && query instanceof MatchNoDocsQuery) {
            return context.parsedQuery().query().toString();
        } else {
            return queryRewrite.toString();
            return query.toString();
        }
    }
}
@@ -31,20 +31,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.rescore.RescoreSearchContext;
@@ -60,26 +54,15 @@ import java.io.IOException;
// TODO: AggregatedDfs. Currently the idf can be different then when executing a normal search with explain.
public class TransportExplainAction extends TransportSingleShardAction<ExplainRequest, ExplainResponse> {

    private final IndicesService indicesService;

    private final ScriptService scriptService;

    private final BigArrays bigArrays;

    private final FetchPhase fetchPhase;
    private final SearchService searchService;

    @Inject
    public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                  TransportService transportService, IndicesService indicesService, ScriptService scriptService,
                                  BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                  FetchPhase fetchPhase) {
                                  TransportService transportService, SearchService searchService, ActionFilters actionFilters,
                                  IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
            ExplainRequest::new, ThreadPool.Names.GET);
        this.indicesService = indicesService;
        this.scriptService = scriptService;
        this.bigArrays = bigArrays;
        this.fetchPhase = fetchPhase;
        this.searchService = searchService;
    }

    @Override
@@ -104,23 +87,19 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe

    @Override
    protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard indexShard = indexService.getShard(shardId.id());
        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
            new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
        SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
        Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
        Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
        if (!result.exists()) {
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
        }

        SearchContext context = new DefaultSearchContext(0,
            new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
            result.searcher(), indexService, indexShard, scriptService, bigArrays,
            threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
        SearchContext.setCurrent(context);

        Engine.GetResult result = null;
        try {
            result = context.indexShard().get(new Engine.Get(false, uidTerm));
            if (!result.exists()) {
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
            }
            context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
            context.preProcess();
            context.preProcess(true);
            int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
            Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
            for (RescoreSearchContext ctx : context.rescore()) {
@@ -131,7 +110,8 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
                // Advantage is that we're not opening a second searcher to retrieve the _source. Also
                // because we are working in the same searcher in engineGetResult we can be sure that a
                // doc isn't deleted between the initial get and this call.
                GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
                GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.fields(),
                    request.fetchSourceContext());
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
            } else {
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);
@@ -139,8 +119,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
        } catch (IOException e) {
            throw new ElasticsearchException("Could not explain", e);
        } finally {
            context.close();
            SearchContext.removeCurrent();
            Releasables.close(result, context, () -> SearchContext.removeCurrent());
        }
    }

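Both shard-level actions above now release their resources and clear the per-thread SearchContext with a single Releasables.close(...) call in the finally block, instead of separate close() and removeCurrent() statements. Below is a self-contained sketch of that close-several-things-at-once pattern, using stand-in types rather than the Elasticsearch classes.

import java.util.ArrayList;
import java.util.List;

// Stand-in for the Releasable idea: close() without a checked exception, so a cleanup
// step such as clearing a thread-local can be written as a lambda.
interface Releasable extends AutoCloseable {
    @Override
    void close();
}

final class CloseTogetherDemo {
    // Close each resource in order, still attempting the rest if one of them fails.
    static void closeAll(Releasable... resources) {
        RuntimeException failure = null;
        for (Releasable resource : resources) {
            try {
                resource.close();
            } catch (RuntimeException e) {
                if (failure == null) {
                    failure = e;
                } else {
                    failure.addSuppressed(e);
                }
            }
        }
        if (failure != null) {
            throw failure;
        }
    }

    public static void main(String[] args) {
        List<String> log = new ArrayList<>();
        Releasable searchContext = () -> log.add("search context closed");
        closeAll(searchContext, () -> log.add("thread-local cleared"));
        System.out.println(log); // [search context closed, thread-local cleared]
    }
}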
@ -61,7 +61,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
|
|||
|
||||
private VersionType versionType = VersionType.INTERNAL;
|
||||
private long version = Versions.MATCH_ANY;
|
||||
private boolean ignoreErrorsOnGeneratedFields;
|
||||
|
||||
public GetRequest() {
|
||||
type = "_all";
|
||||
|
@ -248,19 +247,10 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
|
|||
return this;
|
||||
}
|
||||
|
||||
public GetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
|
||||
this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
|
||||
return this;
|
||||
}
|
||||
|
||||
public VersionType versionType() {
|
||||
return this.versionType;
|
||||
}
|
||||
|
||||
public boolean ignoreErrorsOnGeneratedFields() {
|
||||
return ignoreErrorsOnGeneratedFields;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
super.readFrom(in);
|
||||
|
@ -278,7 +268,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
|
|||
}
|
||||
}
|
||||
realtime = in.readBoolean();
|
||||
this.ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
|
||||
this.versionType = VersionType.fromValue(in.readByte());
|
||||
this.version = in.readLong();
|
||||
|
@ -304,7 +293,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
|
|||
}
|
||||
}
|
||||
out.writeBoolean(realtime);
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
out.writeByte(versionType.getValue());
|
||||
out.writeLong(version);
|
||||
out.writeOptionalStreamable(fetchSourceContext);
|
||||
|
|
|
@ -155,11 +155,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
|
|||
return this;
|
||||
}
|
||||
|
||||
public GetRequestBuilder setIgnoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
|
||||
request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
|
||||
return this;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sets the version, which will cause the get operation to only be performed if a matching
|
||||
* version exists and no changes happened on the doc since then.
|
||||
|
|
|
@ -262,8 +262,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
String preference;
|
||||
boolean realtime = true;
|
||||
boolean refresh;
|
||||
public boolean ignoreErrorsOnGeneratedFields = false;
|
||||
|
||||
List<Item> items = new ArrayList<>();
|
||||
|
||||
public List<Item> getItems() {
|
||||
|
@ -338,11 +336,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
}
|
||||
|
||||
|
||||
public MultiGetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
|
||||
this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
|
||||
return this;
|
||||
}
|
||||
|
||||
public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
|
||||
return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
|
||||
}
|
||||
|
@ -510,7 +503,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
preference = in.readOptionalString();
|
||||
refresh = in.readBoolean();
|
||||
realtime = in.readBoolean();
|
||||
ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
|
||||
int size = in.readVInt();
|
||||
items = new ArrayList<>(size);
|
||||
|
@ -525,7 +517,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
out.writeOptionalString(preference);
|
||||
out.writeBoolean(refresh);
|
||||
out.writeBoolean(realtime);
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
|
||||
out.writeVInt(items.size());
|
||||
for (Item item : items) {
|
||||
|
|
|
@ -80,9 +80,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest
|
|||
request.realtime(realtime);
|
||||
return this;
|
||||
}
|
||||
|
||||
public MultiGetRequestBuilder setIgnoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
|
||||
request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,7 +35,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
|
|||
private String preference;
|
||||
boolean realtime = true;
|
||||
boolean refresh;
|
||||
boolean ignoreErrorsOnGeneratedFields = false;
|
||||
|
||||
IntArrayList locations;
|
||||
List<MultiGetRequest.Item> items;
|
||||
|
@ -52,7 +51,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
|
|||
preference = multiGetRequest.preference;
|
||||
realtime = multiGetRequest.realtime;
|
||||
refresh = multiGetRequest.refresh;
|
||||
ignoreErrorsOnGeneratedFields = multiGetRequest.ignoreErrorsOnGeneratedFields;
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -87,11 +85,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
|
|||
return this;
|
||||
}
|
||||
|
||||
public MultiGetShardRequest ignoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
|
||||
this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
|
||||
return this;
|
||||
}
|
||||
|
||||
public boolean refresh() {
|
||||
return this.refresh;
|
||||
}
|
||||
|
@ -130,7 +123,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
|
|||
preference = in.readOptionalString();
|
||||
refresh = in.readBoolean();
|
||||
realtime = in.readBoolean();
|
||||
ignoreErrorsOnGeneratedFields = in.readBoolean();
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -146,11 +138,5 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
|
|||
out.writeOptionalString(preference);
|
||||
out.writeBoolean(refresh);
|
||||
out.writeBoolean(realtime);
|
||||
out.writeBoolean(ignoreErrorsOnGeneratedFields);
|
||||
|
||||
}
|
||||
|
||||
public boolean ignoreErrorsOnGeneratedFields() {
|
||||
return ignoreErrorsOnGeneratedFields;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -93,7 +93,7 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
|
|||
}
|
||||
|
||||
GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
|
||||
request.realtime(), request.version(), request.versionType(), request.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
|
||||
request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
|
||||
return new GetResponse(result);
|
||||
}
|
||||
|
||||
|
|
|
@ -88,13 +88,15 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
|
|||
for (int i = 0; i < request.locations.size(); i++) {
|
||||
MultiGetRequest.Item item = request.items.get(i);
|
||||
try {
|
||||
GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(), item.versionType(), item.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
|
||||
GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(),
|
||||
item.versionType(), item.fetchSourceContext());
|
||||
response.add(request.locations.get(i), new GetResponse(getResult));
|
||||
} catch (Exception e) {
|
||||
if (TransportActions.isShardNotAvailableException(e)) {
|
||||
throw (ElasticsearchException) e;
|
||||
} else {
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
|
||||
logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
|
||||
item.type(), item.id()), e);
|
||||
response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -40,8 +40,6 @@ import org.elasticsearch.common.Nullable;
|
|||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchPhaseResult;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
|
|
|
@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
|
|
|
@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.dfs.AggregatedDfs;
|
||||
import org.elasticsearch.search.dfs.DfsSearchResult;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
|
|
|
@@ -17,7 +17,7 @@
 * under the License.
 */

package org.elasticsearch.search.controller;
package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import com.carrotsearch.hppc.ObjectObjectHashMap;
@@ -89,8 +89,7 @@ public class SearchPhaseController extends AbstractComponent {
    private final ScriptService scriptService;
    private final ClusterService clusterService;

    @Inject
    public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
    SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
        super(settings);
        this.bigArrays = bigArrays;
        this.scriptService = scriptService;
@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionRunnable;
|
|||
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
|
||||
|
|
|
@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.SearchShardTarget;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
|
||||
import org.elasticsearch.search.internal.InternalSearchResponse;
|
||||
|
|
|
@ -28,8 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
|
|
|
@ -29,8 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
|
|||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.util.concurrent.AtomicArray;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.search.fetch.FetchSearchResult;
|
||||
import org.elasticsearch.search.fetch.ShardFetchRequest;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
|
|
|
@ -17,17 +17,15 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.action;
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.ActionListenerResponseHandler;
|
||||
import org.elasticsearch.action.IndicesRequest;
|
||||
import org.elasticsearch.action.OriginalIndices;
|
||||
import org.elasticsearch.action.search.SearchRequest;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -75,8 +73,7 @@ public class SearchTransportService extends AbstractComponent {
|
|||
private final TransportService transportService;
|
||||
private final SearchService searchService;
|
||||
|
||||
@Inject
|
||||
public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
|
||||
SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
|
||||
super(settings);
|
||||
this.transportService = transportService;
|
||||
this.searchService = searchService;
|
|
@ -32,7 +32,7 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.concurrent.CountDown;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
@ -53,11 +53,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
|
|||
|
||||
@Inject
|
||||
public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
|
||||
ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
ClusterService clusterService, SearchService searchService,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -29,10 +29,11 @@ import org.elasticsearch.common.inject.Inject;
|
|||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.indices.IndexClosedException;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
|
@ -53,13 +54,13 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
|
|||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
@Inject
|
||||
public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
|
||||
TransportService transportService, SearchTransportService searchTransportService,
|
||||
public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
|
||||
TransportService transportService, SearchService searchService,
|
||||
ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
|
||||
indexNameExpressionResolver) {
|
||||
super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);;
|
||||
this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
|
||||
this.clusterService = clusterService;
|
||||
}
|
||||
|
||||
|
|
|
@ -26,8 +26,9 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
|
|||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.search.controller.SearchPhaseController;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.SearchService;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
|
@ -45,15 +46,15 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
|
|||
private final SearchPhaseController searchPhaseController;
|
||||
|
||||
@Inject
|
||||
public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
|
||||
ClusterService clusterService, SearchTransportService searchTransportService,
|
||||
SearchPhaseController searchPhaseController,
|
||||
public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
|
||||
TransportService transportService,
|
||||
ClusterService clusterService, SearchService searchService,
|
||||
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
|
||||
super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
|
||||
SearchScrollRequest::new);
|
||||
this.clusterService = clusterService;
|
||||
this.searchTransportService = searchTransportService;
|
||||
this.searchPhaseController = searchPhaseController;
|
||||
this.searchTransportService = new SearchTransportService(settings, transportService, searchService);
|
||||
this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -76,7 +76,7 @@ public class UpdateHelper extends AbstractComponent {
|
|||
public Result prepare(UpdateRequest request, IndexShard indexShard) {
|
||||
final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
|
||||
new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
|
||||
true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
|
||||
true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
|
||||
return prepare(indexShard.shardId(), request, getResult);
|
||||
}
|
||||
|
||||
|
|
|
@ -257,11 +257,6 @@ final class Security {
|
|||
for (Path path : environment.dataFiles()) {
|
||||
addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
}
|
||||
// TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
|
||||
// https://github.com/elastic/elasticsearch/issues/20391
|
||||
for (Path path : environment.dataWithClusterFiles()) {
|
||||
addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
}
|
||||
for (Path path : environment.repoFiles()) {
|
||||
addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
|
||||
}
|
||||
|
|
|
@ -607,23 +607,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
|
|||
add = false;
|
||||
expression = expression.substring(1);
|
||||
}
|
||||
if (result == null) {
|
||||
// add all the previous ones...
|
||||
result = new HashSet<>(expressions.subList(0, i));
|
||||
}
|
||||
if (!Regex.isSimpleMatchPattern(expression)) {
|
||||
if (!unavailableIgnoredOrExists(options, metaData, expression)) {
|
||||
throw infe(expression);
|
||||
}
|
||||
if (result != null) {
|
||||
if (add) {
|
||||
result.add(expression);
|
||||
} else {
|
||||
result.remove(expression);
|
||||
}
|
||||
if (add) {
|
||||
result.add(expression);
|
||||
} else {
|
||||
result.remove(expression);
|
||||
}
|
||||
continue;
|
||||
}
|
||||
if (result == null) {
|
||||
// add all the previous ones...
|
||||
result = new HashSet<>(expressions.subList(0, i));
|
||||
}
|
||||
|
||||
final IndexMetaData.State excludeState = excludeState(options);
|
||||
final Map<String, AliasOrIndex> matches = matches(metaData, expression);
|
||||
|
|
|
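The reshuffled wildcard-resolution block above applies an expression list in which a leading '-' removes a name and any other entry adds one, seeding the working set with the expressions seen so far. A simplified, self-contained sketch of just that add/remove bookkeeping follows; it deliberately ignores the wildcard, alias, and IndicesOptions handling done by the real resolver.

import java.util.Arrays;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

// Simplified model: "logs-2016" adds an index name to the result, "-logs-2015" removes one.
public class ExpressionListDemo {
    static Set<String> resolve(List<String> expressions) {
        Set<String> result = new LinkedHashSet<>();
        for (String expression : expressions) {
            if (expression.length() > 1 && expression.charAt(0) == '-') {
                result.remove(expression.substring(1));
            } else {
                result.add(expression);
            }
        }
        return result;
    }

    public static void main(String[] args) {
        List<String> expressions = Arrays.asList("logs-2015", "logs-2016", "-logs-2015");
        System.out.println(resolve(expressions)); // [logs-2016]
    }
}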
@ -0,0 +1,205 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.Map;
|
||||
import java.util.Objects;
|
||||
|
||||
/**
|
||||
* Represents the allocation decision by an allocator for an unassigned shard.
|
||||
*/
|
||||
public class UnassignedShardDecision {
|
||||
/** a constant representing a shard decision where no decision was taken */
|
||||
public static final UnassignedShardDecision DECISION_NOT_TAKEN =
|
||||
new UnassignedShardDecision(null, null, null, null, null, null);
|
||||
|
||||
@Nullable
|
||||
private final Decision finalDecision;
|
||||
@Nullable
|
||||
private final AllocationStatus allocationStatus;
|
||||
@Nullable
|
||||
private final String finalExplanation;
|
||||
@Nullable
|
||||
private final String assignedNodeId;
|
||||
@Nullable
|
||||
private final String allocationId;
|
||||
@Nullable
|
||||
private final Map<String, Decision> nodeDecisions;
|
||||
|
||||
private UnassignedShardDecision(Decision finalDecision,
|
||||
AllocationStatus allocationStatus,
|
||||
String finalExplanation,
|
||||
String assignedNodeId,
|
||||
String allocationId,
|
||||
Map<String, Decision> nodeDecisions) {
|
||||
assert finalExplanation != null || finalDecision == null :
|
||||
"if a decision was taken, there must be an explanation for it";
|
||||
assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES :
|
||||
"a yes decision must have a node to assign the shard to";
|
||||
assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES :
|
||||
"only a yes decision should not have an allocation status";
|
||||
assert allocationId == null || assignedNodeId != null :
|
||||
"allocation id can only be null if the assigned node is null";
|
||||
this.finalDecision = finalDecision;
|
||||
this.allocationStatus = allocationStatus;
|
||||
this.finalExplanation = finalExplanation;
|
||||
this.assignedNodeId = assignedNodeId;
|
||||
this.allocationId = allocationId;
|
||||
this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision.
|
||||
*/
|
||||
public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) {
|
||||
return noDecision(allocationStatus, explanation, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision,
|
||||
* as well as the individual node-level decisions that comprised the final NO decision.
|
||||
*/
|
||||
public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus,
|
||||
String explanation,
|
||||
@Nullable Map<String, Decision> nodeDecisions) {
|
||||
Objects.requireNonNull(explanation, "explanation must not be null");
|
||||
Objects.requireNonNull(allocationStatus, "allocationStatus must not be null");
|
||||
return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a THROTTLE decision with the given explanation and individual node-level decisions that
|
||||
* comprised the final THROTTLE decision.
|
||||
*/
|
||||
public static UnassignedShardDecision throttleDecision(String explanation,
|
||||
Map<String, Decision> nodeDecisions) {
|
||||
Objects.requireNonNull(explanation, "explanation must not be null");
|
||||
return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
|
||||
nodeDecisions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Creates a YES decision with the given explanation and individual node-level decisions that
|
||||
* comprised the final YES decision, along with the node id to which the shard is assigned and
|
||||
* the allocation id for the shard, if available.
|
||||
*/
|
||||
public static UnassignedShardDecision yesDecision(String explanation,
|
||||
String assignedNodeId,
|
||||
@Nullable String allocationId,
|
||||
Map<String, Decision> nodeDecisions) {
|
||||
Objects.requireNonNull(explanation, "explanation must not be null");
|
||||
Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null");
|
||||
return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions);
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns <code>true</code> if a decision was taken by the allocator, {@code false} otherwise.
|
||||
* If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
|
||||
*/
|
||||
public boolean isDecisionTaken() {
|
||||
return finalDecision != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the final decision made by the allocator on whether to assign the unassigned shard.
|
||||
* This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}.
|
||||
*/
|
||||
@Nullable
|
||||
public Decision getFinalDecision() {
|
||||
return finalDecision;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the final decision made by the allocator on whether to assign the unassigned shard.
|
||||
* Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
|
||||
* throw an {@code IllegalArgumentException}.
|
||||
*/
|
||||
public Decision getFinalDecisionSafe() {
|
||||
if (isDecisionTaken() == false) {
|
||||
throw new IllegalArgumentException("decision must have been taken in order to return the final decision");
|
||||
}
|
||||
return finalDecision;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if
|
||||
* no decision was taken or if the decision was {@link Decision.Type#YES}.
|
||||
*/
|
||||
@Nullable
|
||||
public AllocationStatus getAllocationStatus() {
|
||||
return allocationStatus;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
|
||||
*/
|
||||
@Nullable
|
||||
public String getFinalExplanation() {
|
||||
return finalExplanation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
|
||||
* Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
|
||||
* throw an {@code IllegalArgumentException}.
|
||||
*/
|
||||
public String getFinalExplanationSafe() {
|
||||
if (isDecisionTaken() == false) {
|
||||
throw new IllegalArgumentException("decision must have been taken in order to return the final explanation");
|
||||
}
|
||||
return finalExplanation;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecision()} returns
|
||||
* a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
|
||||
*/
|
||||
@Nullable
|
||||
public String getAssignedNodeId() {
|
||||
return assignedNodeId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the allocation id for the existing shard copy that the allocator is assigning the shard to.
|
||||
* This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value
|
||||
* and the node on which the shard is assigned already has a shard copy with an in-sync allocation id
|
||||
* that we can re-use.
|
||||
*/
|
||||
@Nullable
|
||||
public String getAllocationId() {
|
||||
return allocationId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the individual node-level decisions that went into making the final decision as represented by
|
||||
* {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision}
|
||||
* as the decision for the given node.
|
||||
*/
|
||||
@Nullable
|
||||
public Map<String, Decision> getNodeDecisions() {
|
||||
return nodeDecisions;
|
||||
}
|
||||
}
|
|
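A brief usage sketch of the new UnassignedShardDecision factory methods and accessors defined above; the node id and allocation id values are placeholders, and the imports assume the package layout shown in the new file.

import java.util.Collections;

import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

// Illustrative only: exercise the three states exposed by the new class.
public class UnassignedShardDecisionDemo {
    public static void main(String[] args) {
        UnassignedShardDecision pending = UnassignedShardDecision.noDecision(
            AllocationStatus.FETCHING_SHARD_DATA, "still fetching shard state from the nodes in the cluster");
        UnassignedShardDecision assigned = UnassignedShardDecision.yesDecision(
            "found an in-sync copy on node-1", "node-1", "some-allocation-id", Collections.emptyMap());

        assert UnassignedShardDecision.DECISION_NOT_TAKEN.isDecisionTaken() == false;
        assert pending.getFinalDecision() == Decision.NO;
        assert "node-1".equals(assigned.getAssignedNodeId());
    }
}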
@@ -74,7 +74,7 @@ public class AllocationDeciders extends AllocationDecider {
            // short track if a NO is returned.
            if (decision == Decision.NO) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName());
                }
                // short circuit only if debugging is not enabled
                if (!allocation.debugDecision()) {
@@ -318,7 +318,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
        if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) {
            throw new IllegalStateException("Shouldn't publish state when not master");
        }
        nodesFD.updateNodesAndPing(clusterChangedEvent.state());

        try {
            publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
        } catch (FailedToCommitClusterStateException t) {
@@ -338,6 +338,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discover
            });
            throw t;
        }

        // update the set of nodes to ping after the new cluster state has been published
        nodesFD.updateNodesAndPing(clusterChangedEvent.state());
    }

    /**
     * Gets the current set of nodes involved in the node fault detection.
     * NB: for testing purposes
     */
    public Set<DiscoveryNode> getFaultDetectionNodes() {
        return nodesFD.getNodes();
    }

    @Override
@ -41,6 +41,8 @@ import org.elasticsearch.transport.TransportResponseHandler;
|
|||
import org.elasticsearch.transport.TransportService;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.ConcurrentMap;
|
||||
import java.util.concurrent.CopyOnWriteArrayList;
|
||||
|
||||
|
@ -91,6 +93,14 @@ public class NodesFaultDetection extends FaultDetection {
|
|||
listeners.remove(listener);
|
||||
}
|
||||
|
||||
/**
|
||||
* Gets the current set of nodes involved in node fault detection.
|
||||
* NB: For testing purposes.
|
||||
*/
|
||||
public Set<DiscoveryNode> getNodes() {
|
||||
return Collections.unmodifiableSet(nodesFD.keySet());
|
||||
}
|
||||
|
||||
/**
|
||||
* make sure that nodes in clusterState are pinged. Any pinging to nodes which are not
|
||||
* part of the cluster will be stopped
|
||||
|
|
|
@ -209,13 +209,6 @@ public final class NodeEnvironment implements Closeable {
|
|||
for (int dirIndex = 0; dirIndex < environment.dataFiles().length; dirIndex++) {
|
||||
Path dataDirWithClusterName = environment.dataWithClusterFiles()[dirIndex];
|
||||
Path dataDir = environment.dataFiles()[dirIndex];
|
||||
// TODO: Remove this in 6.0, we are no longer going to read from the cluster name directory
|
||||
if (readFromDataPathWithClusterName(dataDirWithClusterName)) {
|
||||
DeprecationLogger deprecationLogger = new DeprecationLogger(startupTraceLogger);
|
||||
deprecationLogger.deprecated("ES has detected the [path.data] folder using the cluster name as a folder [{}], " +
|
||||
"Elasticsearch 6.0 will not allow the cluster name as a folder within the data path", dataDir);
|
||||
dataDir = dataDirWithClusterName;
|
||||
}
|
||||
Path dir = dataDir.resolve(NODES_FOLDER).resolve(Integer.toString(possibleLockId));
|
||||
Files.createDirectories(dir);
|
||||
|
||||
|
@ -289,25 +282,6 @@ public final class NodeEnvironment implements Closeable {
|
|||
}
|
||||
}
|
||||
|
||||
// Visible for testing
|
||||
/** Returns true if data should be read from the data path that includes the cluster name (ie, it has data in it) */
|
||||
static boolean readFromDataPathWithClusterName(Path dataPathWithClusterName) throws IOException {
|
||||
if (Files.exists(dataPathWithClusterName) == false || // If it doesn't exist
|
||||
Files.isDirectory(dataPathWithClusterName) == false || // Or isn't a directory
|
||||
dirEmpty(dataPathWithClusterName)) { // Or if it's empty
|
||||
// No need to read from cluster-name folder!
|
||||
return false;
|
||||
}
|
||||
// The "nodes" directory inside of the cluster name
|
||||
Path nodesPath = dataPathWithClusterName.resolve(NODES_FOLDER);
|
||||
if (Files.isDirectory(nodesPath)) {
|
||||
// The cluster has data in the "nodes" so we should read from the cluster-named folder for now
|
||||
return true;
|
||||
}
|
||||
// Hey the nodes directory didn't exist, so we can safely use whatever directory we feel appropriate
|
||||
return false;
|
||||
}
|
||||
|
||||
private static void releaseAndNullLocks(Lock[] locks) {
|
||||
for (int i = 0; i < locks.length; i++) {
|
||||
if (locks[i] != null) {
|
||||
|
|
|
@ -0,0 +1,88 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.gateway;
|
||||
|
||||
import org.apache.logging.log4j.Logger;
|
||||
import org.elasticsearch.cluster.routing.RoutingNodes;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
|
||||
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
/**
|
||||
* An abstract class that implements basic functionality for allocating
|
||||
* shards to nodes based on shard copies that already exist in the cluster.
|
||||
*
|
||||
* Individual implementations of this class are responsible for providing
|
||||
* the logic to determine to which nodes (if any) those shards are allocated.
|
||||
*/
|
||||
public abstract class BaseGatewayShardAllocator extends AbstractComponent {
|
||||
|
||||
public BaseGatewayShardAllocator(Settings settings) {
|
||||
super(settings);
|
||||
}
|
||||
|
||||
/**
|
||||
* Allocate unassigned shards to nodes (if any) where valid copies of the shard already exist.
|
||||
* It is up to the individual implementations of {@link #makeAllocationDecision(ShardRouting, RoutingAllocation, Logger)}
|
||||
* to make decisions on assigning shards to nodes.
|
||||
*
|
||||
* @param allocation the allocation state container object
|
||||
*/
|
||||
public void allocateUnassigned(RoutingAllocation allocation) {
|
||||
final RoutingNodes routingNodes = allocation.routingNodes();
|
||||
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
|
||||
while (unassignedIterator.hasNext()) {
|
||||
final ShardRouting shard = unassignedIterator.next();
|
||||
final UnassignedShardDecision unassignedShardDecision = makeAllocationDecision(shard, allocation, logger);
|
||||
|
||||
if (unassignedShardDecision.isDecisionTaken() == false) {
|
||||
// no decision was taken by this allocator
|
||||
continue;
|
||||
}
|
||||
|
||||
if (unassignedShardDecision.getFinalDecisionSafe().type() == Decision.Type.YES) {
|
||||
unassignedIterator.initialize(unassignedShardDecision.getAssignedNodeId(),
|
||||
unassignedShardDecision.getAllocationId(),
|
||||
shard.primary() ? ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE :
|
||||
allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE),
|
||||
allocation.changes());
|
||||
} else {
|
||||
unassignedIterator.removeAndIgnore(unassignedShardDecision.getAllocationStatus(), allocation.changes());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Make a decision on the allocation of an unassigned shard. This method is used by
|
||||
* {@link #allocateUnassigned(RoutingAllocation)} to make decisions about whether or not
|
||||
* the shard can be allocated by this allocator and if so, to which node it will be allocated.
|
||||
*
|
||||
* @param unassignedShard the unassigned shard to allocate
|
||||
* @param allocation the current routing state
|
||||
* @param logger the logger
|
||||
* @return an {@link UnassignedShardDecision} with the final decision of whether to allocate and details of the decision
|
||||
*/
|
||||
public abstract UnassignedShardDecision makeAllocationDecision(ShardRouting unassignedShard,
|
||||
RoutingAllocation allocation,
|
||||
Logger logger);
|
||||
}
|
|
@@ -19,12 +19,12 @@

package org.elasticsearch.gateway;

import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.RoutingNode;
@@ -32,19 +32,23 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.gateway.AsyncShardFetch.FetchResult;
import org.elasticsearch.gateway.TransportNodesListGatewayStartedShards.NodeGatewayStartedShards;
import org.elasticsearch.index.shard.ShardStateMetaData;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.LinkedHashMap;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;
@@ -62,7 +66,7 @@ import java.util.stream.Collectors;
* nor does it allocate primaries when a primary shard failed and there is a valid replica
* copy that can immediately be promoted to primary, as this takes place in {@link RoutingNodes#failShard}.
*/
public abstract class PrimaryShardAllocator extends AbstractComponent {
public abstract class PrimaryShardAllocator extends BaseGatewayShardAllocator {

private static final Function<String, String> INITIAL_SHARDS_PARSER = (value) -> {
switch (value) {
@@ -94,110 +98,161 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
logger.debug("using initial_shards [{}]", NODE_INITIAL_SHARDS_SETTING.get(settings));
}

public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
final MetaData metaData = allocation.metaData();
/**
* Is the allocator responsible for allocating the given {@link ShardRouting}?
*/
private static boolean isResponsibleFor(final ShardRouting shard) {
return shard.primary() // must be primary
&& shard.unassigned() // must be unassigned
// only handle either an existing store or a snapshot recovery
&& (shard.recoverySource().getType() == RecoverySource.Type.EXISTING_STORE
|| shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT);
}

final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
final ShardRouting shard = unassignedIterator.next();
@Override
public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
final RoutingAllocation allocation,
final Logger logger) {
if (isResponsibleFor(unassignedShard) == false) {
// this allocator is not responsible for allocating this shard
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}

if (shard.primary() == false) {
continue;
}
final boolean explain = allocation.debugDecision();
final FetchResult<NodeGatewayStartedShards> shardState = fetchData(unassignedShard, allocation);
if (shardState.hasData() == false) {
allocation.setHasPendingAsyncFetch();
return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
"still fetching shard state from the nodes in the cluster");
}

if (shard.recoverySource().getType() != RecoverySource.Type.EXISTING_STORE &&
shard.recoverySource().getType() != RecoverySource.Type.SNAPSHOT) {
continue;
}
// don't create a new IndexSetting object for every shard as this could cause a lot of garbage
// on cluster restart if we allocate a boat load of shards
final IndexMetaData indexMetaData = allocation.metaData().getIndexSafe(unassignedShard.index());
final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(unassignedShard.id());
final boolean snapshotRestore = unassignedShard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);

final AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState = fetchData(shard, allocation);
if (shardState.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard started state", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
continue;
}
final NodeShardsResult nodeShardsResult;
final boolean enoughAllocationsFound;

// don't create a new IndexSetting object for every shard as this could cause a lot of garbage
// on cluster restart if we allocate a boat load of shards
final IndexMetaData indexMetaData = metaData.getIndexSafe(shard.index());
final Set<String> inSyncAllocationIds = indexMetaData.inSyncAllocationIds(shard.id());
final boolean snapshotRestore = shard.recoverySource().getType() == RecoverySource.Type.SNAPSHOT;
final boolean recoverOnAnyNode = recoverOnAnyNode(indexMetaData);

final NodeShardsResult nodeShardsResult;
final boolean enoughAllocationsFound;

if (inSyncAllocationIds.isEmpty()) {
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) : "trying to allocated a primary with an empty allocation id set, but index is new";
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodeShardsResult = buildVersionBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode, allocation.getIgnoreNodes(shard.shardId()), shardState);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
} else {
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", shard.index(), shard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, shard);
if (inSyncAllocationIds.isEmpty()) {
assert Version.indexCreated(indexMetaData.getSettings()).before(Version.V_5_0_0_alpha1) :
"trying to allocated a primary with an empty allocation id set, but index is new";
// when we load an old index (after upgrading cluster) or restore a snapshot of an old index
// fall back to old version-based allocation mode
// Note that once the shard has been active, lastActiveAllocationIds will be non-empty
nodeShardsResult = buildVersionBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(unassignedShard.shardId()), shardState, logger);
if (snapshotRestore || recoverOnAnyNode) {
enoughAllocationsFound = nodeShardsResult.allocationsFound > 0;
} else {
assert inSyncAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodeShardsResult = buildAllocationIdBasedNodeShardsResult(shard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(shard.shardId()), inSyncAllocationIds, shardState);
enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", shard.index(), shard.id(), nodeShardsResult.orderedAllocationCandidates.size(), shard, inSyncAllocationIds);
enoughAllocationsFound = isEnoughVersionBasedAllocationsFound(indexMetaData, nodeShardsResult);
}
logger.debug("[{}][{}]: version-based allocation for pre-{} index found {} allocations of {}", unassignedShard.index(),
unassignedShard.id(), Version.V_5_0_0_alpha1, nodeShardsResult.allocationsFound, unassignedShard);
} else {
assert inSyncAllocationIds.isEmpty() == false;
// use allocation ids to select nodes
nodeShardsResult = buildAllocationIdBasedNodeShardsResult(unassignedShard, snapshotRestore || recoverOnAnyNode,
allocation.getIgnoreNodes(unassignedShard.shardId()), inSyncAllocationIds, shardState, logger);
enoughAllocationsFound = nodeShardsResult.orderedAllocationCandidates.size() > 0;
logger.debug("[{}][{}]: found {} allocation candidates of {} based on allocation ids: [{}]", unassignedShard.index(),
unassignedShard.id(), nodeShardsResult.orderedAllocationCandidates.size(), unassignedShard, inSyncAllocationIds);
}

if (enoughAllocationsFound == false){
if (snapshotRestore) {
// let BalancedShardsAllocator take care of allocating this shard
logger.debug("[{}][{}]: missing local data, will restore from [{}]", shard.index(), shard.id(), shard.recoverySource());
} else if (recoverOnAnyNode) {
// let BalancedShardsAllocator take care of allocating this shard
logger.debug("[{}][{}]: missing local data, recover from any node", shard.index(), shard.id());
} else {
// we can't really allocate, so ignore it and continue
unassignedIterator.removeAndIgnore(AllocationStatus.NO_VALID_SHARD_COPY, allocation.changes());
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]", shard.index(), shard.id(), nodeShardsResult.allocationsFound);
}
continue;
}

final NodesToAllocate nodesToAllocate = buildNodesToAllocate(
allocation, nodeShardsResult.orderedAllocationCandidates, shard, false
);
if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
NodeGatewayStartedShards nodeShardState = nodesToAllocate.yesNodeShards.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodeShardState.getNode());
unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(), ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
} else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
// The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
// can be force-allocated to one of the nodes.
final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(
allocation, nodeShardsResult.orderedAllocationCandidates, shard, true
);
if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) {
NodeGatewayStartedShards nodeShardState = nodesToForceAllocate.yesNodeShards.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
shard.index(), shard.id(), shard, nodeShardState.getNode());
unassignedIterator.initialize(nodeShardState.getNode().getId(), nodeShardState.allocationId(),
ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE, allocation.changes());
} else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
shard.index(), shard.id(), shard, nodesToForceAllocate.throttleNodeShards);
unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes());
} else {
logger.debug("[{}][{}]: forced primary allocation denied [{}]", shard.index(), shard.id(), shard);
unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_NO, allocation.changes());
}
if (enoughAllocationsFound == false) {
if (snapshotRestore) {
// let BalancedShardsAllocator take care of allocating this shard
logger.debug("[{}][{}]: missing local data, will restore from [{}]",
unassignedShard.index(), unassignedShard.id(), unassignedShard.recoverySource());
return UnassignedShardDecision.DECISION_NOT_TAKEN;
} else if (recoverOnAnyNode) {
// let BalancedShardsAllocator take care of allocating this shard
logger.debug("[{}][{}]: missing local data, recover from any node", unassignedShard.index(), unassignedShard.id());
return UnassignedShardDecision.DECISION_NOT_TAKEN;
} else {
// we are throttling this, but we have enough to allocate to this node, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation", shard.index(), shard.id(), shard, nodesToAllocate.throttleNodeShards);
unassignedIterator.removeAndIgnore(AllocationStatus.DECIDERS_THROTTLED, allocation.changes());
// We have a shard that was previously allocated, but we could not find a valid shard copy to allocate the primary.
// We could just be waiting for the node that holds the primary to start back up, in which case the allocation for
// this shard will be picked up when the node joins and we do another allocation reroute
logger.debug("[{}][{}]: not allocating, number_of_allocated_shards_found [{}]",
unassignedShard.index(), unassignedShard.id(), nodeShardsResult.allocationsFound);
return UnassignedShardDecision.noDecision(AllocationStatus.NO_VALID_SHARD_COPY,
"shard was previously allocated, but no valid shard copy could be found amongst the current nodes in the cluster");
}
}

final NodesToAllocate nodesToAllocate = buildNodesToAllocate(
allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, false
);
if (nodesToAllocate.yesNodeShards.isEmpty() == false) {
DecidedNode decidedNode = nodesToAllocate.yesNodeShards.get(0);
logger.debug("[{}][{}]: allocating [{}] to [{}] on primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, decidedNode.nodeShardState.getNode());
final String nodeId = decidedNode.nodeShardState.getNode().getId();
return UnassignedShardDecision.yesDecision(
"the allocation deciders returned a YES decision to allocate to node [" + nodeId + "]",
nodeId, decidedNode.nodeShardState.allocationId(), buildNodeDecisions(nodesToAllocate, explain));
} else if (nodesToAllocate.throttleNodeShards.isEmpty() == true && nodesToAllocate.noNodeShards.isEmpty() == false) {
// The deciders returned a NO decision for all nodes with shard copies, so we check if primary shard
// can be force-allocated to one of the nodes.
final NodesToAllocate nodesToForceAllocate = buildNodesToAllocate(
allocation, nodeShardsResult.orderedAllocationCandidates, unassignedShard, true
);
if (nodesToForceAllocate.yesNodeShards.isEmpty() == false) {
final DecidedNode decidedNode = nodesToForceAllocate.yesNodeShards.get(0);
final NodeGatewayStartedShards nodeShardState = decidedNode.nodeShardState;
logger.debug("[{}][{}]: allocating [{}] to [{}] on forced primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeShardState.getNode());
final String nodeId = nodeShardState.getNode().getId();
return UnassignedShardDecision.yesDecision(
"allocating the primary shard to node [" + nodeId+ "], which has a complete copy of the shard data",
nodeId,
nodeShardState.allocationId(),
buildNodeDecisions(nodesToForceAllocate, explain));
} else if (nodesToForceAllocate.throttleNodeShards.isEmpty() == false) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on forced primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToForceAllocate.throttleNodeShards);
return UnassignedShardDecision.throttleDecision(
"allocation throttled as all nodes to which the shard may be force allocated are busy with other recoveries",
buildNodeDecisions(nodesToForceAllocate, explain));
} else {
logger.debug("[{}][{}]: forced primary allocation denied [{}]",
unassignedShard.index(), unassignedShard.id(), unassignedShard);
return UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO,
"all nodes that hold a valid shard copy returned a NO decision, and force allocation is not permitted",
buildNodeDecisions(nodesToForceAllocate, explain));
}
} else {
// we are throttling this, since we are allowed to allocate to this node but there are enough allocations
// taking place on the node currently, ignore it for now
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] on primary allocation",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodesToAllocate.throttleNodeShards);
return UnassignedShardDecision.throttleDecision(
"allocation throttled as all nodes to which the shard may be allocated are busy with other recoveries",
buildNodeDecisions(nodesToAllocate, explain));
}
}

/**
* Builds a map of nodes to the corresponding allocation decisions for those nodes.
*/
private static Map<String, Decision> buildNodeDecisions(NodesToAllocate nodesToAllocate, boolean explain) {
if (explain == false) {
// not in explain mode, no need to return node level decisions
return null;
}
Map<String, Decision> nodeDecisions = new LinkedHashMap<>();
for (final DecidedNode decidedNode : nodesToAllocate.yesNodeShards) {
nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
}
for (final DecidedNode decidedNode : nodesToAllocate.throttleNodeShards) {
nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
}
for (final DecidedNode decidedNode : nodesToAllocate.noNodeShards) {
nodeDecisions.put(decidedNode.nodeShardState.getNode().getId(), decidedNode.decision);
}
return nodeDecisions;
}

/**
@@ -205,8 +260,10 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* lastActiveAllocationIds are added to the list. Otherwise, any node that has a shard is added to the list, but
* entries with matching allocation id are always at the front of the list.
*/
protected NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
Set<String> lastActiveAllocationIds, AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
protected static NodeShardsResult buildAllocationIdBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard,
Set<String> ignoreNodes, Set<String> lastActiveAllocationIds,
FetchResult<NodeGatewayStartedShards> shardState,
Logger logger) {
LinkedList<NodeGatewayStartedShards> matchingNodeShardStates = new LinkedList<>();
LinkedList<NodeGatewayStartedShards> nonMatchingNodeShardStates = new LinkedList<>();
int numberOfAllocationsFound = 0;
@@ -299,9 +356,9 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
List<NodeGatewayStartedShards> nodeShardStates,
ShardRouting shardRouting,
boolean forceAllocate) {
List<NodeGatewayStartedShards> yesNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> throttledNodeShards = new ArrayList<>();
List<NodeGatewayStartedShards> noNodeShards = new ArrayList<>();
List<DecidedNode> yesNodeShards = new ArrayList<>();
List<DecidedNode> throttledNodeShards = new ArrayList<>();
List<DecidedNode> noNodeShards = new ArrayList<>();
for (NodeGatewayStartedShards nodeShardState : nodeShardStates) {
RoutingNode node = allocation.routingNodes().node(nodeShardState.getNode().getId());
if (node == null) {
@@ -310,12 +367,13 @@

Decision decision = forceAllocate ? allocation.deciders().canForceAllocatePrimary(shardRouting, node, allocation) :
allocation.deciders().canAllocate(shardRouting, node, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
throttledNodeShards.add(nodeShardState);
} else if (decision.type() == Decision.Type.NO) {
noNodeShards.add(nodeShardState);
DecidedNode decidedNode = new DecidedNode(nodeShardState, decision);
if (decision.type() == Type.THROTTLE) {
throttledNodeShards.add(decidedNode);
} else if (decision.type() == Type.NO) {
noNodeShards.add(decidedNode);
} else {
yesNodeShards.add(nodeShardState);
yesNodeShards.add(decidedNode);
}
}
return new NodesToAllocate(Collections.unmodifiableList(yesNodeShards), Collections.unmodifiableList(throttledNodeShards), Collections.unmodifiableList(noNodeShards));
@@ -325,8 +383,8 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
* Builds a list of previously started shards. If matchAnyShard is set to false, only shards with the highest shard version are added to
* the list. Otherwise, any existing shard is added to the list, but entries with highest version are always at the front of the list.
*/
NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
AsyncShardFetch.FetchResult<NodeGatewayStartedShards> shardState) {
static NodeShardsResult buildVersionBasedNodeShardsResult(ShardRouting shard, boolean matchAnyShard, Set<String> ignoreNodes,
FetchResult<NodeGatewayStartedShards> shardState, Logger logger) {
final List<NodeGatewayStartedShards> allocationCandidates = new ArrayList<>();
int numberOfAllocationsFound = 0;
long highestVersion = ShardStateMetaData.NO_VERSION;
@@ -400,7 +458,7 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
&& IndexMetaData.INDEX_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE_SETTING.get(metaData.getSettings(), this.settings);
}

protected abstract AsyncShardFetch.FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);
protected abstract FetchResult<NodeGatewayStartedShards> fetchData(ShardRouting shard, RoutingAllocation allocation);

static class NodeShardsResult {
public final List<NodeGatewayStartedShards> orderedAllocationCandidates;
@@ -413,16 +471,28 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
}

static class NodesToAllocate {
final List<NodeGatewayStartedShards> yesNodeShards;
final List<NodeGatewayStartedShards> throttleNodeShards;
final List<NodeGatewayStartedShards> noNodeShards;
final List<DecidedNode> yesNodeShards;
final List<DecidedNode> throttleNodeShards;
final List<DecidedNode> noNodeShards;

public NodesToAllocate(List<NodeGatewayStartedShards> yesNodeShards,
List<NodeGatewayStartedShards> throttleNodeShards,
List<NodeGatewayStartedShards> noNodeShards) {
public NodesToAllocate(List<DecidedNode> yesNodeShards, List<DecidedNode> throttleNodeShards, List<DecidedNode> noNodeShards) {
this.yesNodeShards = yesNodeShards;
this.throttleNodeShards = throttleNodeShards;
this.noNodeShards = noNodeShards;
}
}

/**
* This class encapsulates the shard state retrieved from a node and the decision that was made
* by the allocator for allocating to the node that holds the shard copy.
*/
private static class DecidedNode {
final NodeGatewayStartedShards nodeShardState;
final Decision decision;

private DecidedNode(NodeGatewayStartedShards nodeShardState, Decision decision) {
this.nodeShardState = nodeShardState;
this.decision = decision;
}
}
}

@@ -23,7 +23,7 @@ import com.carrotsearch.hppc.ObjectLongHashMap;
import com.carrotsearch.hppc.ObjectLongMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectLongCursor;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RoutingNode;
@@ -31,24 +31,25 @@ import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.RoutingChangesObserver;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.index.store.StoreFileMetaData;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;

/**
*/
public abstract class ReplicaShardAllocator extends AbstractComponent {
public abstract class ReplicaShardAllocator extends BaseGatewayShardAllocator {

public ReplicaShardAllocator(Settings settings) {
super(settings);
@@ -96,7 +97,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
continue;
}

MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);
MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores, false);
if (matchingNodes.getNodeWithHighestMatch() != null) {
DiscoveryNode currentNode = allocation.nodes().get(shard.currentNodeId());
DiscoveryNode nodeWithHighestMatch = matchingNodes.getNodeWithHighestMatch();
@@ -128,86 +129,88 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
}

public void allocateUnassigned(RoutingAllocation allocation) {
final RoutingNodes routingNodes = allocation.routingNodes();
final RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator = routingNodes.unassigned().iterator();
while (unassignedIterator.hasNext()) {
ShardRouting shard = unassignedIterator.next();
if (shard.primary()) {
continue;
}

// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
if (shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
continue;
}

// pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
Decision decision = canBeAllocatedToAtLeastOneNode(shard, allocation);
if (decision.type() != Decision.Type.YES) {
logger.trace("{}: ignoring allocation, can't be allocated on any node", shard);
unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
continue;
}

AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(shard, allocation);
if (shardStores.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard stores", shard);
allocation.setHasPendingAsyncFetch();
unassignedIterator.removeAndIgnore(AllocationStatus.FETCHING_SHARD_DATA, allocation.changes());
continue; // still fetching
}

ShardRouting primaryShard = routingNodes.activePrimary(shard.shardId());
assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
if (primaryStore == null) {
// if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
// we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
// will try and recover from
// Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", shard);
continue;
}

MatchingNodes matchingNodes = findMatchingNodes(shard, allocation, primaryStore, shardStores);

if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
// we only check on THROTTLE since we checked before before on NO
decision = allocation.deciders().canAllocate(shard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we are throttling this, but we have enough to allocate to this node, ignore it for now
unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.fromDecision(decision), allocation.changes());
} else {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store", shard.index(), shard.id(), shard, nodeWithHighestMatch.node());
// we found a match
unassignedIterator.initialize(nodeWithHighestMatch.nodeId(), null, allocation.clusterInfo().getShardSize(shard, ShardRouting.UNAVAILABLE_EXPECTED_SHARD_SIZE), allocation.changes());
}
} else if (matchingNodes.hasAnyData() == false) {
// if we didn't manage to find *any* data (regardless of matching sizes), check if the allocation of the replica shard needs to be delayed
ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes());
}
}
/**
* Is the allocator responsible for allocating the given {@link ShardRouting}?
*/
private static boolean isResponsibleFor(final ShardRouting shard) {
return shard.primary() == false // must be a replica
&& shard.unassigned() // must be unassigned
// if we are allocating a replica because of index creation, no need to go and find a copy, there isn't one...
&& shard.unassignedInfo().getReason() != UnassignedInfo.Reason.INDEX_CREATED;
}

/**
* Check if the allocation of the replica is to be delayed. Compute the delay and if it is delayed, add it to the ignore unassigned list
* Note: we only care about replica in delayed allocation, since if we have an unassigned primary it
* will anyhow wait to find an existing copy of the shard to be allocated
* Note: the other side of the equation is scheduling a reroute in a timely manner, which happens in the RoutingService
*
* PUBLIC FOR TESTS!
*
* @param unassignedIterator iterator over unassigned shards
* @param shard the shard which might be delayed
*/
public void ignoreUnassignedIfDelayed(RoutingNodes.UnassignedShards.UnassignedIterator unassignedIterator, ShardRouting shard, RoutingChangesObserver changes) {
if (shard.unassignedInfo().isDelayed()) {
logger.debug("{}: allocation of [{}] is delayed", shard.shardId(), shard);
unassignedIterator.removeAndIgnore(AllocationStatus.DELAYED_ALLOCATION, changes);
@Override
public UnassignedShardDecision makeAllocationDecision(final ShardRouting unassignedShard,
final RoutingAllocation allocation,
final Logger logger) {
if (isResponsibleFor(unassignedShard) == false) {
// this allocator is not responsible for deciding on this shard
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}

final RoutingNodes routingNodes = allocation.routingNodes();
final boolean explain = allocation.debugDecision();
// pre-check if it can be allocated to any node that currently exists, so we won't list the store for it for nothing
Tuple<Decision, Map<String, Decision>> allocateDecision = canBeAllocatedToAtLeastOneNode(unassignedShard, allocation, explain);
if (allocateDecision.v1().type() != Decision.Type.YES) {
logger.trace("{}: ignoring allocation, can't be allocated on any node", unassignedShard);
return UnassignedShardDecision.noDecision(UnassignedInfo.AllocationStatus.fromDecision(allocateDecision.v1()),
"all nodes returned a " + allocateDecision.v1().type() + " decision for allocating the replica shard",
allocateDecision.v2());
}

AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> shardStores = fetchData(unassignedShard, allocation);
if (shardStores.hasData() == false) {
logger.trace("{}: ignoring allocation, still fetching shard stores", unassignedShard);
allocation.setHasPendingAsyncFetch();
return UnassignedShardDecision.noDecision(AllocationStatus.FETCHING_SHARD_DATA,
"still fetching shard state from the nodes in the cluster");
}

ShardRouting primaryShard = routingNodes.activePrimary(unassignedShard.shardId());
assert primaryShard != null : "the replica shard can be allocated on at least one node, so there must be an active primary";
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore = findStore(primaryShard, allocation, shardStores);
if (primaryStore == null) {
// if we can't find the primary data, it is probably because the primary shard is corrupted (and listing failed)
// we want to let the replica be allocated in order to expose the actual problem with the primary that the replica
// will try and recover from
// Note, this is the existing behavior, as exposed in running CorruptFileTest#testNoPrimaryData
logger.trace("{}: no primary shard store found or allocated, letting actual allocation figure it out", unassignedShard);
return UnassignedShardDecision.DECISION_NOT_TAKEN;
}

MatchingNodes matchingNodes = findMatchingNodes(unassignedShard, allocation, primaryStore, shardStores, explain);
assert explain == false || matchingNodes.nodeDecisions != null : "in explain mode, we must have individual node decisions";

if (matchingNodes.getNodeWithHighestMatch() != null) {
RoutingNode nodeWithHighestMatch = allocation.routingNodes().node(matchingNodes.getNodeWithHighestMatch().getId());
// we only check on THROTTLE since we checked before before on NO
Decision decision = allocation.deciders().canAllocate(unassignedShard, nodeWithHighestMatch, allocation);
if (decision.type() == Decision.Type.THROTTLE) {
logger.debug("[{}][{}]: throttling allocation [{}] to [{}] in order to reuse its unallocated persistent store",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
// we are throttling this, as we have enough other shards to allocate to this node, so ignore it for now
return UnassignedShardDecision.throttleDecision(
"returned a THROTTLE decision on each node that has an existing copy of the shard, so waiting to re-use one " +
"of those copies", matchingNodes.nodeDecisions);
} else {
logger.debug("[{}][{}]: allocating [{}] to [{}] in order to reuse its unallocated persistent store",
unassignedShard.index(), unassignedShard.id(), unassignedShard, nodeWithHighestMatch.node());
// we found a match
return UnassignedShardDecision.yesDecision(
"allocating to node [" + nodeWithHighestMatch.nodeId() + "] in order to re-use its unallocated persistent store",
nodeWithHighestMatch.nodeId(), null, matchingNodes.nodeDecisions);
}
} else if (matchingNodes.hasAnyData() == false && unassignedShard.unassignedInfo().isDelayed()) {
// if we didn't manage to find *any* data (regardless of matching sizes), and the replica is
// unassigned due to a node leaving, so we delay allocation of this replica to see if the
// node with the shard copy will rejoin so we can re-use the copy it has
logger.debug("{}: allocation of [{}] is delayed", unassignedShard.shardId(), unassignedShard);
return UnassignedShardDecision.noDecision(AllocationStatus.DELAYED_ALLOCATION,
"not allocating this shard, no nodes contain data for the replica and allocation is delayed");
}

return UnassignedShardDecision.DECISION_NOT_TAKEN;
}

/**
@@ -215,10 +218,15 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
*
* Returns the best allocation decision for allocating the shard on any node (i.e. YES if at least one
* node decided YES, THROTTLE if at least one node decided THROTTLE, and NO if none of the nodes decided
* YES or THROTTLE.
* YES or THROTTLE). If the explain flag is turned on AND the decision is NO or THROTTLE, then this method
* also returns a map of nodes to decisions (second value in the tuple) to use for explanations; if the explain
* flag is off, the second value in the return tuple will be null.
*/
private Decision canBeAllocatedToAtLeastOneNode(ShardRouting shard, RoutingAllocation allocation) {
private Tuple<Decision, Map<String, Decision>> canBeAllocatedToAtLeastOneNode(ShardRouting shard,
RoutingAllocation allocation,
boolean explain) {
Decision madeDecision = Decision.NO;
Map<String, Decision> nodeDecisions = new HashMap<>();
for (ObjectCursor<DiscoveryNode> cursor : allocation.nodes().getDataNodes().values()) {
RoutingNode node = allocation.routingNodes().node(cursor.value.getId());
if (node == null) {
@@ -227,13 +235,16 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// if we can't allocate it on a node, ignore it, for example, this handles
// cases for only allocating a replica after a primary
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (explain) {
nodeDecisions.put(node.nodeId(), decision);
}
if (decision.type() == Decision.Type.YES) {
return decision;
return Tuple.tuple(decision, null);
} else if (madeDecision.type() == Decision.Type.NO && decision.type() == Decision.Type.THROTTLE) {
madeDecision = decision;
}
}
return madeDecision;
return Tuple.tuple(madeDecision, explain ? nodeDecisions : null);
}

/**
@@ -254,8 +265,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {

private MatchingNodes findMatchingNodes(ShardRouting shard, RoutingAllocation allocation,
TransportNodesListShardStoreMetaData.StoreFilesMetaData primaryStore,
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data) {
AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> data,
boolean explain) {
ObjectLongMap<DiscoveryNode> nodesToSize = new ObjectLongHashMap<>();
Map<String, Decision> nodeDecisions = new HashMap<>();
for (Map.Entry<DiscoveryNode, TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> nodeStoreEntry : data.getData().entrySet()) {
DiscoveryNode discoNode = nodeStoreEntry.getKey();
TransportNodesListShardStoreMetaData.StoreFilesMetaData storeFilesMetaData = nodeStoreEntry.getValue().storeFilesMetaData();
@@ -273,6 +286,10 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
// we only check for NO, since if this node is THROTTLING and it has enough "same data"
// then we will try and assign it next time
Decision decision = allocation.deciders().canAllocate(shard, node, allocation);
if (explain) {
nodeDecisions.put(node.nodeId(), decision);
}

if (decision.type() == Decision.Type.NO) {
continue;
}
@@ -297,7 +314,7 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
}
}

return new MatchingNodes(nodesToSize);
return new MatchingNodes(nodesToSize, explain ? nodeDecisions : null);
}

protected abstract AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData> fetchData(ShardRouting shard, RoutingAllocation allocation);
@@ -305,9 +322,12 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
static class MatchingNodes {
private final ObjectLongMap<DiscoveryNode> nodesToSize;
private final DiscoveryNode nodeWithHighestMatch;
@Nullable
private final Map<String, Decision> nodeDecisions;

public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize) {
public MatchingNodes(ObjectLongMap<DiscoveryNode> nodesToSize, @Nullable Map<String, Decision> nodeDecisions) {
this.nodesToSize = nodesToSize;
this.nodeDecisions = nodeDecisions;

long highestMatchSize = 0;
DiscoveryNode highestMatchNode = null;
@@ -340,5 +360,13 @@ public abstract class ReplicaShardAllocator extends AbstractComponent {
public boolean hasAnyData() {
return nodesToSize.isEmpty() == false;
}

/**
* The decisions map for all nodes with a shard copy, if available.
*/
@Nullable
public Map<String, Decision> getNodeDecisions() {
return nodeDecisions;
}
}
}

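Illustrative sketch (not part of this commit): both allocators above now report an UnassignedShardDecision instead of mutating the unassigned iterator themselves, so the base class (or, hypothetically, an explain-style API) consumes the result roughly as below. The helper name is made up; the accessor names are the ones already used in this diff.

// Hypothetical helper, for illustration only.
static String describeDecision(UnassignedShardDecision decision) {
    if (decision.isDecisionTaken() == false) {
        return "no decision taken by this allocator";
    }
    if (decision.getFinalDecisionSafe().type() == Decision.Type.YES) {
        return "assign to node [" + decision.getAssignedNodeId() + "]"
            + ", re-using allocation id [" + decision.getAllocationId() + "]";
    }
    return "not assigned, status [" + decision.getAllocationStatus() + "]";
}
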
@@ -76,11 +76,11 @@ public final class ShardGetService extends AbstractIndexShardComponent {
return new GetStats(existsMetric.count(), TimeUnit.NANOSECONDS.toMillis(existsMetric.sum()), missingMetric.count(), TimeUnit.NANOSECONDS.toMillis(missingMetric.sum()), currentMetric.count());
}

public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
public GetResult get(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
currentMetric.inc();
try {
long now = System.nanoTime();
GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext, ignoreErrorsOnGeneratedFields);
GetResult getResult = innerGet(type, id, gFields, realtime, version, versionType, fetchSourceContext);

if (getResult.isExists()) {
existsMetric.inc(System.nanoTime() - now);
@@ -139,7 +139,7 @@ public final class ShardGetService extends AbstractIndexShardComponent {
return FetchSourceContext.DO_NOT_FETCH_SOURCE;
}

private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext, boolean ignoreErrorsOnGeneratedFields) {
private GetResult innerGet(String type, String id, String[] gFields, boolean realtime, long version, VersionType versionType, FetchSourceContext fetchSourceContext) {
fetchSourceContext = normalizeFetchSourceContent(fetchSourceContext, gFields);

Engine.GetResult get = null;

@@ -113,6 +113,7 @@ import org.elasticsearch.script.ScriptModule;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchModule;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.snapshots.SnapshotShardsService;
import org.elasticsearch.snapshots.SnapshotsService;
import org.elasticsearch.tasks.TaskResultsService;
@@ -122,7 +123,6 @@ import org.elasticsearch.transport.TransportService;
import org.elasticsearch.tribe.TribeService;
import org.elasticsearch.watcher.ResourceWatcherService;

import javax.management.MBeanServerPermission;
import java.io.BufferedWriter;
import java.io.Closeable;
import java.io.IOException;
@@ -133,13 +133,11 @@ import java.nio.charset.Charset;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.security.AccessControlException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
@@ -383,12 +381,8 @@ public class Node implements Closeable {
b.bind(MetaDataUpgrader.class).toInstance(metaDataUpgrader);
b.bind(MetaStateService.class).toInstance(metaStateService);
b.bind(IndicesService.class).toInstance(indicesService);
Class<? extends SearchService> searchServiceImpl = pickSearchServiceImplementation();
if (searchServiceImpl == SearchService.class) {
b.bind(SearchService.class).asEagerSingleton();
} else {
b.bind(SearchService.class).to(searchServiceImpl).asEagerSingleton();
}
b.bind(SearchService.class).toInstance(newSearchService(clusterService, indicesService,
threadPool, scriptModule.getScriptService(), bigArrays, searchModule.getFetchPhase()));
pluginComponents.stream().forEach(p -> b.bind((Class) p.getClass()).toInstance(p));

}
@@ -793,10 +787,12 @@ public class Node implements Closeable {
}

/**
* Select the search service implementation. Overrided by tests.
* Creates a new the SearchService. This method can be overwritten by tests to inject mock implementations.
*/
protected Class<? extends SearchService> pickSearchServiceImplementation() {
return SearchService.class;
protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
FetchPhase fetchPhase) {
return new SearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
}

/**

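Usage note (illustrative, not part of this commit): newSearchService() replaces the class-based pickSearchServiceImplementation() hook, so a test can now hand the node a ready-made SearchService instance. A hypothetical subclass could look like the following Java sketch; the Node(Settings) constructor is assumed here and the class name is invented.

// Hypothetical sketch: a Node subclass that supplies its own SearchService.
class CustomSearchServiceNode extends Node {

    CustomSearchServiceNode(Settings settings) {
        super(settings);
    }

    @Override
    protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
                                             ThreadPool threadPool, ScriptService scriptService,
                                             BigArrays bigArrays, FetchPhase fetchPhase) {
        // Build (or wrap) whatever SearchService the test needs; the default
        // implementation above simply calls the SearchService constructor.
        return new SearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
    }
}
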
@@ -60,8 +60,8 @@ class ListPluginsCommand extends SettingCommand {
}
Collections.sort(plugins);
for (final Path plugin : plugins) {
terminal.println(plugin.getFileName().toString());
PluginInfo info = PluginInfo.readFromProperties(env.pluginsFile().resolve(plugin.toAbsolutePath()));
terminal.println(plugin.getFileName().toString() + "@" + info.getVersion());
terminal.println(Terminal.Verbosity.VERBOSE, info.toString());
}
}

@@ -58,7 +58,6 @@ public class RestGetAction extends BaseRestHandler {
getRequest.parent(request.param("parent"));
getRequest.preference(request.param("preference"));
getRequest.realtime(request.paramAsBoolean("realtime", getRequest.realtime()));
getRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false));

String sField = request.param("fields");
if (sField != null) {

@@ -59,7 +59,6 @@ public class RestMultiGetAction extends BaseRestHandler {
multiGetRequest.refresh(request.paramAsBoolean("refresh", multiGetRequest.refresh()));
multiGetRequest.preference(request.param("preference"));
multiGetRequest.realtime(request.paramAsBoolean("realtime", multiGetRequest.realtime()));
multiGetRequest.ignoreErrorsOnGeneratedFields(request.paramAsBoolean("ignore_errors_on_generated_fields", false));

String[] sFields = null;
String sField = request.param("fields");

@@ -17,7 +17,7 @@
* under the License.
*/

package org.elasticsearch.search.internal;
package org.elasticsearch.search;

import org.apache.lucene.queries.TermsQuery;
import org.apache.lucene.search.BooleanClause.Occur;
@@ -53,8 +53,6 @@ import org.elasticsearch.index.query.QueryShardContext;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchExtBuilder;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.aggregations.SearchContextAggregations;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchPhase;
@@ -64,6 +62,10 @@ import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext;
import org.elasticsearch.search.fetch.subphase.highlight.SearchContextHighlight;
import org.elasticsearch.search.internal.ContextIndexSearcher;
import org.elasticsearch.search.internal.ScrollContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;
import org.elasticsearch.search.lookup.SearchLookup;
import org.elasticsearch.search.profile.Profilers;
import org.elasticsearch.search.query.QueryPhaseExecutionException;
@@ -80,7 +82,7 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class DefaultSearchContext extends SearchContext {
final class DefaultSearchContext extends SearchContext {

private final long id;
private final ShardSearchRequest request;
@@ -123,10 +125,7 @@ public class DefaultSearchContext extends SearchContext {
* things like the type filter or alias filters.
*/
private ParsedQuery originalQuery;
/**
* Just like originalQuery but with the filters from types, aliases and slice applied.
*/
private ParsedQuery filteredQuery;

/**
* The query to actually execute.
*/
@@ -151,7 +150,7 @@ public class DefaultSearchContext extends SearchContext {
private final QueryShardContext queryShardContext;
private FetchPhase fetchPhase;

public DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
DefaultSearchContext(long id, ShardSearchRequest request, SearchShardTarget shardTarget, Engine.Searcher engineSearcher,
IndexService indexService, IndexShard indexShard, ScriptService scriptService,
BigArrays bigArrays, Counter timeEstimateCounter, ParseFieldMatcher parseFieldMatcher, TimeValue timeout,
FetchPhase fetchPhase) {
@@ -187,7 +186,7 @@ public class DefaultSearchContext extends SearchContext {
* Should be called before executing the main query and after all other parameters have been set.
*/
@Override
public void preProcess() {
public void preProcess(boolean rewrite) {
if (hasOnlySuggest() ) {
return;
}
@@ -241,20 +240,22 @@ public class DefaultSearchContext extends SearchContext {
if (queryBoost() != AbstractQueryBuilder.DEFAULT_BOOST) {
parsedQuery(new ParsedQuery(new FunctionScoreQuery(query(), new WeightFactorFunction(queryBoost)), parsedQuery()));
}
filteredQuery(buildFilteredQuery());
try {
this.query = searcher().rewrite(this.query);
} catch (IOException e) {
throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e);
this.query = buildFilteredQuery();
if (rewrite) {
try {
this.query = searcher.rewrite(query);
} catch (IOException e) {
throw new QueryPhaseExecutionException(this, "Failed to rewrite main query", e);
}
}
}

private ParsedQuery buildFilteredQuery() {
Query searchFilter = searchFilter(queryShardContext.getTypes());
private Query buildFilteredQuery() {
final Query searchFilter = searchFilter(queryShardContext.getTypes());
if (searchFilter == null) {
return originalQuery;
return originalQuery.query();
}
Query result;
final Query result;
if (Queries.isConstantMatchAllQuery(query())) {
result = new ConstantScoreQuery(searchFilter);
} else {
@@ -263,7 +264,7 @@ public class DefaultSearchContext extends SearchContext {
.add(searchFilter, Occur.FILTER)
.build();
}
return new ParsedQuery(result, originalQuery);
return result;
}

@Override
@@ -618,15 +619,6 @@ public class DefaultSearchContext extends SearchContext {
return this;
}

public ParsedQuery filteredQuery() {
return filteredQuery;
}

private void filteredQuery(ParsedQuery filteredQuery) {
this.filteredQuery = filteredQuery;
this.query = filteredQuery.query();
}

@Override
public ParsedQuery parsedQuery() {
return this.originalQuery;

@@ -95,7 +95,6 @@ import org.elasticsearch.plugins.SearchPlugin.QuerySpec;
import org.elasticsearch.plugins.SearchPlugin.ScoreFunctionSpec;
import org.elasticsearch.plugins.SearchPlugin.SearchExtSpec;
import org.elasticsearch.plugins.SearchPlugin.SearchExtensionSpec;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.aggregations.AggregationBuilder;
import org.elasticsearch.search.aggregations.Aggregator;
import org.elasticsearch.search.aggregations.AggregatorParsers;
@@ -243,7 +242,6 @@ import org.elasticsearch.search.aggregations.pipeline.movavg.models.MovAvgModel;
import org.elasticsearch.search.aggregations.pipeline.movavg.models.SimpleModel;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregationBuilder;
import org.elasticsearch.search.aggregations.pipeline.serialdiff.SerialDiffPipelineAggregator;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.fetch.FetchSubPhase;
import org.elasticsearch.search.fetch.subphase.DocValueFieldsFetchSubPhase;
@@ -384,7 +382,6 @@ public class SearchModule extends AbstractModule {
bind(IndicesQueriesRegistry.class).toInstance(queryParserRegistry);
bind(SearchRequestParsers.class).toInstance(searchRequestParsers);
bind(SearchExtRegistry.class).toInstance(searchExtParserRegistry);
configureSearch();
}
}

@@ -574,13 +571,6 @@ public class SearchModule extends AbstractModule {
}
}

protected void configureSearch() {
// configure search private classes...
bind(SearchPhaseController.class).asEagerSingleton();
bind(FetchPhase.class).toInstance(new FetchPhase(fetchSubPhases));
bind(SearchTransportService.class).asEagerSingleton();
}

private void registerShapes() {
if (ShapesAvailability.JTS_AVAILABLE && ShapesAvailability.SPATIAL4J_AVAILABLE) {
ShapeBuilders.register(namedWriteables);
@@ -817,4 +807,8 @@ public class SearchModule extends AbstractModule {
queryParserRegistry.register(spec.getParser(), spec.getName());
namedWriteables.add(new Entry(QueryBuilder.class, spec.getName().getPreferredName(), spec.getReader()));
}

public FetchPhase getFetchPhase() {
return new FetchPhase(fetchSubPhases);
}
}

@ -29,9 +29,7 @@ import org.elasticsearch.cluster.service.ClusterService;
|
|||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParseFieldMatcher;
|
||||
import org.elasticsearch.common.component.AbstractLifecycleComponent;
|
||||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.lucene.Lucene;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
@ -66,7 +64,6 @@ import org.elasticsearch.search.fetch.ShardFetchRequest;
|
|||
import org.elasticsearch.search.fetch.subphase.DocValueFieldsContext;
|
||||
import org.elasticsearch.search.fetch.subphase.ScriptFieldsContext.ScriptField;
|
||||
import org.elasticsearch.search.fetch.subphase.highlight.HighlightBuilder;
|
||||
import org.elasticsearch.search.internal.DefaultSearchContext;
|
||||
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
|
||||
import org.elasticsearch.search.internal.ScrollContext;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
@ -141,10 +138,9 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
|
||||
private final ParseFieldMatcher parseFieldMatcher;
|
||||
|
||||
@Inject
|
||||
public SearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService, IndicesService indicesService,
|
||||
public SearchService(ClusterService clusterService, IndicesService indicesService,
|
||||
ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays, FetchPhase fetchPhase) {
|
||||
super(settings);
|
||||
super(clusterService.getSettings());
|
||||
this.parseFieldMatcher = new ParseFieldMatcher(settings);
|
||||
this.threadPool = threadPool;
|
||||
this.clusterService = clusterService;
|
||||
|
@ -160,7 +156,7 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
this.keepAliveReaper = threadPool.scheduleWithFixedDelay(new Reaper(), keepAliveInterval, Names.SAME);
|
||||
|
||||
defaultSearchTimeout = DEFAULT_SEARCH_TIMEOUT_SETTING.get(settings);
|
||||
clusterSettings.addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
|
||||
clusterService.getClusterSettings().addSettingsUpdateConsumer(DEFAULT_SEARCH_TIMEOUT_SETTING, this::setDefaultSearchTimeout);
|
||||
}
|
||||
|
||||
private void setDefaultSearchTimeout(TimeValue defaultSearchTimeout) {
|
||||
|
@ -520,16 +516,8 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
}
|
||||
|
||||
final SearchContext createContext(ShardSearchRequest request, @Nullable Engine.Searcher searcher) throws IOException {
|
||||
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
|
||||
IndexShard indexShard = indexService.getShard(request.shardId().getId());
|
||||
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
|
||||
|
||||
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
|
||||
|
||||
DefaultSearchContext context = new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
|
||||
indexService,
|
||||
indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
|
||||
defaultSearchTimeout, fetchPhase);
|
||||
DefaultSearchContext context = createSearchContext(request, defaultSearchTimeout, searcher);
|
||||
SearchContext.setCurrent(context);
|
||||
try {
|
||||
request.rewrite(context.getQueryShardContext());
|
||||
|
@ -572,6 +560,18 @@ public class SearchService extends AbstractLifecycleComponent implements IndexEv
|
|||
return context;
|
||||
}
|
||||
|
||||
public DefaultSearchContext createSearchContext(ShardSearchRequest request, TimeValue timeout, @Nullable Engine.Searcher searcher) {
|
||||
IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
|
||||
IndexShard indexShard = indexService.getShard(request.shardId().getId());
|
||||
SearchShardTarget shardTarget = new SearchShardTarget(clusterService.localNode().getId(), indexShard.shardId());
|
||||
Engine.Searcher engineSearcher = searcher == null ? indexShard.acquireSearcher("search") : searcher;
|
||||
|
||||
return new DefaultSearchContext(idGenerator.incrementAndGet(), request, shardTarget, engineSearcher,
|
||||
indexService,
|
||||
indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher,
|
||||
timeout, fetchPhase);
|
||||
}
|
||||
|
||||
private void freeAllContextForIndex(Index index) {
|
||||
assert index != null;
|
||||
for (SearchContext ctx : activeContexts.values()) {
|
||||
|
|
|
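The SearchService hunks above split the shard/searcher wiring out of createContext() into a public createSearchContext(request, timeout, searcher) factory. A hedged caller-side sketch of the shape this enables for actions other than the query phase follows; validateOnShard and its parameters are invented names, only the factory, setCurrent/removeCurrent, and the rewrite step come from the code shown above.

import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchRequest;

// Hedged sketch, not part of the patch: acquire a throwaway context from the new factory,
// make it "current" while parsing, and always release it afterwards.
final class SearchContextFactorySketch {
    static void validateOnShard(SearchService searchService, ShardSearchRequest shardRequest,
                                TimeValue timeout) throws Exception {
        DefaultSearchContext context = searchService.createSearchContext(shardRequest, timeout, null);
        try {
            SearchContext.setCurrent(context);                     // visible to parsing helpers
            shardRequest.rewrite(context.getQueryShardContext());  // same rewrite step createContext performs
        } finally {
            SearchContext.removeCurrent();
            context.close();                                       // releases the engine searcher acquired by the factory
        }
    }
}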
@ -36,9 +36,6 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
|
|||
return;
|
||||
}
|
||||
SourceLookup source = context.lookup().source();
|
||||
if (source.internalSourceRef() == null) {
|
||||
return; // source disabled in the mapping
|
||||
}
|
||||
FetchSourceContext fetchSourceContext = context.fetchSourceContext();
|
||||
assert fetchSourceContext.fetchSource();
|
||||
if (fetchSourceContext.includes().length == 0 && fetchSourceContext.excludes().length == 0) {
|
||||
|
@ -46,6 +43,11 @@ public final class FetchSourceSubPhase implements FetchSubPhase {
|
|||
return;
|
||||
}
|
||||
|
||||
if (source.internalSourceRef() == null) {
|
||||
throw new IllegalArgumentException("unable to fetch fields from _source field: _source is disabled in the mappings " +
|
||||
"for index [" + context.indexShard().shardId().getIndexName() + "]");
|
||||
}
|
||||
|
||||
Object value = source.filter(fetchSourceContext.includes(), fetchSourceContext.excludes());
|
||||
try {
|
||||
final int initialCapacity = Math.min(1024, source.internalSourceRef().length());
|
||||
|
|
|
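The FetchSourceSubPhase hunks above reorder the _source guard: a plain fetch of a missing _source still returns quietly, but asking for includes/excludes when _source is disabled in the mapping is now a hard error rather than a silent no-op. A minimal sketch of that control flow, with the SourceLookup/FetchSourceContext machinery simplified to primitive parameters:

// Self-contained sketch of the reordered guard; parameter names are simplifications,
// not the sub-phase's real signature.
final class FetchSourceGuardSketch {
    static void check(boolean fetchSource, byte[] sourceBytes,
                      String[] includes, String[] excludes, String indexName) {
        if (fetchSource == false) {
            return;                                   // _source was not requested at all
        }
        if (includes.length == 0 && excludes.length == 0) {
            return;                                   // plain fetch: a missing _source is tolerated
        }
        if (sourceBytes == null) {
            // filtering was requested, so a disabled _source is now an explicit error
            throw new IllegalArgumentException("unable to fetch fields from _source field: "
                + "_source is disabled in the mappings for index [" + indexName + "]");
        }
        // ... include/exclude filtering of the source map would happen here ...
    }
}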
@ -78,10 +78,7 @@ public final class CustomQueryScorer extends QueryScorer {
|
|||
@Override
|
||||
protected void extractUnknownQuery(Query query,
|
||||
Map<String, WeightedSpanTerm> terms) throws IOException {
|
||||
if (query instanceof FunctionScoreQuery) {
|
||||
query = ((FunctionScoreQuery) query).getSubQuery();
|
||||
extract(query, 1F, terms);
|
||||
} else if (query instanceof FiltersFunctionScoreQuery) {
|
||||
if (query instanceof FiltersFunctionScoreQuery) {
|
||||
query = ((FiltersFunctionScoreQuery) query).getSubQuery();
|
||||
extract(query, 1F, terms);
|
||||
} else if (terms.isEmpty()) {
|
||||
|
@ -97,9 +94,11 @@ public final class CustomQueryScorer extends QueryScorer {
|
|||
} else if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
|
||||
// skip has_child or has_parent queries, see: https://github.com/elastic/elasticsearch/issues/14999
|
||||
return;
|
||||
} else if (query instanceof FunctionScoreQuery) {
|
||||
super.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms);
|
||||
} else {
|
||||
super.extract(query, boost, terms);
|
||||
}
|
||||
|
||||
super.extract(query, boost, terms);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
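The CustomQueryScorer hunks above change which terms feed highlighting: function_score now contributes the terms of the query it wraps, has_child/has_parent still contributes nothing, and everything else falls through to the default extraction. A hedged sketch of that rule; Extractor is an invented stand-in for the inherited QueryScorer/WeightedSpanTermExtractor machinery, and the import paths are the usual Elasticsearch/Lucene locations rather than something introduced by this change.

import java.io.IOException;
import java.util.Map;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.highlight.WeightedSpanTerm;
import org.elasticsearch.common.lucene.search.function.FunctionScoreQuery;
import org.elasticsearch.index.query.HasChildQueryBuilder;

// Illustration only, not the QueryScorer API.
final class HighlightExtractionSketch {
    interface Extractor {
        void extract(Query query, float boost, Map<String, WeightedSpanTerm> terms) throws IOException;
    }

    static void extractForHighlighting(Query query, float boost, Map<String, WeightedSpanTerm> terms,
                                       Extractor fallback) throws IOException {
        if (query instanceof HasChildQueryBuilder.LateParsingQuery) {
            return;                                                 // see elastic/elasticsearch#14999
        }
        if (query instanceof FunctionScoreQuery) {
            // function_score only changes scoring, so highlight the query it wraps
            fallback.extract(((FunctionScoreQuery) query).getSubQuery(), boost, terms);
        } else {
            fallback.extract(query, boost, terms);
        }
    }
}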
@ -100,8 +100,8 @@ public abstract class FilteredSearchContext extends SearchContext {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() {
|
||||
in.preProcess();
|
||||
public void preProcess(boolean rewrite) {
|
||||
in.preProcess(rewrite);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -139,8 +139,9 @@ public abstract class SearchContext extends AbstractRefCounted implements Releas
|
|||
|
||||
/**
|
||||
* Should be called before executing the main query and after all other parameters have been set.
|
||||
* @param rewrite if the set query should be rewritten against the searcher returned from {@link #searcher()}
|
||||
*/
|
||||
public abstract void preProcess();
|
||||
public abstract void preProcess(boolean rewrite);
|
||||
|
||||
public abstract Query searchFilter(String[] types);
|
||||
|
||||
|
|
|
@ -81,14 +81,11 @@ public class ShardSearchLocalRequest implements ShardSearchRequest {
|
|||
this.nowInMillis = nowInMillis;
|
||||
}
|
||||
|
||||
public ShardSearchLocalRequest(String[] types, long nowInMillis) {
|
||||
public ShardSearchLocalRequest(ShardId shardId, String[] types, long nowInMillis, String[] filteringAliases) {
|
||||
this.types = types;
|
||||
this.nowInMillis = nowInMillis;
|
||||
}
|
||||
|
||||
public ShardSearchLocalRequest(String[] types, long nowInMillis, String[] filteringAliases) {
|
||||
this(types, nowInMillis);
|
||||
this.filteringAliases = filteringAliases;
|
||||
this.shardId = shardId;
|
||||
}
|
||||
|
||||
public ShardSearchLocalRequest(ShardId shardId, int numberOfShards, SearchType searchType, SearchSourceBuilder source, String[] types,
|
||||
|
|
|
@ -77,7 +77,7 @@ public class SubSearchContext extends FilteredSearchContext {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void preProcess() {
|
||||
public void preProcess(boolean rewrite) {
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
@ -85,7 +85,7 @@ public class QueryPhase implements SearchPhase {
|
|||
|
||||
@Override
|
||||
public void preProcess(SearchContext context) {
|
||||
context.preProcess();
|
||||
context.preProcess(true);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
|
|
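Taken together, the SearchContext/FilteredSearchContext/QueryPhase hunks above add a boolean to preProcess: the query phase keeps rewriting the main query against the shard searcher, while a caller that only needs the request parsed can skip that step. A tiny illustrative sketch; the "parsing only" caller is an assumption about how the flag is meant to be used, not code from this change.

import org.elasticsearch.search.internal.SearchContext;

final class PreProcessSketch {
    void runQueryPhase(SearchContext context) {
        context.preProcess(true);    // rewrites query() via searcher().rewrite(...) as before
    }

    void prepareForParsingOnly(SearchContext context) {
        context.preProcess(false);   // builds the filtered query but leaves it unrewritten
    }
}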
@ -86,7 +86,7 @@ import org.elasticsearch.plugins.Plugin;
|
|||
import org.elasticsearch.script.MockScriptPlugin;
|
||||
import org.elasticsearch.script.Script;
|
||||
import org.elasticsearch.script.ScriptService;
|
||||
import org.elasticsearch.search.action.SearchTransportService;
|
||||
import org.elasticsearch.action.search.SearchTransportService;
|
||||
import org.elasticsearch.tasks.Task;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.elasticsearch.test.ESIntegTestCase.ClusterScope;
|
||||
|
|
|
@ -42,8 +42,6 @@ public class MultiGetShardRequestTests extends ESTestCase {
|
|||
if (randomBoolean()) {
|
||||
multiGetRequest.refresh(true);
|
||||
}
|
||||
multiGetRequest.ignoreErrorsOnGeneratedFields(randomBoolean());
|
||||
|
||||
MultiGetShardRequest multiGetShardRequest = new MultiGetShardRequest(multiGetRequest, "index", 0);
|
||||
int numItems = iterations(10, 30);
|
||||
for (int i = 0; i < numItems; i++) {
|
||||
|
@ -79,7 +77,6 @@ public class MultiGetShardRequestTests extends ESTestCase {
|
|||
assertThat(multiGetShardRequest2.preference(), equalTo(multiGetShardRequest.preference()));
|
||||
assertThat(multiGetShardRequest2.realtime(), equalTo(multiGetShardRequest.realtime()));
|
||||
assertThat(multiGetShardRequest2.refresh(), equalTo(multiGetShardRequest.refresh()));
|
||||
assertThat(multiGetShardRequest2.ignoreErrorsOnGeneratedFields(), equalTo(multiGetShardRequest.ignoreErrorsOnGeneratedFields()));
|
||||
assertThat(multiGetShardRequest2.items.size(), equalTo(multiGetShardRequest.items.size()));
|
||||
for (int i = 0; i < multiGetShardRequest2.items.size(); i++) {
|
||||
MultiGetRequest.Item item = multiGetShardRequest.items.get(i);
|
||||
|
|
|
@ -17,10 +17,11 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.controller;
|
||||
package org.elasticsearch.action.search;
|
||||
|
||||
import org.apache.lucene.search.ScoreDoc;
|
||||
import org.apache.lucene.search.TopDocs;
|
||||
import org.elasticsearch.action.search.SearchPhaseController;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.text.Text;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
|
@ -49,6 +49,10 @@ public class WildcardExpressionResolverTests extends ESTestCase {
|
|||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testX*", "kuku"))), equalTo(newHashSet("testXXX", "testXYY", "kuku")));
|
||||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY", "kuku")));
|
||||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("*", "-kuku"))), equalTo(newHashSet("testXXX", "testXYY", "testYYY")));
|
||||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testYYY"))), equalTo(newHashSet("testXXX", "testYYY")));
|
||||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testXXX"))).size(), equalTo(0));
|
||||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "+testY*"))), equalTo(newHashSet("testXXX", "testYYY")));
|
||||
assertThat(newHashSet(resolver.resolve(context, Arrays.asList("testXXX", "-testX*"))).size(), equalTo(0));
|
||||
}
|
||||
|
||||
public void testConvertWildcardsTests() {
|
||||
|
|
|
@ -0,0 +1,116 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.cluster.routing.allocation;
|
||||
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
|
||||
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
/**
|
||||
* Unit tests for the {@link UnassignedShardDecision} class.
|
||||
*/
|
||||
public class UnassignedShardDecisionTests extends ESTestCase {
|
||||
|
||||
public void testDecisionNotTaken() {
|
||||
UnassignedShardDecision unassignedShardDecision = UnassignedShardDecision.DECISION_NOT_TAKEN;
|
||||
assertFalse(unassignedShardDecision.isDecisionTaken());
|
||||
assertNull(unassignedShardDecision.getFinalDecision());
|
||||
assertNull(unassignedShardDecision.getAllocationStatus());
|
||||
assertNull(unassignedShardDecision.getAllocationId());
|
||||
assertNull(unassignedShardDecision.getAssignedNodeId());
|
||||
assertNull(unassignedShardDecision.getFinalExplanation());
|
||||
assertNull(unassignedShardDecision.getNodeDecisions());
|
||||
expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalDecisionSafe());
|
||||
expectThrows(IllegalArgumentException.class, () -> unassignedShardDecision.getFinalExplanationSafe());
|
||||
}
|
||||
|
||||
public void testNoDecision() {
|
||||
final AllocationStatus allocationStatus = randomFrom(
|
||||
AllocationStatus.DELAYED_ALLOCATION, AllocationStatus.NO_VALID_SHARD_COPY, AllocationStatus.FETCHING_SHARD_DATA
|
||||
);
|
||||
UnassignedShardDecision noDecision = UnassignedShardDecision.noDecision(allocationStatus, "something is wrong");
|
||||
assertTrue(noDecision.isDecisionTaken());
|
||||
assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type());
|
||||
assertEquals(allocationStatus, noDecision.getAllocationStatus());
|
||||
assertEquals("something is wrong", noDecision.getFinalExplanation());
|
||||
assertNull(noDecision.getNodeDecisions());
|
||||
assertNull(noDecision.getAssignedNodeId());
|
||||
assertNull(noDecision.getAllocationId());
|
||||
|
||||
Map<String, Decision> nodeDecisions = new HashMap<>();
|
||||
nodeDecisions.put("node1", Decision.NO);
|
||||
nodeDecisions.put("node2", Decision.NO);
|
||||
noDecision = UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, "something is wrong", nodeDecisions);
|
||||
assertTrue(noDecision.isDecisionTaken());
|
||||
assertEquals(Decision.Type.NO, noDecision.getFinalDecision().type());
|
||||
assertEquals(AllocationStatus.DECIDERS_NO, noDecision.getAllocationStatus());
|
||||
assertEquals("something is wrong", noDecision.getFinalExplanation());
|
||||
assertEquals(nodeDecisions, noDecision.getNodeDecisions());
|
||||
assertNull(noDecision.getAssignedNodeId());
|
||||
assertNull(noDecision.getAllocationId());
|
||||
|
||||
// test bad values
|
||||
expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(null, "a"));
|
||||
expectThrows(NullPointerException.class, () -> UnassignedShardDecision.noDecision(AllocationStatus.DECIDERS_NO, null));
|
||||
}
|
||||
|
||||
public void testThrottleDecision() {
|
||||
Map<String, Decision> nodeDecisions = new HashMap<>();
|
||||
nodeDecisions.put("node1", Decision.NO);
|
||||
nodeDecisions.put("node2", Decision.THROTTLE);
|
||||
UnassignedShardDecision throttleDecision = UnassignedShardDecision.throttleDecision("too much happening", nodeDecisions);
|
||||
assertTrue(throttleDecision.isDecisionTaken());
|
||||
assertEquals(Decision.Type.THROTTLE, throttleDecision.getFinalDecision().type());
|
||||
assertEquals(AllocationStatus.DECIDERS_THROTTLED, throttleDecision.getAllocationStatus());
|
||||
assertEquals("too much happening", throttleDecision.getFinalExplanation());
|
||||
assertEquals(nodeDecisions, throttleDecision.getNodeDecisions());
|
||||
assertNull(throttleDecision.getAssignedNodeId());
|
||||
assertNull(throttleDecision.getAllocationId());
|
||||
|
||||
// test bad values
|
||||
expectThrows(NullPointerException.class, () -> UnassignedShardDecision.throttleDecision(null, Collections.emptyMap()));
|
||||
}
|
||||
|
||||
public void testYesDecision() {
|
||||
Map<String, Decision> nodeDecisions = new HashMap<>();
|
||||
nodeDecisions.put("node1", Decision.YES);
|
||||
nodeDecisions.put("node2", Decision.NO);
|
||||
String allocId = randomBoolean() ? "allocId" : null;
|
||||
UnassignedShardDecision yesDecision = UnassignedShardDecision.yesDecision(
|
||||
"node was very kind", "node1", allocId, nodeDecisions
|
||||
);
|
||||
assertTrue(yesDecision.isDecisionTaken());
|
||||
assertEquals(Decision.Type.YES, yesDecision.getFinalDecision().type());
|
||||
assertNull(yesDecision.getAllocationStatus());
|
||||
assertEquals("node was very kind", yesDecision.getFinalExplanation());
|
||||
assertEquals(nodeDecisions, yesDecision.getNodeDecisions());
|
||||
assertEquals("node1", yesDecision.getAssignedNodeId());
|
||||
assertEquals(allocId, yesDecision.getAllocationId());
|
||||
|
||||
expectThrows(NullPointerException.class,
|
||||
() -> UnassignedShardDecision.yesDecision(null, "a", randomBoolean() ? "a" : null, Collections.emptyMap()));
|
||||
expectThrows(NullPointerException.class,
|
||||
() -> UnassignedShardDecision.yesDecision("a", null, null, Collections.emptyMap()));
|
||||
}
|
||||
}
|
|
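The new test file above exercises the three UnassignedShardDecision factories (no decision, no/throttle, yes). As a hedged sketch of how an allocator could act on such a decision, using only the accessors the tests themselves call; the Callbacks interface and apply() are invented for illustration and are not part of this change.

import org.elasticsearch.cluster.routing.allocation.UnassignedShardDecision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;

final class UnassignedShardDecisionSketch {
    interface Callbacks {
        void assign(String nodeId, String allocationId);
        void reportBlocked(String status, String explanation);
    }

    static void apply(UnassignedShardDecision decision, Callbacks callbacks) {
        if (decision.isDecisionTaken() == false) {
            return;                                      // no decision yet, nothing to act on
        }
        if (decision.getFinalDecision().type() == Decision.Type.YES) {
            callbacks.assign(decision.getAssignedNodeId(), decision.getAllocationId());
        } else {
            // covers both the NO and THROTTLE outcomes, which carry a status and an explanation
            callbacks.reportBlocked(decision.getAllocationStatus().toString(), decision.getFinalExplanation());
        }
    }
}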
@ -20,25 +20,44 @@
|
|||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterChangedEvent;
|
||||
import org.elasticsearch.cluster.ClusterName;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode.Role;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNodes;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.settings.ClusterSettings;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.LocalTransportAddress;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
|
||||
import org.elasticsearch.discovery.zen.ping.ZenPing;
|
||||
import org.elasticsearch.discovery.zen.ping.ZenPingService;
|
||||
import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.AssertingAckListener;
|
||||
import org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.MockNode;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.HashSet;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.emptySet;
|
||||
import static org.elasticsearch.discovery.zen.ZenDiscovery.shouldIgnoreOrRejectNewClusterState;
|
||||
import static org.elasticsearch.discovery.zen.elect.ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING;
|
||||
import static org.elasticsearch.discovery.zen.publish.PublishClusterStateActionTests.createMockNode;
|
||||
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
|
||||
import static org.elasticsearch.test.ClusterServiceUtils.setState;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
|
@ -107,7 +126,7 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
|
|||
ArrayList<DiscoveryNode> masterNodes = new ArrayList<>();
|
||||
ArrayList<DiscoveryNode> allNodes = new ArrayList<>();
|
||||
for (int i = randomIntBetween(10, 20); i >= 0; i--) {
|
||||
Set<DiscoveryNode.Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(DiscoveryNode.Role.values())));
|
||||
Set<Role> roles = new HashSet<>(randomSubsetOf(Arrays.asList(Role.values())));
|
||||
DiscoveryNode node = new DiscoveryNode("node_" + i, "id_" + i, LocalTransportAddress.buildUnique(), Collections.emptyMap(),
|
||||
roles, Version.CURRENT);
|
||||
responses.add(new ZenPing.PingResponse(node, randomBoolean() ? null : node, new ClusterName("test"), randomBoolean()));
|
||||
|
@ -127,4 +146,80 @@ public class ZenDiscoveryUnitTests extends ESTestCase {
|
|||
assertThat(filteredNodes, equalTo(allNodes));
|
||||
}
|
||||
}
|
||||
|
||||
public void testNodesUpdatedAfterClusterStatePublished() throws Exception {
|
||||
ThreadPool threadPool = new TestThreadPool(getClass().getName());
|
||||
// randomly make minimum_master_nodes a value higher than we have nodes for, so it will force failure
|
||||
int minMasterNodes = randomBoolean() ? 3 : 1;
|
||||
Settings settings = Settings.builder()
|
||||
.put(DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), Integer.toString(minMasterNodes)).build();
|
||||
|
||||
Map<String, MockNode> nodes = new HashMap<>();
|
||||
ZenDiscovery zenDiscovery = null;
|
||||
ClusterService clusterService = null;
|
||||
try {
|
||||
Set<DiscoveryNode> expectedFDNodes = null;
|
||||
// create master node and its mocked up services
|
||||
MockNode master = createMockNode("master", settings, null, threadPool, logger, nodes).setAsMaster();
|
||||
ClusterState state = master.clusterState; // initial cluster state
|
||||
|
||||
// build the zen discovery and cluster service
|
||||
clusterService = createClusterService(threadPool, master.discoveryNode);
|
||||
setState(clusterService, state);
|
||||
zenDiscovery = buildZenDiscovery(settings, master, clusterService, threadPool);
|
||||
|
||||
// a new cluster state with a new discovery node (we will test if the cluster state
|
||||
// was updated by the presence of this node in NodesFaultDetection)
|
||||
MockNode newNode = createMockNode("new_node", settings, null, threadPool, logger, nodes);
|
||||
ClusterState newState = ClusterState.builder(state).incrementVersion().nodes(
|
||||
DiscoveryNodes.builder(state.nodes()).add(newNode.discoveryNode).masterNodeId(master.discoveryNode.getId())
|
||||
).build();
|
||||
|
||||
try {
|
||||
// publishing a new cluster state
|
||||
ClusterChangedEvent clusterChangedEvent = new ClusterChangedEvent("testing", newState, state);
|
||||
AssertingAckListener listener = new AssertingAckListener(newState.nodes().getSize() - 1);
|
||||
expectedFDNodes = zenDiscovery.getFaultDetectionNodes();
|
||||
zenDiscovery.publish(clusterChangedEvent, listener);
|
||||
listener.await(1, TimeUnit.HOURS);
|
||||
// publish was a success, update expected FD nodes based on new cluster state
|
||||
expectedFDNodes = fdNodesForState(newState, master.discoveryNode);
|
||||
} catch (Discovery.FailedToCommitClusterStateException e) {
|
||||
// not successful, so expectedFDNodes should remain as it was originally assigned above
|
||||
assertEquals(3, minMasterNodes); // ensure min master nodes is the higher value, otherwise we shouldn't fail
|
||||
}
|
||||
|
||||
assertEquals(expectedFDNodes, zenDiscovery.getFaultDetectionNodes());
|
||||
} finally {
|
||||
// clean close of transport service and publish action for each node
|
||||
zenDiscovery.close();
|
||||
clusterService.close();
|
||||
for (MockNode curNode : nodes.values()) {
|
||||
curNode.action.close();
|
||||
curNode.service.close();
|
||||
}
|
||||
terminate(threadPool);
|
||||
}
|
||||
}
|
||||
|
||||
private ZenDiscovery buildZenDiscovery(Settings settings, MockNode master, ClusterService clusterService, ThreadPool threadPool) {
|
||||
ClusterSettings clusterSettings = new ClusterSettings(settings, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS);
|
||||
ZenPingService zenPingService = new ZenPingService(settings, Collections.emptySet());
|
||||
ElectMasterService electMasterService = new ElectMasterService(settings);
|
||||
ZenDiscovery zenDiscovery = new ZenDiscovery(settings, threadPool, master.service, clusterService,
|
||||
clusterSettings, zenPingService, electMasterService);
|
||||
zenDiscovery.start();
|
||||
return zenDiscovery;
|
||||
}
|
||||
|
||||
private Set<DiscoveryNode> fdNodesForState(ClusterState clusterState, DiscoveryNode localNode) {
|
||||
final Set<DiscoveryNode> discoveryNodes = new HashSet<>();
|
||||
clusterState.getNodes().getNodes().valuesIt().forEachRemaining(discoveryNode -> {
|
||||
// the local node isn't part of the nodes that are pinged (don't ping ourselves)
|
||||
if (discoveryNode.getId().equals(localNode.getId()) == false) {
|
||||
discoveryNodes.add(discoveryNode);
|
||||
}
|
||||
});
|
||||
return discoveryNodes;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -145,21 +145,22 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public MockNode createMockNode(final String name) throws Exception {
|
||||
return createMockNode(name, Settings.EMPTY);
|
||||
}
|
||||
|
||||
public MockNode createMockNode(String name, Settings settings) throws Exception {
|
||||
return createMockNode(name, settings, null);
|
||||
return createMockNode(name, Settings.EMPTY, null);
|
||||
}
|
||||
|
||||
public MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener) throws Exception {
|
||||
return createMockNode(name, basSettings, listener, threadPool, logger, nodes);
|
||||
}
|
||||
|
||||
public static MockNode createMockNode(String name, final Settings basSettings, @Nullable ClusterStateListener listener,
|
||||
ThreadPool threadPool, Logger logger, Map<String, MockNode> nodes) throws Exception {
|
||||
final Settings settings = Settings.builder()
|
||||
.put("name", name)
|
||||
.put(TransportService.TRACE_LOG_INCLUDE_SETTING.getKey(), "", TransportService.TRACE_LOG_EXCLUDE_SETTING.getKey(), "NOTHING")
|
||||
.put(basSettings)
|
||||
.build();
|
||||
|
||||
MockTransportService service = buildTransportService(settings);
|
||||
MockTransportService service = buildTransportService(settings, threadPool);
|
||||
DiscoveryNode discoveryNode = DiscoveryNode.createLocal(settings, service.boundAddress().publishAddress(),
|
||||
NodeEnvironment.generateNodeId(settings));
|
||||
MockNode node = new MockNode(discoveryNode, service, listener, logger);
|
||||
|
@ -228,14 +229,14 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
terminate(threadPool);
|
||||
}
|
||||
|
||||
protected MockTransportService buildTransportService(Settings settings) {
|
||||
MockTransportService transportService = MockTransportService.local(Settings.EMPTY, Version.CURRENT, threadPool);
|
||||
private static MockTransportService buildTransportService(Settings settings, ThreadPool threadPool) {
|
||||
MockTransportService transportService = MockTransportService.local(settings, Version.CURRENT, threadPool);
|
||||
transportService.start();
|
||||
transportService.acceptIncomingRequests();
|
||||
return transportService;
|
||||
}
|
||||
|
||||
protected MockPublishAction buildPublishClusterStateAction(
|
||||
private static MockPublishAction buildPublishClusterStateAction(
|
||||
Settings settings,
|
||||
MockTransportService transportService,
|
||||
Supplier<ClusterState> clusterStateSupplier,
|
||||
|
@ -253,8 +254,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
}
|
||||
|
||||
public void testSimpleClusterStatePublishing() throws Exception {
|
||||
MockNode nodeA = createMockNode("nodeA", Settings.EMPTY).setAsMaster();
|
||||
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
|
||||
MockNode nodeA = createMockNode("nodeA").setAsMaster();
|
||||
MockNode nodeB = createMockNode("nodeB");
|
||||
|
||||
// Initial cluster state
|
||||
ClusterState clusterState = nodeA.clusterState;
|
||||
|
@ -282,7 +283,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
|
||||
// Adding new node - this node should get full cluster state while nodeB should still be getting diffs
|
||||
|
||||
MockNode nodeC = createMockNode("nodeC", Settings.EMPTY);
|
||||
MockNode nodeC = createMockNode("nodeC");
|
||||
|
||||
// cluster state update 3 - register node C
|
||||
previousClusterState = clusterState;
|
||||
|
@ -336,7 +337,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
fail("Shouldn't send cluster state to myself");
|
||||
}).setAsMaster();
|
||||
|
||||
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
|
||||
MockNode nodeB = createMockNode("nodeB");
|
||||
|
||||
// Initial cluster state with both nodes - the second node still shouldn't get a diff even though it's present in the previous cluster state
|
||||
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
|
||||
|
@ -444,7 +445,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
}
|
||||
}).setAsMaster();
|
||||
|
||||
MockNode nodeB = createMockNode("nodeB", Settings.EMPTY);
|
||||
MockNode nodeB = createMockNode("nodeB");
|
||||
|
||||
// Initial cluster state with both nodes - the second node still shouldn't get a diff even though it's present in the previous cluster state
|
||||
DiscoveryNodes discoveryNodes = DiscoveryNodes.builder(nodeA.nodes()).add(nodeB.discoveryNode).build();
|
||||
|
@ -495,7 +496,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
final int dataNodes = randomIntBetween(0, 5);
|
||||
final Settings dataSettings = Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build();
|
||||
for (int i = 0; i < dataNodes; i++) {
|
||||
discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings).discoveryNode);
|
||||
discoveryNodesBuilder.add(createMockNode("data_" + i, dataSettings, null).discoveryNode);
|
||||
}
|
||||
discoveryNodesBuilder.localNodeId(master.discoveryNode.getId()).masterNodeId(master.discoveryNode.getId());
|
||||
DiscoveryNodes discoveryNodes = discoveryNodesBuilder.build();
|
||||
|
@ -521,7 +522,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
settings.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), expectingToCommit == false && timeOutNodes > 0 ? "100ms" : "1h")
|
||||
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "5ms"); // test is about committing
|
||||
|
||||
MockNode master = createMockNode("master", settings.build());
|
||||
MockNode master = createMockNode("master", settings.build(), null);
|
||||
|
||||
// randomize things a bit
|
||||
int[] nodeTypes = new int[goodNodes + errorNodes + timeOutNodes];
|
||||
|
@ -551,7 +552,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
}
|
||||
final int dataNodes = randomIntBetween(0, 3); // data nodes don't matter
|
||||
for (int i = 0; i < dataNodes; i++) {
|
||||
final MockNode mockNode = createMockNode("data_" + i, Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build());
|
||||
final MockNode mockNode = createMockNode("data_" + i,
|
||||
Settings.builder().put(Node.NODE_MASTER_SETTING.getKey(), false).build(), null);
|
||||
discoveryNodesBuilder.add(mockNode.discoveryNode);
|
||||
if (randomBoolean()) {
|
||||
// we really don't care - just chaos monkey
|
||||
|
@ -726,8 +728,8 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
Settings settings = Settings.builder()
|
||||
.put(DiscoverySettings.COMMIT_TIMEOUT_SETTING.getKey(), "1ms").build(); // short but so we will sometime commit sometime timeout
|
||||
|
||||
MockNode master = createMockNode("master", settings);
|
||||
MockNode node = createMockNode("node", settings);
|
||||
MockNode master = createMockNode("master", settings, null);
|
||||
MockNode node = createMockNode("node", settings, null);
|
||||
ClusterState state = ClusterState.builder(master.clusterState)
|
||||
.nodes(DiscoveryNodes.builder(master.clusterState.nodes()).add(node.discoveryNode).masterNodeId(master.discoveryNode.getId())).build();
|
||||
|
||||
|
@ -843,7 +845,7 @@ public class PublishClusterStateActionTests extends ESTestCase {
|
|||
assertFalse(actual.wasReadFromDiff());
|
||||
}
|
||||
|
||||
static class MockPublishAction extends PublishClusterStateAction {
|
||||
public static class MockPublishAction extends PublishClusterStateAction {
|
||||
|
||||
AtomicBoolean timeoutOnSend = new AtomicBoolean();
|
||||
AtomicBoolean errorOnSend = new AtomicBoolean();
|
||||
|
|
|
@ -396,49 +396,6 @@ public class NodeEnvironmentTests extends ESTestCase {
|
|||
env.close();
|
||||
}
|
||||
|
||||
public void testWhetherClusterFolderShouldBeUsed() throws Exception {
|
||||
Path tempNoCluster = createTempDir();
|
||||
Path tempDataPath = tempNoCluster.toAbsolutePath();
|
||||
|
||||
Path tempPath = tempNoCluster.resolve("foo"); // "foo" is the cluster name
|
||||
Path tempClusterPath = tempPath.toAbsolutePath();
|
||||
|
||||
assertFalse("non-existent directory should not be used", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
|
||||
Settings settings = Settings.builder()
|
||||
.put("cluster.name", "foo")
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
|
||||
try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
|
||||
Path nodeDataPath = env.nodeDataPaths()[0];
|
||||
assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
|
||||
}
|
||||
IOUtils.rm(tempNoCluster);
|
||||
|
||||
Files.createDirectories(tempPath);
|
||||
assertFalse("empty directory should not be read from", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
|
||||
settings = Settings.builder()
|
||||
.put("cluster.name", "foo")
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.put(Environment.PATH_DATA_SETTING.getKey(), tempDataPath.toString()).build();
|
||||
try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
|
||||
Path nodeDataPath = env.nodeDataPaths()[0];
|
||||
assertEquals(nodeDataPath, tempDataPath.resolve("nodes").resolve("0"));
|
||||
}
|
||||
IOUtils.rm(tempNoCluster);
|
||||
|
||||
// Create a directory for the cluster name
|
||||
Files.createDirectories(tempPath.resolve(NodeEnvironment.NODES_FOLDER));
|
||||
assertTrue("there is data in the directory", NodeEnvironment.readFromDataPathWithClusterName(tempPath));
|
||||
settings = Settings.builder()
|
||||
.put("cluster.name", "foo")
|
||||
.put(Environment.PATH_HOME_SETTING.getKey(), createTempDir().toAbsolutePath().toString())
|
||||
.put(Environment.PATH_DATA_SETTING.getKey(), tempClusterPath.toString()).build();
|
||||
try (NodeEnvironment env = new NodeEnvironment(settings, new Environment(settings))) {
|
||||
Path nodeDataPath = env.nodeDataPaths()[0];
|
||||
assertEquals(nodeDataPath, tempClusterPath.resolve("nodes").resolve("0"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testPersistentNodeId() throws IOException {
|
||||
String[] paths = tmpPaths();
|
||||
NodeEnvironment env = newNodeEnvironment(paths, Settings.builder()
|
||||
|
|
|
@ -930,36 +930,30 @@ public class GetActionIT extends ESIntegTestCase {
|
|||
|
||||
private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields, @Nullable String routing) {
|
||||
for (String field : fields) {
|
||||
assertGetFieldWorks(index, type, docId, field, false, routing);
|
||||
assertGetFieldWorks(index, type, docId, field, true, routing);
|
||||
assertGetFieldWorks(index, type, docId, field, routing);
|
||||
assertGetFieldWorks(index, type, docId, field, routing);
|
||||
}
|
||||
}
|
||||
|
||||
private void assertGetFieldWorks(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
|
||||
GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
|
||||
private void assertGetFieldWorks(String index, String type, String docId, String field, @Nullable String routing) {
|
||||
GetResponse response = getDocument(index, type, docId, field, routing);
|
||||
assertThat(response.getId(), equalTo(docId));
|
||||
assertTrue(response.isExists());
|
||||
assertNotNull(response.getField(field));
|
||||
response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
|
||||
response = multiGetDocument(index, type, docId, field, routing);
|
||||
assertThat(response.getId(), equalTo(docId));
|
||||
assertTrue(response.isExists());
|
||||
assertNotNull(response.getField(field));
|
||||
}
|
||||
|
||||
protected void assertGetFieldsException(String index, String type, String docId, String[] fields) {
|
||||
for (String field : fields) {
|
||||
assertGetFieldException(index, type, docId, field);
|
||||
}
|
||||
}
|
||||
|
||||
private void assertGetFieldException(String index, String type, String docId, String field) {
|
||||
try {
|
||||
client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(false).get();
|
||||
client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).get();
|
||||
fail();
|
||||
} catch (ElasticsearchException e) {
|
||||
assertTrue(e.getMessage().contains("You can only get this field after refresh() has been called."));
|
||||
}
|
||||
MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).setIgnoreErrorsOnGeneratedFields(false).get();
|
||||
MultiGetResponse multiGetResponse = client().prepareMultiGet().add(new MultiGetRequest.Item(index, type, docId).fields(field)).get();
|
||||
assertNull(multiGetResponse.getResponses()[0].getResponse());
|
||||
assertTrue(multiGetResponse.getResponses()[0].getFailure().getMessage().contains("You can only get this field after refresh() has been called."));
|
||||
}
|
||||
|
@ -970,7 +964,7 @@ public class GetActionIT extends ESIntegTestCase {
|
|||
|
||||
protected void assertGetFieldsNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
|
||||
for (String field : fields) {
|
||||
assertGetFieldNull(index, type, docId, field, true, routing);
|
||||
assertGetFieldNull(index, type, docId, field, routing);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -980,37 +974,37 @@ public class GetActionIT extends ESIntegTestCase {
|
|||
|
||||
protected void assertGetFieldsAlwaysNull(String index, String type, String docId, String[] fields, @Nullable String routing) {
|
||||
for (String field : fields) {
|
||||
assertGetFieldNull(index, type, docId, field, true, routing);
|
||||
assertGetFieldNull(index, type, docId, field, false, routing);
|
||||
assertGetFieldNull(index, type, docId, field, routing);
|
||||
assertGetFieldNull(index, type, docId, field, routing);
|
||||
}
|
||||
}
|
||||
|
||||
protected void assertGetFieldNull(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
|
||||
protected void assertGetFieldNull(String index, String type, String docId, String field, @Nullable String routing) {
|
||||
//for get
|
||||
GetResponse response = getDocument(index, type, docId, field, ignoreErrors, routing);
|
||||
GetResponse response = getDocument(index, type, docId, field, routing);
|
||||
assertTrue(response.isExists());
|
||||
assertNull(response.getField(field));
|
||||
assertThat(response.getId(), equalTo(docId));
|
||||
//same for multi get
|
||||
response = multiGetDocument(index, type, docId, field, ignoreErrors, routing);
|
||||
response = multiGetDocument(index, type, docId, field, routing);
|
||||
assertNull(response.getField(field));
|
||||
assertThat(response.getId(), equalTo(docId));
|
||||
assertTrue(response.isExists());
|
||||
}
|
||||
|
||||
private GetResponse multiGetDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
|
||||
private GetResponse multiGetDocument(String index, String type, String docId, String field, @Nullable String routing) {
|
||||
MultiGetRequest.Item getItem = new MultiGetRequest.Item(index, type, docId).fields(field);
|
||||
if (routing != null) {
|
||||
getItem.routing(routing);
|
||||
}
|
||||
MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
|
||||
MultiGetRequestBuilder multiGetRequestBuilder = client().prepareMultiGet().add(getItem);
|
||||
MultiGetResponse multiGetResponse = multiGetRequestBuilder.get();
|
||||
assertThat(multiGetResponse.getResponses().length, equalTo(1));
|
||||
return multiGetResponse.getResponses()[0].getResponse();
|
||||
}
|
||||
|
||||
private GetResponse getDocument(String index, String type, String docId, String field, boolean ignoreErrors, @Nullable String routing) {
|
||||
GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field).setIgnoreErrorsOnGeneratedFields(ignoreErrors);
|
||||
private GetResponse getDocument(String index, String type, String docId, String field, @Nullable String routing) {
|
||||
GetRequestBuilder getRequestBuilder = client().prepareGet().setIndex(index).setType(type).setId(docId).setFields(field);
|
||||
if (routing != null) {
|
||||
getRequestBuilder.setRouting(routing);
|
||||
}
|
||||
|
|
|
@ -43,6 +43,8 @@ import java.util.concurrent.CountDownLatch;
|
|||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.termQuery;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
@ -52,13 +54,13 @@ import static org.hamcrest.Matchers.nullValue;
|
|||
public class IndexServiceTests extends ESSingleNodeTestCase {
|
||||
public void testDetermineShadowEngineShouldBeUsed() {
|
||||
Settings regularSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.build();
|
||||
|
||||
Settings shadowSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(SETTING_NUMBER_OF_SHARDS, 2)
|
||||
.put(SETTING_NUMBER_OF_REPLICAS, 1)
|
||||
.put(IndexMetaData.SETTING_SHADOW_REPLICAS, true)
|
||||
.build();
|
||||
|
||||
|
|
|
@ -18,15 +18,7 @@
|
|||
*/
|
||||
package org.elasticsearch.index.replication;
|
||||
|
||||
import org.apache.lucene.document.Document;
|
||||
import org.apache.lucene.index.IndexNotFoundException;
|
||||
import org.apache.lucene.index.LeafReader;
|
||||
import org.apache.lucene.index.LeafReaderContext;
|
||||
import org.apache.lucene.store.AlreadyClosedException;
|
||||
import org.apache.lucene.store.Directory;
|
||||
import org.apache.lucene.util.Bits;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionListener;
|
||||
import org.elasticsearch.action.DocWriteResponse;
|
||||
|
@ -41,52 +33,21 @@ import org.elasticsearch.action.support.replication.TransportWriteAction;
|
|||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource.PeerRecoverySource;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource.StoreRecoverySource;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.common.collect.Iterators;
|
||||
import org.elasticsearch.common.compress.CompressedXContent;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.LocalTransportAddress;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.util.BigArrays;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.MapperTestUtils;
|
||||
import org.elasticsearch.index.cache.IndexCache;
|
||||
import org.elasticsearch.index.cache.query.DisabledQueryCache;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.mapper.MapperService;
|
||||
import org.elasticsearch.index.mapper.Uid;
|
||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||
import org.elasticsearch.index.shard.IndexEventListener;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardState;
|
||||
import org.elasticsearch.index.shard.IndexShardTestCase;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.index.shard.ShardPath;
|
||||
import org.elasticsearch.index.similarity.SimilarityService;
|
||||
import org.elasticsearch.index.store.DirectoryService;
|
||||
import org.elasticsearch.index.store.Store;
|
||||
import org.elasticsearch.indices.recovery.RecoveryFailedException;
|
||||
import org.elasticsearch.indices.recovery.RecoverySourceHandler;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.indices.recovery.RecoveryTarget;
|
||||
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
|
||||
import org.elasticsearch.indices.recovery.StartRecoveryRequest;
|
||||
import org.elasticsearch.test.DummyShardLock;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
import org.elasticsearch.test.junit.annotations.TestLogging;
|
||||
import org.elasticsearch.threadpool.TestThreadPool;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
import org.elasticsearch.transport.TransportResponse;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.HashSet;
|
||||
|
@ -94,10 +55,8 @@ import java.util.Iterator;
|
|||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import java.util.Set;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.Future;
|
||||
import java.util.concurrent.FutureTask;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.concurrent.atomic.AtomicInteger;
|
||||
import java.util.function.BiFunction;
|
||||
import java.util.function.Consumer;
|
||||
|
@ -107,98 +66,24 @@ import java.util.stream.StreamSupport;
|
|||
import static org.hamcrest.Matchers.empty;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
||||
public abstract class ESIndexLevelReplicationTestCase extends IndexShardTestCase {
|
||||
|
||||
protected ThreadPool threadPool;
|
||||
protected final Index index = new Index("test", "uuid");
|
||||
private final ShardId shardId = new ShardId(index, 0);
|
||||
private final Map<String, String> indexMapping = Collections.singletonMap("type", "{ \"type\": {} }");
|
||||
protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
|
||||
@Override
|
||||
public void onRecoveryDone(RecoveryState state) {
|
||||
|
||||
}
|
||||
|
||||
@Override
|
||||
public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
|
||||
fail(ExceptionsHelper.detailedMessage(e));
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
@TestLogging("index.shard:TRACE,index.replication:TRACE,indices.recovery:TRACE")
|
||||
public void testIndexingDuringFileRecovery() throws Exception {
|
||||
try (ReplicationGroup shards = createGroup(randomInt(1))) {
|
||||
shards.startAll();
|
||||
int docs = shards.indexDocs(randomInt(50));
|
||||
shards.flush();
|
||||
IndexShard replica = shards.addReplica();
|
||||
final CountDownLatch recoveryBlocked = new CountDownLatch(1);
|
||||
final CountDownLatch releaseRecovery = new CountDownLatch(1);
|
||||
final Future<Void> recoveryFuture = shards.asyncRecoverReplica(replica,
|
||||
new BiFunction<IndexShard, DiscoveryNode, RecoveryTarget>() {
|
||||
@Override
|
||||
public RecoveryTarget apply(IndexShard indexShard, DiscoveryNode node) {
|
||||
return new RecoveryTarget(indexShard, node, recoveryListener, version -> {}) {
|
||||
@Override
|
||||
public void renameAllTempFiles() throws IOException {
|
||||
super.renameAllTempFiles();
|
||||
recoveryBlocked.countDown();
|
||||
try {
|
||||
releaseRecovery.await();
|
||||
} catch (InterruptedException e) {
|
||||
throw new IOException("terminated by interrupt", e);
|
||||
}
|
||||
}
|
||||
};
|
||||
}
|
||||
});
|
||||
|
||||
recoveryBlocked.await();
|
||||
docs += shards.indexDocs(randomInt(20));
|
||||
releaseRecovery.countDown();
|
||||
recoveryFuture.get();
|
||||
|
||||
shards.assertAllEqual(docs);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void setUp() throws Exception {
|
||||
super.setUp();
|
||||
threadPool = new TestThreadPool(getClass().getName());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void tearDown() throws Exception {
|
||||
super.tearDown();
|
||||
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
|
||||
}
|
||||
|
||||
private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
|
||||
final ShardId shardId = shardPath.getShardId();
|
||||
final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
|
||||
@Override
|
||||
public Directory newDirectory() throws IOException {
|
||||
return newFSDirectory(shardPath.resolveIndex());
|
||||
}
|
||||
|
||||
@Override
|
||||
public long throttleTimeInNanos() {
|
||||
return 0;
|
||||
}
|
||||
};
|
||||
return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
|
||||
}
|
||||
|
||||
protected ReplicationGroup createGroup(int replicas) throws IOException {
|
||||
final Path homePath = createTempDir();
|
||||
Settings build = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, replicas)
|
||||
.put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
|
||||
.build();
|
||||
IndexMetaData metaData = IndexMetaData.builder(index.getName()).settings(build).primaryTerm(0, 1).build();
|
||||
return new ReplicationGroup(metaData, homePath);
|
||||
IndexMetaData.Builder metaData = IndexMetaData.builder(index.getName())
|
||||
.settings(settings)
|
||||
.primaryTerm(0, 1);
|
||||
for (Map.Entry<String, String> typeMapping: indexMapping.entrySet()) {
|
||||
metaData.putMapping(typeMapping.getKey(), typeMapping.getValue());
|
||||
}
|
||||
return new ReplicationGroup(metaData.build());
|
||||
}
|
||||
|
||||
protected DiscoveryNode getDiscoveryNode(String id) {
|
||||
|
@ -206,50 +91,22 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
Collections.singleton(DiscoveryNode.Role.DATA), Version.CURRENT);
|
||||
}
|
||||
|
||||
private IndexShard newShard(boolean primary, DiscoveryNode node, IndexMetaData indexMetaData, Path homePath) throws IOException {
|
||||
// add the node name to the settings for proper logging
|
||||
final Settings nodeSettings = Settings.builder().put("node.name", node.getName()).build();
|
||||
final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
|
||||
ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, node.getId(), primary, ShardRoutingState.INITIALIZING,
|
||||
primary ? StoreRecoverySource.EMPTY_STORE_INSTANCE : PeerRecoverySource.INSTANCE);
|
||||
final Path path = Files.createDirectories(homePath.resolve(node.getId()));
|
||||
final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(path);
|
||||
ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
|
||||
Store store = createStore(indexSettings, shardPath);
|
||||
IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
|
||||
MapperService mapperService = MapperTestUtils.newMapperService(homePath, indexSettings.getSettings());
|
||||
for (Map.Entry<String, String> type : indexMapping.entrySet()) {
|
||||
mapperService.merge(type.getKey(), new CompressedXContent(type.getValue()), MapperService.MergeReason.MAPPING_RECOVERY, true);
|
||||
}
|
||||
SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
|
||||
final IndexEventListener indexEventListener = new IndexEventListener() {
|
||||
};
|
||||
final Engine.Warmer warmer = searcher -> {
|
||||
};
|
||||
return new IndexShard(shardRouting, indexSettings, shardPath, store, indexCache, mapperService, similarityService, null, null,
|
||||
indexEventListener, null, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer, Collections.emptyList(),
|
||||
Collections.emptyList());
|
||||
}
|
||||
|
||||
|
||||
protected class ReplicationGroup implements AutoCloseable, Iterable<IndexShard> {
|
||||
private final IndexShard primary;
|
||||
private final List<IndexShard> replicas;
|
||||
private final IndexMetaData indexMetaData;
|
||||
private final Path homePath;
|
||||
private final AtomicInteger replicaId = new AtomicInteger();
|
||||
private final AtomicInteger docId = new AtomicInteger();
|
||||
boolean closed = false;
|
||||
|
||||
ReplicationGroup(final IndexMetaData indexMetaData, Path homePath) throws IOException {
|
||||
primary = newShard(true, getDiscoveryNode("s0"), indexMetaData, homePath);
|
||||
ReplicationGroup(final IndexMetaData indexMetaData) throws IOException {
|
||||
primary = newShard(shardId, true, "s0", indexMetaData, null);
|
||||
replicas = new ArrayList<>();
|
||||
this.indexMetaData = indexMetaData;
|
||||
this.homePath = homePath;
|
||||
for (int i = 0; i < indexMetaData.getNumberOfReplicas(); i++) {
|
||||
addReplica();
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
public int indexDocs(final int numOfDoc) throws Exception {
|
||||
|
@ -289,7 +146,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
}
|
||||
|
||||
public synchronized IndexShard addReplica() throws IOException {
|
||||
final IndexShard replica = newShard(false, getDiscoveryNode("s" + replicaId.incrementAndGet()), indexMetaData, homePath);
|
||||
final IndexShard replica = newShard(shardId, false, "s" + replicaId.incrementAndGet(), indexMetaData, null);
|
||||
replicas.add(replica);
|
||||
return replica;
|
||||
}
|
||||
|
@ -304,39 +161,8 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
}
|
||||
|
||||
public void recoverReplica(IndexShard replica, BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
|
||||
boolean markAsRecovering)
|
||||
throws IOException {
|
||||
final DiscoveryNode pNode = getPrimaryNode();
|
||||
final DiscoveryNode rNode = getDiscoveryNode(replica.routingEntry().currentNodeId());
|
||||
if (markAsRecovering) {
|
||||
replica.markAsRecovering("remote",
|
||||
new RecoveryState(replica.routingEntry(), pNode, rNode));
|
||||
} else {
|
||||
assertEquals(replica.state(), IndexShardState.RECOVERING);
|
||||
}
|
||||
replica.prepareForIndexRecovery();
|
||||
RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
|
||||
StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
|
||||
getMetadataSnapshotOrEmpty(replica), false, 0);
|
||||
RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {},
|
||||
(int) ByteSizeUnit.MB.toKB(1), logger);
|
||||
recovery.recoverToTarget();
|
||||
recoveryTarget.markAsDone();
|
||||
replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
|
||||
}
|
||||
|
||||
private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
|
||||
Store.MetadataSnapshot result;
|
||||
try {
|
||||
result = replica.snapshotStoreMetadata();
|
||||
} catch (IndexNotFoundException e) {
|
||||
// OK!
|
||||
result = Store.MetadataSnapshot.EMPTY;
|
||||
} catch (IOException e) {
|
||||
logger.warn("failed read store, treating as empty", e);
|
||||
result = Store.MetadataSnapshot.EMPTY;
|
||||
}
|
||||
return result;
|
||||
boolean markAsRecovering) throws IOException {
|
||||
ESIndexLevelReplicationTestCase.this.recoverReplica(replica, primary, targetSupplier, markAsRecovering);
|
||||
}
|
||||
|
||||
public synchronized DiscoveryNode getPrimaryNode() {
|
||||
|
@ -367,24 +193,6 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
}
|
||||
}
|
||||
|
||||
private Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
|
||||
shard.refresh("get_uids");
|
||||
try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
|
||||
Set<Uid> ids = new HashSet<>();
|
||||
for (LeafReaderContext leafContext : searcher.reader().leaves()) {
|
||||
LeafReader reader = leafContext.reader();
|
||||
Bits liveDocs = reader.getLiveDocs();
|
||||
for (int i = 0; i < reader.maxDoc(); i++) {
|
||||
if (liveDocs == null || liveDocs.get(i)) {
|
||||
Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME));
|
||||
ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME)));
|
||||
}
|
||||
}
|
||||
}
|
||||
return ids;
|
||||
}
|
||||
}
|
||||
|
||||
public synchronized void refresh(String source) {
|
||||
for (IndexShard shard : this) {
|
||||
shard.refresh(source);
|
||||
|
@ -406,10 +214,7 @@ public abstract class ESIndexLevelReplicationTestCase extends ESTestCase {
|
|||
public synchronized void close() throws Exception {
|
||||
if (closed == false) {
|
||||
closed = true;
|
||||
for (IndexShard shard : this) {
|
||||
shard.close("eol", false);
|
||||
IOUtils.close(shard.store());
|
||||
}
|
||||
closeShards(this);
|
||||
} else {
|
||||
throw new AlreadyClosedException("too bad");
|
||||
}
|
||||
|
|
|
@ -0,0 +1,476 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
package org.elasticsearch.index.shard;
|
||||
|
||||
import org.apache.lucene.document.Field;
|
||||
import org.apache.lucene.document.NumericDocValuesField;
|
||||
import org.apache.lucene.index.Term;
|
||||
import org.apache.lucene.store.LockObtainFailedException;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.ExceptionsHelper;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.stats.IndexStats;
|
||||
import org.elasticsearch.action.search.SearchResponse;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.cluster.ClusterInfoService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.InternalClusterInfoService;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.cluster.routing.ShardRoutingState;
|
||||
import org.elasticsearch.cluster.routing.TestShardRouting;
|
||||
import org.elasticsearch.cluster.routing.UnassignedInfo;
|
||||
import org.elasticsearch.cluster.service.ClusterService;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.LocalTransportAddress;
|
||||
import org.elasticsearch.common.unit.ByteSizeUnit;
|
||||
import org.elasticsearch.common.unit.ByteSizeValue;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
import org.elasticsearch.env.ShardLock;
|
||||
import org.elasticsearch.index.Index;
|
||||
import org.elasticsearch.index.IndexService;
|
||||
import org.elasticsearch.index.IndexSettings;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.flush.FlushStats;
|
||||
import org.elasticsearch.index.mapper.Mapping;
|
||||
import org.elasticsearch.index.mapper.ParseContext;
|
||||
import org.elasticsearch.index.mapper.ParsedDocument;
|
||||
import org.elasticsearch.index.mapper.UidFieldMapper;
|
||||
import org.elasticsearch.index.translog.Translog;
|
||||
import org.elasticsearch.indices.IndicesService;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.DummyShardLock;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.test.IndexSettingsModule;
|
||||
import org.elasticsearch.test.InternalSettingsPlugin;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.nio.file.StandardCopyOption;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.BrokenBarrierException;
|
||||
import java.util.concurrent.CyclicBarrier;
|
||||
import java.util.concurrent.atomic.AtomicBoolean;
|
||||
import java.util.concurrent.atomic.AtomicReference;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.emptySet;
|
||||
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.IMMEDIATE;
|
||||
import static org.elasticsearch.action.support.WriteRequest.RefreshPolicy.NONE;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_REPLICAS;
|
||||
import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS;
|
||||
import static org.elasticsearch.index.query.QueryBuilders.matchAllQuery;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount;
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures;
|
||||
import static org.hamcrest.Matchers.equalTo;
|
||||
|
||||
public class IndexShardIT extends ESSingleNodeTestCase {
|
||||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> getPlugins() {
|
||||
return pluginList(InternalSettingsPlugin.class);
|
||||
}
|
||||
|
||||
private ParsedDocument testParsedDocument(String uid, String id, String type, String routing, long timestamp, long ttl,
|
||||
ParseContext.Document document, BytesReference source, Mapping mappingUpdate) {
|
||||
Field uidField = new Field("_uid", uid, UidFieldMapper.Defaults.FIELD_TYPE);
|
||||
Field versionField = new NumericDocValuesField("_version", 0);
|
||||
document.add(uidField);
|
||||
document.add(versionField);
|
||||
return new ParsedDocument(versionField, id, type, routing, timestamp, ttl, Collections.singletonList(document), source,
|
||||
mappingUpdate);
|
||||
}
|
||||
|
||||
public void testLockTryingToDelete() throws Exception {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
|
||||
|
||||
ClusterService cs = getInstanceFromNode(ClusterService.class);
|
||||
final Index index = cs.state().metaData().index("test").getIndex();
|
||||
Path[] shardPaths = env.availableShardPaths(new ShardId(index, 0));
|
||||
logger.info("--> paths: [{}]", (Object)shardPaths);
|
||||
// Should not be able to acquire the lock because it's already open
|
||||
try {
|
||||
NodeEnvironment.acquireFSLockForPaths(IndexSettingsModule.newIndexSettings("test", Settings.EMPTY), shardPaths);
|
||||
fail("should not have been able to acquire the lock");
|
||||
} catch (LockObtainFailedException e) {
|
||||
assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
|
||||
}
|
||||
// Test without the regular shard lock to assume we can acquire it
|
||||
// (worst case, meaning that the shard lock could be acquired and
|
||||
// we're green to delete the shard's directory)
|
||||
ShardLock sLock = new DummyShardLock(new ShardId(index, 0));
|
||||
try {
|
||||
env.deleteShardDirectoryUnderLock(sLock, IndexSettingsModule.newIndexSettings("test", Settings.EMPTY));
|
||||
fail("should not have been able to delete the directory");
|
||||
} catch (LockObtainFailedException e) {
|
||||
assertTrue("msg: " + e.getMessage(), e.getMessage().contains("unable to acquire write.lock"));
|
||||
}
|
||||
}
|
||||
|
||||
public void testMarkAsInactiveTriggersSyncedFlush() throws Exception {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
|
||||
client().prepareIndex("test", "test").setSource("{}").get();
|
||||
ensureGreen("test");
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
|
||||
assertBusy(() -> {
|
||||
IndexStats indexStats = client().admin().indices().prepareStats("test").clear().get().getIndex("test");
|
||||
assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
|
||||
indicesService.indexService(resolveIndex("test")).getShardOrNull(0).checkIdle(0);
|
||||
}
|
||||
);
|
||||
IndexStats indexStats = client().admin().indices().prepareStats("test").get().getIndex("test");
|
||||
assertNotNull(indexStats.getShards()[0].getCommitStats().getUserData().get(Engine.SYNC_COMMIT_ID));
|
||||
}
|
||||
|
||||
public void testDurableFlagHasEffect() {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
client().prepareIndex("test", "bar", "1").setSource("{}").get();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService test = indicesService.indexService(resolveIndex("test"));
|
||||
IndexShard shard = test.getShardOrNull(0);
|
||||
setDurability(shard, Translog.Durability.REQUEST);
|
||||
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
|
||||
setDurability(shard, Translog.Durability.ASYNC);
|
||||
client().prepareIndex("test", "bar", "2").setSource("{}").get();
|
||||
assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
|
||||
setDurability(shard, Translog.Durability.REQUEST);
|
||||
client().prepareDelete("test", "bar", "1").get();
|
||||
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
|
||||
|
||||
setDurability(shard, Translog.Durability.ASYNC);
|
||||
client().prepareDelete("test", "bar", "2").get();
|
||||
assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
|
||||
setDurability(shard, Translog.Durability.REQUEST);
|
||||
assertNoFailures(client().prepareBulk()
|
||||
.add(client().prepareIndex("test", "bar", "3").setSource("{}"))
|
||||
.add(client().prepareDelete("test", "bar", "1")).get());
|
||||
assertFalse(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
|
||||
|
||||
setDurability(shard, Translog.Durability.ASYNC);
|
||||
assertNoFailures(client().prepareBulk()
|
||||
.add(client().prepareIndex("test", "bar", "4").setSource("{}"))
|
||||
.add(client().prepareDelete("test", "bar", "3")).get());
|
||||
setDurability(shard, Translog.Durability.REQUEST);
|
||||
assertTrue(ShardUtilsTests.getShardEngine(shard).getTranslog().syncNeeded());
|
||||
}
|
||||
|
||||
private void setDurability(IndexShard shard, Translog.Durability durability) {
|
||||
client().admin().indices().prepareUpdateSettings(shard.shardId().getIndexName()).setSettings(
|
||||
Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), durability.name()).build()).get();
|
||||
assertEquals(durability, shard.getTranslogDurability());
|
||||
}
|
||||
|
||||
public void testUpdatePriority() {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.setSettings(IndexMetaData.SETTING_PRIORITY, 200));
|
||||
IndexService indexService = getInstanceFromNode(IndicesService.class).indexService(resolveIndex("test"));
|
||||
assertEquals(200, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
|
||||
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(IndexMetaData.SETTING_PRIORITY, 400)
|
||||
.build()).get();
|
||||
assertEquals(400, indexService.getIndexSettings().getSettings().getAsInt(IndexMetaData.SETTING_PRIORITY, 0).intValue());
|
||||
}
|
||||
|
||||
public void testIndexDirIsDeletedWhenShardRemoved() throws Exception {
|
||||
Environment env = getInstanceFromNode(Environment.class);
|
||||
Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
|
||||
logger.info("--> idxPath: [{}]", idxPath);
|
||||
Settings idxSettings = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, idxPath)
|
||||
.build();
|
||||
createIndex("test", idxSettings);
|
||||
ensureGreen("test");
|
||||
client().prepareIndex("test", "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
|
||||
SearchResponse response = client().prepareSearch("test").get();
|
||||
assertHitCount(response, 1L);
|
||||
client().admin().indices().prepareDelete("test").get();
|
||||
assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
|
||||
assertPathHasBeenCleared(idxPath);
|
||||
}
|
||||
|
||||
public void testExpectedShardSizeIsPresent() throws InterruptedException {
|
||||
assertAcked(client().admin().indices().prepareCreate("test")
|
||||
.setSettings(SETTING_NUMBER_OF_SHARDS, 1, SETTING_NUMBER_OF_REPLICAS, 0));
|
||||
for (int i = 0; i < 50; i++) {
|
||||
client().prepareIndex("test", "test").setSource("{}").get();
|
||||
}
|
||||
ensureGreen("test");
|
||||
InternalClusterInfoService clusterInfoService = (InternalClusterInfoService) getInstanceFromNode(ClusterInfoService.class);
|
||||
clusterInfoService.refresh();
|
||||
ClusterState state = getInstanceFromNode(ClusterService.class).state();
|
||||
Long test = clusterInfoService.getClusterInfo().getShardSize(state.getRoutingTable().index("test")
|
||||
.getShards().get(0).primaryShard());
|
||||
assertNotNull(test);
|
||||
assertTrue(test > 0);
|
||||
}
|
||||
|
||||
public void testIndexCanChangeCustomDataPath() throws Exception {
|
||||
Environment env = getInstanceFromNode(Environment.class);
|
||||
Path idxPath = env.sharedDataFile().resolve(randomAsciiOfLength(10));
|
||||
final String INDEX = "idx";
|
||||
Path startDir = idxPath.resolve("start-" + randomAsciiOfLength(10));
|
||||
Path endDir = idxPath.resolve("end-" + randomAsciiOfLength(10));
|
||||
logger.info("--> start dir: [{}]", startDir.toAbsolutePath().toString());
|
||||
logger.info("--> end dir: [{}]", endDir.toAbsolutePath().toString());
|
||||
// temp dirs are automatically created, but the end dir is what
|
||||
// startDir is going to be renamed as, so it needs to be deleted
|
||||
// otherwise we get all sorts of errors about the directory
|
||||
// already existing
|
||||
IOUtils.rm(endDir);
|
||||
|
||||
Settings sb = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, startDir.toAbsolutePath().toString())
|
||||
.build();
|
||||
Settings sb2 = Settings.builder()
|
||||
.put(IndexMetaData.SETTING_DATA_PATH, endDir.toAbsolutePath().toString())
|
||||
.build();
|
||||
|
||||
logger.info("--> creating an index with data_path [{}]", startDir.toAbsolutePath().toString());
|
||||
createIndex(INDEX, sb);
|
||||
ensureGreen(INDEX);
|
||||
client().prepareIndex(INDEX, "bar", "1").setSource("{}").setRefreshPolicy(IMMEDIATE).get();
|
||||
|
||||
SearchResponse resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
|
||||
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
|
||||
|
||||
logger.info("--> closing the index [{}]", INDEX);
|
||||
client().admin().indices().prepareClose(INDEX).get();
|
||||
logger.info("--> index closed, re-opening...");
|
||||
client().admin().indices().prepareOpen(INDEX).get();
|
||||
logger.info("--> index re-opened");
|
||||
ensureGreen(INDEX);
|
||||
|
||||
resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
|
||||
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
|
||||
|
||||
// Now, try closing and changing the settings
|
||||
|
||||
logger.info("--> closing the index [{}]", INDEX);
|
||||
client().admin().indices().prepareClose(INDEX).get();
|
||||
|
||||
logger.info("--> moving data on disk [{}] to [{}]", startDir.getFileName(), endDir.getFileName());
|
||||
assert Files.exists(endDir) == false : "end directory should not exist!";
|
||||
Files.move(startDir, endDir, StandardCopyOption.REPLACE_EXISTING);
|
||||
|
||||
logger.info("--> updating settings...");
|
||||
client().admin().indices().prepareUpdateSettings(INDEX)
|
||||
.setSettings(sb2)
|
||||
.setIndicesOptions(IndicesOptions.fromOptions(true, false, true, true))
|
||||
.get();
|
||||
|
||||
assert Files.exists(startDir) == false : "start dir shouldn't exist";
|
||||
|
||||
logger.info("--> settings updated and files moved, re-opening index");
|
||||
client().admin().indices().prepareOpen(INDEX).get();
|
||||
logger.info("--> index re-opened");
|
||||
ensureGreen(INDEX);
|
||||
|
||||
resp = client().prepareSearch(INDEX).setQuery(matchAllQuery()).get();
|
||||
assertThat("found the hit", resp.getHits().getTotalHits(), equalTo(1L));
|
||||
|
||||
assertAcked(client().admin().indices().prepareDelete(INDEX));
|
||||
assertAllIndicesRemovedAndDeletionCompleted(Collections.singleton(getInstanceFromNode(IndicesService.class)));
|
||||
assertPathHasBeenCleared(startDir.toAbsolutePath());
|
||||
assertPathHasBeenCleared(endDir.toAbsolutePath());
|
||||
}
|
||||
|
||||
public void testMaybeFlush() throws Exception {
|
||||
createIndex("test", Settings.builder().put(IndexSettings.INDEX_TRANSLOG_DURABILITY_SETTING.getKey(), Translog.Durability.REQUEST)
|
||||
.build());
|
||||
ensureGreen();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService test = indicesService.indexService(resolveIndex("test"));
|
||||
IndexShard shard = test.getShardOrNull(0);
|
||||
assertFalse(shard.shouldFlush());
|
||||
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder()
|
||||
.put(IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
|
||||
new ByteSizeValue(133 /* size of the operation + header & footer */, ByteSizeUnit.BYTES)).build()).get();
|
||||
client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
|
||||
assertFalse(shard.shouldFlush());
|
||||
ParsedDocument doc = testParsedDocument("1", "1", "test", null, -1, -1, new ParseContext.Document(),
|
||||
new BytesArray(new byte[]{1}), null);
|
||||
Engine.Index index = new Engine.Index(new Term("_uid", "1"), doc);
|
||||
shard.index(index);
|
||||
assertTrue(shard.shouldFlush());
|
||||
assertEquals(2, shard.getEngine().getTranslog().totalOperations());
|
||||
client().prepareIndex("test", "test", "2").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
|
||||
assertBusy(() -> { // this is async
|
||||
assertFalse(shard.shouldFlush());
|
||||
});
|
||||
assertEquals(0, shard.getEngine().getTranslog().totalOperations());
|
||||
shard.getEngine().getTranslog().sync();
|
||||
long size = shard.getEngine().getTranslog().sizeInBytes();
|
||||
logger.info("--> current translog size: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
|
||||
shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
|
||||
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(
|
||||
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(), new ByteSizeValue(size, ByteSizeUnit.BYTES))
|
||||
.build()).get();
|
||||
client().prepareDelete("test", "test", "2").get();
|
||||
logger.info("--> translog size after delete: [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
|
||||
shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
|
||||
assertBusy(() -> { // this is async
|
||||
logger.info("--> translog size on iter : [{}] num_ops [{}] generation [{}]", shard.getEngine().getTranslog().sizeInBytes(),
|
||||
shard.getEngine().getTranslog().totalOperations(), shard.getEngine().getTranslog().getGeneration());
|
||||
assertFalse(shard.shouldFlush());
|
||||
});
|
||||
assertEquals(0, shard.getEngine().getTranslog().totalOperations());
|
||||
}
|
||||
|
||||
public void testStressMaybeFlush() throws Exception {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService test = indicesService.indexService(resolveIndex("test"));
|
||||
final IndexShard shard = test.getShardOrNull(0);
|
||||
assertFalse(shard.shouldFlush());
|
||||
client().admin().indices().prepareUpdateSettings("test").setSettings(Settings.builder().put(
|
||||
IndexSettings.INDEX_TRANSLOG_FLUSH_THRESHOLD_SIZE_SETTING.getKey(),
|
||||
new ByteSizeValue(133 /* size of the operation + header & footer */, ByteSizeUnit.BYTES)).build()).get();
|
||||
client().prepareIndex("test", "test", "0").setSource("{}").setRefreshPolicy(randomBoolean() ? IMMEDIATE : NONE).get();
|
||||
assertFalse(shard.shouldFlush());
|
||||
final AtomicBoolean running = new AtomicBoolean(true);
|
||||
final int numThreads = randomIntBetween(2, 4);
|
||||
Thread[] threads = new Thread[numThreads];
|
||||
CyclicBarrier barrier = new CyclicBarrier(numThreads + 1);
|
||||
for (int i = 0; i < threads.length; i++) {
|
||||
threads[i] = new Thread() {
|
||||
@Override
|
||||
public void run() {
|
||||
try {
|
||||
barrier.await();
|
||||
} catch (InterruptedException | BrokenBarrierException e) {
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
while (running.get()) {
|
||||
shard.maybeFlush();
|
||||
}
|
||||
}
|
||||
};
|
||||
threads[i].start();
|
||||
}
|
||||
barrier.await();
|
||||
FlushStats flushStats = shard.flushStats();
|
||||
long total = flushStats.getTotal();
|
||||
client().prepareIndex("test", "test", "1").setSource("{}").get();
|
||||
assertBusy(() -> assertEquals(total + 1, shard.flushStats().getTotal()));
|
||||
running.set(false);
|
||||
for (int i = 0; i < threads.length; i++) {
|
||||
threads[i].join();
|
||||
}
|
||||
assertEquals(total + 1, shard.flushStats().getTotal());
|
||||
}
|
||||
|
||||
public void testShardHasMemoryBufferOnTranslogRecover() throws Throwable {
|
||||
createIndex("test");
|
||||
ensureGreen();
|
||||
IndicesService indicesService = getInstanceFromNode(IndicesService.class);
|
||||
IndexService indexService = indicesService.indexService(resolveIndex("test"));
|
||||
IndexShard shard = indexService.getShardOrNull(0);
|
||||
client().prepareIndex("test", "test", "0").setSource("{\"foo\" : \"bar\"}").get();
|
||||
client().prepareDelete("test", "test", "0").get();
|
||||
client().prepareIndex("test", "test", "1").setSource("{\"foo\" : \"bar\"}").setRefreshPolicy(IMMEDIATE).get();
|
||||
|
||||
IndexSearcherWrapper wrapper = new IndexSearcherWrapper() {};
|
||||
shard.close("simon says", false);
|
||||
AtomicReference<IndexShard> shardRef = new AtomicReference<>();
|
||||
List<Exception> failures = new ArrayList<>();
|
||||
IndexingOperationListener listener = new IndexingOperationListener() {
|
||||
|
||||
@Override
|
||||
public void postIndex(Engine.Index index, boolean created) {
|
||||
try {
|
||||
assertNotNull(shardRef.get());
|
||||
// this is all IMC needs to do - check current memory and refresh
|
||||
assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
|
||||
shardRef.get().refresh("test");
|
||||
} catch (Exception e) {
|
||||
failures.add(e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public void postDelete(Engine.Delete delete) {
|
||||
try {
|
||||
assertNotNull(shardRef.get());
|
||||
// this is all IMC needs to do - check current memory and refresh
|
||||
assertTrue(shardRef.get().getIndexBufferRAMBytesUsed() > 0);
|
||||
shardRef.get().refresh("test");
|
||||
} catch (Exception e) {
|
||||
failures.add(e);
|
||||
throw e;
|
||||
}
|
||||
}
|
||||
};
|
||||
final IndexShard newShard = newIndexShard(indexService, shard, wrapper, listener);
|
||||
shardRef.set(newShard);
|
||||
recoverShard(newShard);
|
||||
|
||||
try {
|
||||
ExceptionsHelper.rethrowAndSuppress(failures);
|
||||
} finally {
|
||||
newShard.close("just do it", randomBoolean());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
public static final IndexShard recoverShard(IndexShard newShard) throws IOException {
|
||||
DiscoveryNode localNode = new DiscoveryNode("foo", LocalTransportAddress.buildUnique(), emptyMap(), emptySet(), Version.CURRENT);
|
||||
newShard.markAsRecovering("store", new RecoveryState(newShard.routingEntry(), localNode, null));
|
||||
assertTrue(newShard.recoverFromStore());
|
||||
newShard.updateRoutingEntry(newShard.routingEntry().moveToStarted());
|
||||
return newShard;
|
||||
}
|
||||
|
||||
public static final IndexShard newIndexShard(IndexService indexService, IndexShard shard, IndexSearcherWrapper wrapper,
|
||||
IndexingOperationListener... listeners) throws IOException {
|
||||
ShardRouting initializingShardRouting = getInitializingShardRouting(shard.routingEntry());
|
||||
IndexShard newShard = new IndexShard(initializingShardRouting, indexService.getIndexSettings(), shard.shardPath(),
|
||||
shard.store(), indexService.cache(), indexService.mapperService(), indexService.similarityService(),
|
||||
indexService.fieldData(), shard.getEngineFactory(), indexService.getIndexEventListener(), wrapper,
|
||||
indexService.getThreadPool(), indexService.getBigArrays(), null, Collections.emptyList(), Arrays.asList(listeners));
|
||||
return newShard;
|
||||
}
|
||||
|
||||
private static ShardRouting getInitializingShardRouting(ShardRouting existingShardRouting) {
|
||||
ShardRouting shardRouting = TestShardRouting.newShardRouting(existingShardRouting.shardId(),
|
||||
existingShardRouting.currentNodeId(), null, existingShardRouting.primary(), ShardRoutingState.INITIALIZING,
|
||||
existingShardRouting.allocationId());
|
||||
shardRouting = shardRouting.updateUnassigned(new UnassignedInfo(UnassignedInfo.Reason.INDEX_REOPENED, "fake recovery"),
|
||||
RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE);
|
||||
return shardRouting;
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -28,6 +28,7 @@ import org.apache.lucene.index.LeafReaderContext;
|
|||
import org.apache.lucene.store.BaseDirectoryWrapper;
|
||||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.common.lucene.index.ElasticsearchDirectoryReader;
|
||||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -64,4 +65,7 @@ public class ShardUtilsTests extends ESTestCase {
|
|||
IOUtils.close(writer, dir);
|
||||
}
|
||||
|
||||
public static Engine getShardEngine(IndexShard shard) {
|
||||
return shard.getEngine();
|
||||
}
|
||||
}
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.indices;
|
|||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.admin.indices.forcemerge.ForceMergeResponse;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RecoverySource;
|
||||
import org.elasticsearch.cluster.routing.ShardRouting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.LocalTransportAddress;
|
||||
|
@ -31,7 +30,7 @@ import org.elasticsearch.index.IndexService;
|
|||
import org.elasticsearch.index.engine.Engine;
|
||||
import org.elasticsearch.index.shard.IndexSearcherWrapper;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.IndexShardTests;
|
||||
import org.elasticsearch.index.shard.IndexShardIT;
|
||||
import org.elasticsearch.indices.recovery.RecoveryState;
|
||||
import org.elasticsearch.test.ESSingleNodeTestCase;
|
||||
import org.elasticsearch.threadpool.ThreadPool;
|
||||
|
@ -443,7 +442,7 @@ public class IndexingMemoryControllerTests extends ESSingleNodeTestCase {
|
|||
shard.writeIndexingBuffer();
|
||||
}
|
||||
};
|
||||
final IndexShard newShard = IndexShardTests.newIndexShard(indexService, shard, wrapper, imc);
|
||||
final IndexShard newShard = IndexShardIT.newIndexShard(indexService, shard, wrapper, imc);
|
||||
shardRef.set(newShard);
|
||||
try {
|
||||
assertEquals(0, imc.availableShards().size());
|
||||
|
|
|
@ -17,7 +17,7 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.search.internal;
|
||||
package org.elasticsearch.search;
|
||||
|
||||
import org.apache.lucene.queries.TermsQuery;
|
||||
import org.apache.lucene.search.BooleanQuery;
|
||||
|
@ -26,6 +26,7 @@ import org.apache.lucene.search.Query;
|
|||
import org.apache.lucene.util.BytesRef;
|
||||
import org.elasticsearch.common.lucene.search.Queries;
|
||||
import org.elasticsearch.index.mapper.TypeFieldMapper;
|
||||
import org.elasticsearch.search.DefaultSearchContext;
|
||||
import org.elasticsearch.test.ESTestCase;
|
||||
|
||||
import static org.apache.lucene.search.BooleanClause.Occur.FILTER;
|
|
@ -56,12 +56,7 @@ public class SearchRequestTests extends ESTestCase {
|
|||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, false,
|
||||
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
|
|
|
@ -119,12 +119,7 @@ public class AggregatorParsingTests extends ESTestCase {
|
|||
bindMapperExtension();
|
||||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false, emptyList());
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
|
|
|
@ -143,12 +143,7 @@ public abstract class BaseAggregationTestCase<AB extends AbstractAggregationBuil
|
|||
bindMapperExtension();
|
||||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false, emptyList()) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false, emptyList());
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
|
|
|
@ -133,12 +133,7 @@ public class SearchSourceBuilderTests extends ESTestCase {
|
|||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(settings, false,
|
||||
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
|
|
|
@ -23,6 +23,8 @@ import org.elasticsearch.common.Strings;
|
|||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
import org.elasticsearch.common.xcontent.XContentBuilder;
|
||||
import org.elasticsearch.common.xcontent.XContentFactory;
|
||||
import org.elasticsearch.index.shard.IndexShard;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
import org.elasticsearch.search.fetch.FetchSubPhase;
|
||||
import org.elasticsearch.search.internal.InternalSearchHit;
|
||||
import org.elasticsearch.search.internal.SearchContext;
|
||||
|
@ -33,37 +35,11 @@ import org.elasticsearch.test.TestSearchContext;
|
|||
import java.io.IOException;
|
||||
import java.util.Collections;
|
||||
|
||||
import static org.mockito.Mockito.mock;
|
||||
import static org.mockito.Mockito.when;
|
||||
|
||||
public class FetchSourceSubPhaseTests extends ESTestCase {
|
||||
|
||||
static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext {
|
||||
|
||||
FetchSourceContext context;
|
||||
BytesReference source;
|
||||
|
||||
FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) {
|
||||
super(null);
|
||||
this.context = context;
|
||||
this.source = source;
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean sourceRequested() {
|
||||
return context != null && context.fetchSource();
|
||||
}
|
||||
|
||||
@Override
|
||||
public FetchSourceContext fetchSourceContext() {
|
||||
return context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchLookup lookup() {
|
||||
SearchLookup lookup = super.lookup();
|
||||
lookup.source().setSource(source);
|
||||
return lookup;
|
||||
}
|
||||
}
|
||||
|
||||
public void testFetchSource() throws IOException {
|
||||
XContentBuilder source = XContentFactory.jsonBuilder().startObject()
|
||||
.field("field", "value")
|
||||
|
@ -109,11 +85,14 @@ public class FetchSourceSubPhaseTests extends ESTestCase {
|
|||
hitContext = hitExecute(null, false, null, null);
|
||||
assertNull(hitContext.hit().sourceAsMap());
|
||||
|
||||
hitContext = hitExecute(null, true, "field1", null);
|
||||
assertNull(hitContext.hit().sourceAsMap());
|
||||
IllegalArgumentException exception = expectThrows(IllegalArgumentException.class, () -> hitExecute(null, true, "field1", null));
|
||||
assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " +
|
||||
"for index [index]", exception.getMessage());
|
||||
|
||||
hitContext = hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"});
|
||||
assertNull(hitContext.hit().sourceAsMap());
|
||||
exception = expectThrows(IllegalArgumentException.class,
|
||||
() -> hitExecuteMultiple(null, true, new String[]{"*"}, new String[]{"field2"}));
|
||||
assertEquals("unable to fetch fields from _source field: _source is disabled in the mappings " +
|
||||
"for index [index]", exception.getMessage());
|
||||
}
|
||||
|
||||
private FetchSubPhase.HitContext hitExecute(XContentBuilder source, boolean fetchSource, String include, String exclude) {
|
||||
|
@ -131,4 +110,40 @@ public class FetchSourceSubPhaseTests extends ESTestCase {
|
|||
phase.hitExecute(searchContext, hitContext);
|
||||
return hitContext;
|
||||
}
|
||||
|
||||
private static class FetchSourceSubPhaseTestSearchContext extends TestSearchContext {
|
||||
final FetchSourceContext context;
|
||||
final BytesReference source;
|
||||
final IndexShard indexShard;
|
||||
|
||||
FetchSourceSubPhaseTestSearchContext(FetchSourceContext context, BytesReference source) {
|
||||
super(null);
|
||||
this.context = context;
|
||||
this.source = source;
|
||||
this.indexShard = mock(IndexShard.class);
|
||||
when(indexShard.shardId()).thenReturn(new ShardId("index", "index", 1));
|
||||
}
|
||||
|
||||
@Override
|
||||
public boolean sourceRequested() {
|
||||
return context != null && context.fetchSource();
|
||||
}
|
||||
|
||||
@Override
|
||||
public FetchSourceContext fetchSourceContext() {
|
||||
return context;
|
||||
}
|
||||
|
||||
@Override
|
||||
public SearchLookup lookup() {
|
||||
SearchLookup lookup = super.lookup();
|
||||
lookup.source().setSource(source);
|
||||
return lookup;
|
||||
}
|
||||
|
||||
@Override
|
||||
public IndexShard indexShard() {
|
||||
return indexShard;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
package org.elasticsearch.search.fetch.subphase.highlight;
|
||||
|
||||
import com.carrotsearch.randomizedtesting.generators.RandomPicks;
|
||||
|
||||
import org.apache.lucene.search.join.ScoreMode;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.index.IndexRequestBuilder;
|
||||
|
@ -38,6 +37,7 @@ import org.elasticsearch.index.query.MultiMatchQueryBuilder;
|
|||
import org.elasticsearch.index.query.Operator;
|
||||
import org.elasticsearch.index.query.QueryBuilder;
|
||||
import org.elasticsearch.index.query.QueryBuilders;
|
||||
import org.elasticsearch.index.query.functionscore.FunctionScoreQueryBuilder;
|
||||
import org.elasticsearch.index.search.MatchQuery;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.rest.RestStatus;
|
||||
|
@ -50,8 +50,8 @@ import org.hamcrest.Matcher;
|
|||
import org.hamcrest.Matchers;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Arrays;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.HashMap;
|
||||
import java.util.Map;
|
||||
|
||||
|
@ -96,7 +96,7 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Arrays.asList(InternalSettingsPlugin.class);
|
||||
return Collections.singletonList(InternalSettingsPlugin.class);
|
||||
}
|
||||
|
||||
public void testHighlightingWithWildcardName() throws IOException {
|
||||
|
@ -2851,4 +2851,21 @@ public class HighlighterSearchIT extends ESIntegTestCase {
|
|||
assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>"));
|
||||
assertThat(field.getFragments()[1].string(), equalTo("<em>cow</em>"));
|
||||
}
|
||||
|
||||
public void testFunctionScoreQueryHighlight() throws Exception {
|
||||
client().prepareIndex("test", "type", "1")
|
||||
.setSource(jsonBuilder().startObject().field("text", "brown").endObject())
|
||||
.setRefreshPolicy(WriteRequest.RefreshPolicy.IMMEDIATE)
|
||||
.get();
|
||||
|
||||
SearchResponse searchResponse = client().prepareSearch()
|
||||
.setQuery(new FunctionScoreQueryBuilder(QueryBuilders.prefixQuery("text", "bro")))
|
||||
.highlighter(new HighlightBuilder()
|
||||
.field(new Field("text")))
|
||||
.get();
|
||||
assertHitCount(searchResponse, 1);
|
||||
HighlightField field = searchResponse.getHits().getAt(0).highlightFields().get("text");
|
||||
assertThat(field.getFragments().length, equalTo(1));
|
||||
assertThat(field.getFragments()[0].string(), equalTo("<em>brown</em>"));
|
||||
}
|
||||
}
|
||||
|
|
|
@ -80,7 +80,7 @@ public class SearchFieldsIT extends ESIntegTestCase {
|
|||
|
||||
@Override
|
||||
protected Collection<Class<? extends Plugin>> nodePlugins() {
|
||||
return Arrays.asList(CustomScriptPlugin.class);
|
||||
return Collections.singletonList(CustomScriptPlugin.class);
|
||||
}
|
||||
|
||||
public static class CustomScriptPlugin extends MockScriptPlugin {
|
||||
|
|
|
@ -59,12 +59,7 @@ public class ShardSearchTransportRequestTests extends ESTestCase {
|
|||
}
|
||||
};
|
||||
SearchModule searchModule = new SearchModule(Settings.EMPTY, false,
|
||||
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin())) {
|
||||
@Override
|
||||
protected void configureSearch() {
|
||||
// Skip me
|
||||
}
|
||||
};
|
||||
Collections.singletonList(new FetchSubPhasePluginIT.FetchTermVectorsPlugin()));
|
||||
List<NamedWriteableRegistry.Entry> entries = new ArrayList<>();
|
||||
entries.addAll(indicesModule.getNamedWriteables());
|
||||
entries.addAll(searchModule.getNamedWriteables());
|
||||
|
|
|
@ -24,6 +24,9 @@ integTest {
|
|||
setting 'script.inline', 'true'
|
||||
setting 'script.stored', 'true'
|
||||
setting 'script.max_compilations_per_minute', '1000'
|
||||
/* Enable regexes in painless so our tests don't complain about example
|
||||
* snippets that use them. */
|
||||
setting 'script.painless.regex.enabled', 'true'
|
||||
Closure configFile = {
|
||||
extraConfigFile it, "src/test/cluster/config/$it"
|
||||
}
|
||||
|
|
|
@ -27,6 +27,8 @@ way to reindex old indices is to use the `reindex` API.
|
|||
* <<breaking_60_mapping_changes>>
|
||||
* <<breaking_60_rest_changes>>
|
||||
* <<breaking_60_search_changes>>
|
||||
* <<breaking_60_docs_changes>>
|
||||
* <<breaking_60_cluster_changes>>
|
||||
|
||||
include::migrate_6_0/mapping.asciidoc[]
|
||||
|
||||
|
@ -35,3 +37,5 @@ include::migrate_6_0/rest.asciidoc[]
|
|||
include::migrate_6_0/search.asciidoc[]
|
||||
|
||||
include::migrate_6_0/docs.asciidoc[]
|
||||
|
||||
include::migrate_6_0/cluster.asciidoc[]
|
||||
|
|
|
@ -0,0 +1,27 @@
|
|||
[[breaking_60_cluster_changes]]
|
||||
=== Cluster changes
|
||||
|
||||
==== Cluster name no longer allowed in path.data
|
||||
|
||||
Previously, the cluster name could be used in the `path.data` setting with a
|
||||
warning. This is no longer allowed. For instance, in the previous version
|
||||
this was valid:
|
||||
|
||||
[source,sh]
|
||||
--------------------------------------------------
|
||||
# Assuming path.data is /tmp/mydata
|
||||
# No longer supported:
|
||||
$ tree /tmp/mydata
|
||||
/tmp/mydata
|
||||
├── <cluster_name>
|
||||
│ └── nodes
|
||||
│ └── 0
|
||||
│ └── <etc>
|
||||
|
||||
# Should be changed to:
|
||||
$ tree /tmp/mydata
|
||||
/tmp/mydata
|
||||
├── nodes
|
||||
│ └── 0
|
||||
│ └── <etc>
|
||||
--------------------------------------------------
|
|
@ -1,4 +1,4 @@
|
|||
[[breaking_60_document_api_changes]]
|
||||
[[breaking_60_docs_changes]]
|
||||
=== Document API changes
|
||||
|
||||
==== version type 'force' removed
|
||||
|
|
|
@ -196,6 +196,15 @@ POST hockey/player/1/_update
|
|||
[[modules-scripting-painless-regex]]
|
||||
=== Regular expressions
|
||||
|
||||
NOTE: Regexes are disabled by default because they circumvent Painless's
|
||||
protection against long-running and memory-hungry scripts. To make matters
|
||||
worse, even innocuous-looking regexes can have staggering performance and stack
|
||||
depth behavior. They remain an amazingly powerful tool but are too scary to enable
|
||||
by default. To enable them yourself, set `script.painless.regex.enabled: true` in
|
||||
`elasticsearch.yml`. We'd like very much to have a safe alternative
|
||||
implementation that can be enabled by default so check this space for later
|
||||
developments!
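For orientation, here is a hedged Java sketch of the same node-level switch being applied programmatically; it mirrors the RegexTests and PainlessScriptEngineService changes further down in this commit and is illustrative only, not part of the committed change.

import org.elasticsearch.common.settings.Settings;

// Node-level equivalent of putting `script.painless.regex.enabled: true` in elasticsearch.yml.
Settings settings = Settings.builder()
        .put(CompilerSettings.REGEX_ENABLED.getKey(), true)
        .build();
// The engine reads the setting once at construction; it cannot be flipped per request.
PainlessScriptEngineService engine = new PainlessScriptEngineService(settings);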
|
||||
|
||||
Painless's native support for regular expressions provides the following syntax constructs:
|
||||
|
||||
* `/pattern/`: Pattern literals create patterns. This is the only way to create
|
||||
|
|
|
@ -19,10 +19,18 @@
|
|||
|
||||
package org.elasticsearch.painless;
|
||||
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Setting.Property;
|
||||
|
||||
/**
|
||||
* Settings to use when compiling a script.
|
||||
*/
|
||||
public final class CompilerSettings {
|
||||
/**
|
||||
* Are regexes enabled? This is a node-level setting because regexes break out of painless's lovely sandbox and can cause stack
|
||||
* overflows, and we can't analyze the regex to be sure it won't.
|
||||
*/
|
||||
public static final Setting<Boolean> REGEX_ENABLED = Setting.boolSetting("script.painless.regex.enabled", false, Property.NodeScope);
|
||||
|
||||
/**
|
||||
* Constant to be used when specifying the maximum loop counter when compiling a script.
|
||||
|
@ -55,6 +63,12 @@ public final class CompilerSettings {
|
|||
*/
|
||||
private int initialCallSiteDepth = 0;
|
||||
|
||||
/**
|
||||
* Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
|
||||
* <strong>looking</strong> regexes can cause stack overflows.
|
||||
*/
|
||||
private boolean regexesEnabled = false;
|
||||
|
||||
/**
|
||||
* Returns the value for the cumulative total number of statements that can be made in all loops
|
||||
* in a script before an exception is thrown. This attempts to prevent infinite loops. Note if
|
||||
|
@ -104,4 +118,20 @@ public final class CompilerSettings {
|
|||
public void setInitialCallSiteDepth(int depth) {
|
||||
this.initialCallSiteDepth = depth;
|
||||
}
|
||||
|
||||
/**
|
||||
* Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
|
||||
* <strong>looking</strong> regexes can cause stack overflows.
|
||||
*/
|
||||
public boolean areRegexesEnabled() {
|
||||
return regexesEnabled;
|
||||
}
|
||||
|
||||
/**
|
||||
* Are regexes enabled? They are currently disabled by default because they break out of the loop counter and even fairly simple
|
||||
* <strong>looking</strong> regexes can cause stack overflows.
|
||||
*/
|
||||
public void setRegexesEnabled(boolean regexesEnabled) {
|
||||
this.regexesEnabled = regexesEnabled;
|
||||
}
|
||||
}
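As a brief usage note, the per-script knobs on this class can also be driven programmatically, as the ScriptTestCase change later in this commit does for its picky settings; the values below are illustrative assumptions, not defaults.

// Sketch: building CompilerSettings by hand for an embedded compile.
CompilerSettings compilerSettings = new CompilerSettings();
compilerSettings.setPicky(true);             // fail compilation on sloppy constructs
compilerSettings.setMaxLoopCounter(10000);   // cap cumulative loop iterations
compilerSettings.setRegexesEnabled(true);    // normally seeded from script.painless.regex.enabled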
|
||||
|
|
|
@ -20,19 +20,21 @@
|
|||
package org.elasticsearch.painless;
|
||||
|
||||
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.plugins.ScriptPlugin;
|
||||
import org.elasticsearch.script.ScriptEngineRegistry;
|
||||
import org.elasticsearch.script.ScriptEngineService;
|
||||
import org.elasticsearch.script.ScriptModule;
|
||||
|
||||
import java.util.Arrays;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
* Registers Painless as a plugin.
|
||||
*/
|
||||
public final class PainlessPlugin extends Plugin implements ScriptPlugin {
|
||||
|
||||
// force to pare our definition at startup (not on the user's first script)
|
||||
// force to parse our definition at startup (not on the user's first script)
|
||||
static {
|
||||
Definition.VOID_TYPE.hashCode();
|
||||
}
|
||||
|
@ -41,4 +43,9 @@ public final class PainlessPlugin extends Plugin implements ScriptPlugin {
|
|||
public ScriptEngineService getScriptEngineService(Settings settings) {
|
||||
return new PainlessScriptEngineService(settings);
|
||||
}
|
||||
|
||||
@Override
|
||||
public List<Setting<?>> getSettings() {
|
||||
return Arrays.asList(CompilerSettings.REGEX_ENABLED);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -53,11 +53,6 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
|
|||
*/
|
||||
public static final String NAME = "painless";
|
||||
|
||||
/**
|
||||
* Default compiler settings to be used.
|
||||
*/
|
||||
private static final CompilerSettings DEFAULT_COMPILER_SETTINGS = new CompilerSettings();
|
||||
|
||||
/**
|
||||
* Permissions context used during compilation.
|
||||
*/
|
||||
|
@ -74,12 +69,19 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
|
|||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Default compiler settings to be used. Note that {@link CompilerSettings} is mutable but this instance shouldn't be mutated outside
|
||||
* of {@link PainlessScriptEngineService#PainlessScriptEngineService(Settings)}.
|
||||
*/
|
||||
private final CompilerSettings defaultCompilerSettings = new CompilerSettings();
|
||||
|
||||
/**
|
||||
* Constructor.
|
||||
* @param settings The settings to initialize the engine with.
|
||||
*/
|
||||
public PainlessScriptEngineService(final Settings settings) {
|
||||
super(settings);
|
||||
defaultCompilerSettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(settings));
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -111,29 +113,36 @@ public final class PainlessScriptEngineService extends AbstractComponent impleme
|
|||
|
||||
if (params.isEmpty()) {
|
||||
// Use the default settings.
|
||||
compilerSettings = DEFAULT_COMPILER_SETTINGS;
|
||||
compilerSettings = defaultCompilerSettings;
|
||||
} else {
|
||||
// Use custom settings specified by params.
|
||||
compilerSettings = new CompilerSettings();
|
||||
Map<String, String> copy = new HashMap<>(params);
|
||||
String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER);
|
||||
|
||||
// Except regexes enabled - this is a node level setting and can't be changed in the request.
|
||||
compilerSettings.setRegexesEnabled(defaultCompilerSettings.areRegexesEnabled());
|
||||
|
||||
Map<String, String> copy = new HashMap<>(params);
|
||||
|
||||
String value = copy.remove(CompilerSettings.MAX_LOOP_COUNTER);
|
||||
if (value != null) {
|
||||
compilerSettings.setMaxLoopCounter(Integer.parseInt(value));
|
||||
}
|
||||
|
||||
value = copy.remove(CompilerSettings.PICKY);
|
||||
|
||||
if (value != null) {
|
||||
compilerSettings.setPicky(Boolean.parseBoolean(value));
|
||||
}
|
||||
|
||||
value = copy.remove(CompilerSettings.INITIAL_CALL_SITE_DEPTH);
|
||||
|
||||
if (value != null) {
|
||||
compilerSettings.setInitialCallSiteDepth(Integer.parseInt(value));
|
||||
}
|
||||
|
||||
value = copy.remove(CompilerSettings.REGEX_ENABLED.getKey());
|
||||
if (value != null) {
|
||||
throw new IllegalArgumentException("[painless.regex.enabled] can only be set on node startup.");
|
||||
}
|
||||
|
||||
if (!copy.isEmpty()) {
|
||||
throw new IllegalArgumentException("Unrecognized compile-time parameter(s): " + copy);
|
||||
}
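To make the accepted per-request parameters above concrete, here is a hedged sketch of the compile-time parameter map a caller could hand to the engine; the keys are the constants handled in this hunk, while the map itself is illustrative.

import java.util.HashMap;
import java.util.Map;

// Per-request compile-time parameters understood by the parsing logic above.
Map<String, String> compileParams = new HashMap<>();
compileParams.put(CompilerSettings.MAX_LOOP_COUNTER, "10000");       // adjustable per request
compileParams.put(CompilerSettings.PICKY, "true");                   // adjustable per request
compileParams.put(CompilerSettings.INITIAL_CALL_SITE_DEPTH, "1");    // adjustable per request
// Passing CompilerSettings.REGEX_ENABLED.getKey() here is rejected with
// "[painless.regex.enabled] can only be set on node startup."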
|
||||
|
|
|
@ -796,6 +796,11 @@ public final class Walker extends PainlessParserBaseVisitor<ANode> {
|
|||
|
||||
@Override
|
||||
public ANode visitRegex(RegexContext ctx) {
|
||||
if (false == settings.areRegexesEnabled()) {
|
||||
throw location(ctx).createError(new IllegalStateException("Regexes are disabled. Set [script.painless.regex.enabled] to [true] "
|
||||
+ "in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep "
|
||||
+ "recursion and long loops."));
|
||||
}
|
||||
String text = ctx.REGEX().getText();
|
||||
int lastSlash = text.lastIndexOf('/');
|
||||
String pattern = text.substring(1, lastSlash);
|
||||
|
|
|
@ -19,17 +19,26 @@
|
|||
|
||||
package org.elasticsearch.painless;
|
||||
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
|
||||
import java.nio.CharBuffer;
|
||||
import java.util.Arrays;
|
||||
import java.util.HashSet;
|
||||
import java.util.regex.Pattern;
|
||||
import java.util.regex.PatternSyntaxException;
|
||||
|
||||
import static java.util.Collections.emptyMap;
|
||||
import static java.util.Collections.singletonMap;
|
||||
import static org.hamcrest.Matchers.containsString;
|
||||
|
||||
public class RegexTests extends ScriptTestCase {
|
||||
@Override
|
||||
protected Settings scriptEngineSettings() {
|
||||
// Enable regexes just for this test. They are disabled by default.
|
||||
return Settings.builder()
|
||||
.put(CompilerSettings.REGEX_ENABLED.getKey(), true)
|
||||
.build();
|
||||
}
|
||||
|
||||
public void testPatternAfterReturn() {
|
||||
assertEquals(true, exec("return 'foo' ==~ /foo/"));
|
||||
assertEquals(false, exec("return 'bar' ==~ /foo/"));
|
||||
|
|
|
@ -45,7 +45,14 @@ public abstract class ScriptTestCase extends ESTestCase {
|
|||
|
||||
@Before
|
||||
public void setup() {
|
||||
scriptEngine = new PainlessScriptEngineService(Settings.EMPTY);
|
||||
scriptEngine = new PainlessScriptEngineService(scriptEngineSettings());
|
||||
}
|
||||
|
||||
/**
|
||||
* Settings used to build the script engine. Override to customize settings like {@link RegexTests} does to enable regexes.
|
||||
*/
|
||||
protected Settings scriptEngineSettings() {
|
||||
return Settings.EMPTY;
|
||||
}
|
||||
|
||||
/** Compiles and returns the result of {@code script} */
|
||||
|
@ -71,6 +78,7 @@ public abstract class ScriptTestCase extends ESTestCase {
|
|||
if (picky) {
|
||||
CompilerSettings pickySettings = new CompilerSettings();
|
||||
pickySettings.setPicky(true);
|
||||
pickySettings.setRegexesEnabled(CompilerSettings.REGEX_ENABLED.get(scriptEngineSettings()));
|
||||
Walker.buildPainlessTree(getTestName(), script, pickySettings, null);
|
||||
}
|
||||
// test actual script execution
|
||||
|
|
|
@@ -20,14 +20,13 @@
package org.elasticsearch.painless;

import org.apache.lucene.util.Constants;
import org.elasticsearch.script.ScriptException;

import java.lang.invoke.WrongMethodTypeException;
import java.util.Arrays;
import java.util.Collections;

import static java.util.Collections.emptyMap;
import static org.hamcrest.Matchers.containsString;
import static java.util.Collections.singletonMap;

public class WhenThingsGoWrongTests extends ScriptTestCase {
    public void testNullPointer() {

@@ -234,4 +233,16 @@ public class WhenThingsGoWrongTests extends ScriptTestCase {
            exec("void recurse(int x, int y) {recurse(x, y)} recurse(1, 2);");
        });
    }

    public void testRegexDisabledByDefault() {
        IllegalStateException e = expectThrows(IllegalStateException.class, () -> exec("return 'foo' ==~ /foo/"));
        assertEquals("Regexes are disabled. Set [script.painless.regex.enabled] to [true] in elasticsearch.yaml to allow them. "
            + "Be careful though, regexes break out of Painless's protection against deep recursion and long loops.", e.getMessage());
    }

    public void testCanNotOverrideRegexEnabled() {
        IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
            () -> exec("", null, singletonMap(CompilerSettings.REGEX_ENABLED.getKey(), "true"), null, false));
        assertEquals("[painless.regex.enabled] can only be set on node startup.", e.getMessage());
    }
}
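For reference, the message asserted above names the node-level switch these tests are exercising. A minimal elasticsearch.yml sketch (illustrative only, not part of this change) that turns regex support back on; because the setting is not dynamically updateable, it only takes effect on node startup:

# elasticsearch.yml -- Painless regexes are off by default; this is a static node setting
script.painless.regex.enabled: true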
@@ -0,0 +1,33 @@
---
"Regex in update fails":

  - do:
      index:
          index:  test_1
          type:   test
          id:     1
          body:
              foo:   bar
              count: 1

  - do:
      catch: /Regexes are disabled. Set \[script.painless.regex.enabled\] to \[true\] in elasticsearch.yaml to allow them. Be careful though, regexes break out of Painless's protection against deep recursion and long loops./
      update:
          index:  test_1
          type:   test
          id:     1
          body:
            script:
              lang:   painless
              inline: "ctx._source.foo = params.bar ==~ /cat/"
              params: { bar: 'xxx' }

---
"Regex enabled is not a dynamic setting":

  - do:
      catch: /setting \[script.painless.regex.enabled\], not dynamically updateable/
      cluster.put_settings:
          body:
              transient:
                  script.painless.regex.enabled: true
@@ -143,15 +143,14 @@ public class PercolatorBackwardsCompatibilityTests extends ESIntegTestCase {
    }

    private void setupNode() throws Exception {
        Path dataDir = createTempDir();
        Path clusterDir = Files.createDirectory(dataDir.resolve(cluster().getClusterName()));
        Path clusterDir = createTempDir();
        try (InputStream stream = PercolatorBackwardsCompatibilityTests.class.
            getResourceAsStream("/indices/percolator/bwc_index_2.0.0.zip")) {
            TestUtil.unzip(stream, clusterDir);
        }

        Settings.Builder nodeSettings = Settings.builder()
            .put(Environment.PATH_DATA_SETTING.getKey(), dataDir);
            .put(Environment.PATH_DATA_SETTING.getKey(), clusterDir);
        internalCluster().startNode(nodeSettings.build());
        ensureGreen(INDEX_NAME);
    }
@@ -59,7 +59,7 @@ public class ListPluginsCommandTests extends ESTestCase {
    static MockTerminal listPlugins(Path home) throws Exception {
        return listPlugins(home, new String[0]);
    }

    static MockTerminal listPlugins(Path home, String[] args) throws Exception {
        String[] argsAndHome = new String[args.length + 1];
        System.arraycopy(args, 0, argsAndHome, 0, args.length);

@@ -69,16 +69,16 @@ public class ListPluginsCommandTests extends ESTestCase {
        assertEquals(ExitCodes.OK, status);
        return terminal;
    }

    static String buildMultiline(String... args){
        return Arrays.asList(args).stream().collect(Collectors.joining("\n", "", "\n"));
    }

    static void buildFakePlugin(Environment env, String description, String name, String classname) throws IOException {
    static void buildFakePlugin(Environment env, String description, String name, String classname, String version) throws IOException {
        PluginTestUtil.writeProperties(env.pluginsFile().resolve(name),
            "description", description,
            "name", name,
            "version", "1.0",
            "version", version,
            "elasticsearch.version", Version.CURRENT.toString(),
            "java.version", System.getProperty("java.specification.version"),
            "classname", classname);

@@ -97,51 +97,51 @@ public class ListPluginsCommandTests extends ESTestCase {
    }

    public void testOnePlugin() throws Exception {
        buildFakePlugin(env, "fake desc", "fake", "org.fake");
        buildFakePlugin(env, "fake desc", "fake", "org.fake", "1.0.0");
        MockTerminal terminal = listPlugins(home);
        assertEquals(terminal.getOutput(), buildMultiline("fake"));
        assertEquals(terminal.getOutput(), buildMultiline("fake@1.0.0"));
    }

    public void testTwoPlugins() throws Exception {
        buildFakePlugin(env, "fake desc", "fake1", "org.fake");
        buildFakePlugin(env, "fake desc 2", "fake2", "org.fake");
        buildFakePlugin(env, "fake desc", "fake1", "org.fake", "1.2.3");
        buildFakePlugin(env, "fake desc 2", "fake2", "org.fake", "6.5.4");
        MockTerminal terminal = listPlugins(home);
        assertEquals(terminal.getOutput(), buildMultiline("fake1", "fake2"));
        assertEquals(terminal.getOutput(), buildMultiline("fake1@1.2.3", "fake2@6.5.4"));
    }

    public void testPluginWithVerbose() throws Exception {
        buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake");
        buildFakePlugin(env, "fake desc", "fake_plugin", "org.fake", "1.0.0");
        String[] params = { "-v" };
        MockTerminal terminal = listPlugins(home, params);
        assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin",
            "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0", " * Classname: org.fake"));
        assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(), "fake_plugin@1.0.0",
            "- Plugin information:", "Name: fake_plugin", "Description: fake desc", "Version: 1.0.0", " * Classname: org.fake"));
    }

    public void testPluginWithVerboseMultiplePlugins() throws Exception {
        buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake");
        buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2");
        buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.2.3");
        buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "6.5.4");
        String[] params = { "-v" };
        MockTerminal terminal = listPlugins(home, params);
        assertEquals(terminal.getOutput(), buildMultiline("Plugins directory: " + env.pluginsFile(),
            "fake_plugin1", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.0",
            " * Classname: org.fake", "fake_plugin2", "- Plugin information:", "Name: fake_plugin2",
            "Description: fake desc 2", "Version: 1.0", " * Classname: org.fake2"));
            "fake_plugin1@1.2.3", "- Plugin information:", "Name: fake_plugin1", "Description: fake desc 1", "Version: 1.2.3",
            " * Classname: org.fake", "fake_plugin2@6.5.4", "- Plugin information:", "Name: fake_plugin2",
            "Description: fake desc 2", "Version: 6.5.4", " * Classname: org.fake2"));
    }

    public void testPluginWithoutVerboseMultiplePlugins() throws Exception {
        buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake");
        buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2");
        buildFakePlugin(env, "fake desc 1", "fake_plugin1", "org.fake", "1.0.0");
        buildFakePlugin(env, "fake desc 2", "fake_plugin2", "org.fake2", "1.0.0");
        MockTerminal terminal = listPlugins(home, new String[0]);
        String output = terminal.getOutput();
        assertEquals(output, buildMultiline("fake_plugin1", "fake_plugin2"));
        assertEquals(output, buildMultiline("fake_plugin1@1.0.0", "fake_plugin2@1.0.0"));
    }

    public void testPluginWithoutDescriptorFile() throws Exception{
        Files.createDirectories(env.pluginsFile().resolve("fake1"));
        NoSuchFileException e = expectThrows(NoSuchFileException.class, () -> listPlugins(home));
        assertEquals(e.getFile(), env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString());
    }

    public void testPluginWithWrongDescriptorFile() throws Exception{
        PluginTestUtil.writeProperties(env.pluginsFile().resolve("fake1"),
            "description", "fake desc");

@@ -149,5 +149,5 @@ public class ListPluginsCommandTests extends ESTestCase {
        assertEquals(e.getMessage(), "Property [name] is missing in [" +
            env.pluginsFile().resolve("fake1").resolve(PluginInfo.ES_PLUGIN_PROPERTIES).toString() + "]");
    }

}
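The assertions above pin down the new list format: each installed plugin is printed as name@version. A sketch of what the terminal output asserted in testPluginWithVerbose would render as for the fake plugin used by these tests (the plugins directory path is a placeholder):

$ bin/elasticsearch-plugin list
fake_plugin@1.0.0
$ bin/elasticsearch-plugin list -v
Plugins directory: /path/to/plugins
fake_plugin@1.0.0
- Plugin information:
Name: fake_plugin
Description: fake desc
Version: 1.0.0
 * Classname: org.fake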
@@ -37,7 +37,8 @@ List<String> availableBoxes = [
    'sles-12',
    'ubuntu-1204',
    'ubuntu-1404',
    'ubuntu-1504'
    'ubuntu-1504',
    'ubuntu-1604'
]

String vagrantBoxes = getProperties().get('vagrant.boxes', 'sample')

@@ -122,7 +123,7 @@ task stop {

Set<String> getVersions() {
    Node xml
    new URL('http://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
    new URL('https://repo1.maven.org/maven2/org/elasticsearch/elasticsearch/maven-metadata.xml').openStream().withStream { s ->
        xml = new XmlParser().parse(s)
    }
    return new TreeSet<>(xml.versioning.versions.version.collect { it.text() }.findAll { it ==~ /2\.\d\.\d/ })
@@ -300,7 +300,7 @@ fi
}

@test "[$GROUP] check the installed plugins can be listed with 'plugins list' and result matches the list of plugins in plugins pom" {
    "$ESHOME/bin/elasticsearch-plugin" list > /tmp/installed
    "$ESHOME/bin/elasticsearch-plugin" list | cut -d'@' -f1 > /tmp/installed
    compare_plugins_list "/tmp/installed" "'plugins list'"
}

@@ -22,8 +22,8 @@
    - is_true: nodes.os.mem.total_in_bytes
    - is_true: nodes.os.mem.free_in_bytes
    - is_true: nodes.os.mem.used_in_bytes
    - is_true: nodes.os.mem.free_percent
    - is_true: nodes.os.mem.used_percent
    - gte: { nodes.os.mem.free_percent: 0 }
    - gte: { nodes.os.mem.used_percent: 0 }
    - is_true: nodes.process
    - is_true: nodes.jvm
    - is_true: nodes.fs
@@ -38,10 +38,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.SameShardAllocationD
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.gateway.AsyncShardFetch;
import org.elasticsearch.gateway.GatewayAllocator;
import org.elasticsearch.gateway.ReplicaShardAllocator;
import org.elasticsearch.indices.store.TransportNodesListShardStoreMetaData;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.gateway.NoopGatewayAllocator;

@@ -209,14 +206,6 @@ public abstract class ESAllocationTestCase extends ESTestCase {
     * Mocks behavior in ReplicaShardAllocator to remove delayed shards from list of unassigned shards so they don't get reassigned yet.
     */
    protected static class DelayedShardsMockGatewayAllocator extends GatewayAllocator {
        private final ReplicaShardAllocator replicaShardAllocator = new ReplicaShardAllocator(Settings.EMPTY) {
            @Override
            protected AsyncShardFetch.FetchResult<TransportNodesListShardStoreMetaData.NodeStoreFilesMetaData>
                    fetchData(ShardRouting shard, RoutingAllocation allocation) {
                return new AsyncShardFetch.FetchResult<>(shard.shardId(), null, Collections.emptySet(), Collections.emptySet());
            }
        };

        public DelayedShardsMockGatewayAllocator() {
            super(Settings.EMPTY, null, null);

@@ -236,7 +225,9 @@ public abstract class ESAllocationTestCase extends ESTestCase {
                if (shard.primary() || shard.unassignedInfo().getReason() == UnassignedInfo.Reason.INDEX_CREATED) {
                    continue;
                }
                replicaShardAllocator.ignoreUnassignedIfDelayed(unassignedIterator, shard, allocation.changes());
                if (shard.unassignedInfo().isDelayed()) {
                    unassignedIterator.removeAndIgnore(UnassignedInfo.AllocationStatus.DELAYED_ALLOCATION, allocation.changes());
                }
            }
        }
    }
@@ -0,0 +1,477 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch.index.shard;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexNotFoundException;
import org.apache.lucene.index.LeafReader;
import org.apache.lucene.index.LeafReaderContext;
import org.apache.lucene.store.Directory;
import org.apache.lucene.util.Bits;
import org.apache.lucene.util.IOUtils;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.flush.FlushRequest;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.RecoverySource;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardRoutingHelper;
import org.elasticsearch.cluster.routing.ShardRoutingState;
import org.elasticsearch.cluster.routing.TestShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.LocalTransportAddress;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.index.IndexSettings;
import org.elasticsearch.index.MapperTestUtils;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.index.cache.IndexCache;
import org.elasticsearch.index.cache.query.DisabledQueryCache;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.fielddata.IndexFieldDataCache;
import org.elasticsearch.index.fielddata.IndexFieldDataService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.mapper.SourceToParse;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.similarity.SimilarityService;
import org.elasticsearch.index.store.DirectoryService;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.breaker.NoneCircuitBreakerService;
import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
import org.elasticsearch.indices.recovery.PeerRecoveryTargetService;
import org.elasticsearch.indices.recovery.RecoveryFailedException;
import org.elasticsearch.indices.recovery.RecoverySourceHandler;
import org.elasticsearch.indices.recovery.RecoveryState;
import org.elasticsearch.indices.recovery.RecoveryTarget;
import org.elasticsearch.indices.recovery.StartRecoveryRequest;
import org.elasticsearch.test.DummyShardLock;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.function.BiFunction;

import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.hasSize;

/**
 * A base class for unit tests that need to create and shutdown {@link IndexShard} instances easily,
 * containing utilities for shard creation and recoveries. See {@link #newShard(boolean)} and
 * {@link #newStartedShard()} for good starting points.
 */
public abstract class IndexShardTestCase extends ESTestCase {

    protected static final PeerRecoveryTargetService.RecoveryListener recoveryListener = new PeerRecoveryTargetService.RecoveryListener() {
        @Override
        public void onRecoveryDone(RecoveryState state) {

        }

        @Override
        public void onRecoveryFailure(RecoveryState state, RecoveryFailedException e, boolean sendShardFailure) {
            throw new AssertionError(e);
        }
    };

    protected ThreadPool threadPool;

    @Override
    public void setUp() throws Exception {
        super.setUp();
        threadPool = new TestThreadPool(getClass().getName());
    }

    @Override
    public void tearDown() throws Exception {
        try {
            ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
        } finally {
            super.tearDown();
        }
    }

    private Store createStore(IndexSettings indexSettings, ShardPath shardPath) throws IOException {
        final ShardId shardId = shardPath.getShardId();
        final DirectoryService directoryService = new DirectoryService(shardId, indexSettings) {
            @Override
            public Directory newDirectory() throws IOException {
                return newFSDirectory(shardPath.resolveIndex());
            }

            @Override
            public long throttleTimeInNanos() {
                return 0;
            }
        };
        return new Store(shardId, indexSettings, directoryService, new DummyShardLock(shardId));
    }

    /**
     * creates a new initializing shard. The shard will have its own unique data path.
     *
     * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
     *                (ready to recover from another shard)
     */
    protected IndexShard newShard(boolean primary) throws IOException {
        ShardRouting shardRouting = TestShardRouting.newShardRouting(new ShardId("index", "_na_", 0), "n1", primary,
            ShardRoutingState.INITIALIZING,
            primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
        return newShard(shardRouting);
    }

    /**
     * creates a new initializing shard. The shard will have its own unique data path.
     *
     * @param shardRouting the {@link ShardRouting} to use for this shard
     * @param listeners    an optional set of listeners to add to the shard
     */
    protected IndexShard newShard(ShardRouting shardRouting, IndexingOperationListener... listeners) throws IOException {
        assert shardRouting.initializing() : shardRouting;
        Settings settings = Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
            .put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0)
            .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1)
            .build();
        IndexMetaData.Builder metaData = IndexMetaData.builder(shardRouting.getIndexName())
            .settings(settings)
            .primaryTerm(0, 1);
        return newShard(shardRouting, metaData.build(), listeners);
    }

    /**
     * creates a new initializing shard. The shard will have its own unique data path.
     *
     * @param shardId   the shard id to use
     * @param primary   indicates whether to create a primary shard (ready to recover from an empty store) or a replica
     *                  (ready to recover from another shard)
     * @param listeners an optional set of listeners to add to the shard
     */
    protected IndexShard newShard(ShardId shardId, boolean primary, IndexingOperationListener... listeners) throws IOException {
        ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, randomAsciiOfLength(5), primary,
            ShardRoutingState.INITIALIZING,
            primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
        return newShard(shardRouting, listeners);
    }

    /**
     * creates a new initializing shard. The shard will be put in its proper path under the
     * supplied node id.
     *
     * @param shardId the shard id to use
     * @param primary indicates whether to create a primary shard (ready to recover from an empty store) or a replica
     *                (ready to recover from another shard)
     */
    protected IndexShard newShard(ShardId shardId, boolean primary, String nodeId, IndexMetaData indexMetaData,
                                  @Nullable IndexSearcherWrapper searcherWrapper) throws IOException {
        ShardRouting shardRouting = TestShardRouting.newShardRouting(shardId, nodeId, primary, ShardRoutingState.INITIALIZING,
            primary ? RecoverySource.StoreRecoverySource.EMPTY_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE);
        return newShard(shardRouting, indexMetaData, searcherWrapper);
    }

    /**
     * creates a new initializing shard. The shard will be put in its proper path under the
     * current node id the shard is assigned to.
     *
     * @param routing       shard routing to use
     * @param indexMetaData indexMetaData for the shard, including any mapping
     * @param listeners     an optional set of listeners to add to the shard
     */
    protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData, IndexingOperationListener... listeners)
        throws IOException {
        return newShard(routing, indexMetaData, null, listeners);
    }

    /**
     * creates a new initializing shard. The shard will be put in its proper path under the
     * current node id the shard is assigned to.
     *
     * @param routing              shard routing to use
     * @param indexMetaData        indexMetaData for the shard, including any mapping
     * @param indexSearcherWrapper an optional wrapper to be used during searches
     * @param listeners            an optional set of listeners to add to the shard
     */
    protected IndexShard newShard(ShardRouting routing, IndexMetaData indexMetaData,
                                  @Nullable IndexSearcherWrapper indexSearcherWrapper, IndexingOperationListener... listeners)
        throws IOException {
        // add node id as name to settings for proper logging
        final ShardId shardId = routing.shardId();
        final NodeEnvironment.NodePath nodePath = new NodeEnvironment.NodePath(createTempDir());
        ShardPath shardPath = new ShardPath(false, nodePath.resolve(shardId), nodePath.resolve(shardId), shardId);
        return newShard(routing, shardPath, indexMetaData, indexSearcherWrapper, listeners);
    }

    /**
     * creates a new initializing shard.
     *
     * @param routing              shard routing to use
     * @param shardPath            path to use for shard data
     * @param indexMetaData        indexMetaData for the shard, including any mapping
     * @param indexSearcherWrapper an optional wrapper to be used during searches
     * @param listeners            an optional set of listeners to add to the shard
     */
    protected IndexShard newShard(ShardRouting routing, ShardPath shardPath, IndexMetaData indexMetaData,
                                  @Nullable IndexSearcherWrapper indexSearcherWrapper,
                                  IndexingOperationListener... listeners) throws IOException {
        final Settings nodeSettings = Settings.builder().put("node.name", routing.currentNodeId()).build();
        final IndexSettings indexSettings = new IndexSettings(indexMetaData, nodeSettings);
        final IndexShard indexShard;
        final Store store = createStore(indexSettings, shardPath);
        boolean success = false;
        try {
            IndexCache indexCache = new IndexCache(indexSettings, new DisabledQueryCache(indexSettings), null);
            MapperService mapperService = MapperTestUtils.newMapperService(createTempDir(), indexSettings.getSettings());
            for (ObjectObjectCursor<String, MappingMetaData> typeMapping : indexMetaData.getMappings()) {
                mapperService.merge(typeMapping.key, typeMapping.value.source(), MapperService.MergeReason.MAPPING_RECOVERY, true);
            }
            SimilarityService similarityService = new SimilarityService(indexSettings, Collections.emptyMap());
            final IndexEventListener indexEventListener = new IndexEventListener() {
            };
            final Engine.Warmer warmer = searcher -> {
            };
            IndicesFieldDataCache indicesFieldDataCache = new IndicesFieldDataCache(nodeSettings, new IndexFieldDataCache.Listener() {
            });
            IndexFieldDataService indexFieldDataService = new IndexFieldDataService(indexSettings, indicesFieldDataCache,
                new NoneCircuitBreakerService(), mapperService);
            indexShard = new IndexShard(routing, indexSettings, shardPath, store, indexCache, mapperService, similarityService,
                indexFieldDataService, null, indexEventListener, indexSearcherWrapper, threadPool, BigArrays.NON_RECYCLING_INSTANCE, warmer,
                Collections.emptyList(), Arrays.asList(listeners));
            success = true;
        } finally {
            if (success == false) {
                IOUtils.close(store);
            }
        }
        return indexShard;
    }

    /**
     * Takes an existing shard, closes it and starts a new initializing shard at the same location
     *
     * @param listeners new listeners to use for the newly created shard
     */
    protected IndexShard reinitShard(IndexShard current, IndexingOperationListener... listeners) throws IOException {
        final ShardRouting shardRouting = current.routingEntry();
        return reinitShard(current, ShardRoutingHelper.initWithSameId(shardRouting,
            shardRouting.primary() ? RecoverySource.StoreRecoverySource.EXISTING_STORE_INSTANCE : RecoverySource.PeerRecoverySource.INSTANCE
        ), listeners);
    }

    /**
     * Takes an existing shard, closes it and starts a new initializing shard at the same location
     *
     * @param routing   the shard routing to use for the newly created shard.
     * @param listeners new listeners to use for the newly created shard
     */
    protected IndexShard reinitShard(IndexShard current, ShardRouting routing, IndexingOperationListener... listeners) throws IOException {
        closeShards(current);
        return newShard(routing, current.shardPath(), current.indexSettings().getIndexMetaData(), null, listeners);
    }

    /**
     * creates a new empty shard and starts it. The shard will be either a replica or a primary.
     */
    protected IndexShard newStartedShard() throws IOException {
        return newStartedShard(randomBoolean());
    }

    /**
     * creates a new empty shard and starts it.
     *
     * @param primary controls whether the shard will be a primary or a replica.
     */
    protected IndexShard newStartedShard(boolean primary) throws IOException {
        IndexShard shard = newShard(primary);
        if (primary) {
            recoveryShardFromStore(shard);
        } else {
            recoveryEmptyReplica(shard);
        }
        return shard;
    }

    protected void closeShards(IndexShard... shards) throws IOException {
        closeShards(Arrays.asList(shards));
    }

    protected void closeShards(Iterable<IndexShard> shards) throws IOException {
        for (IndexShard shard : shards) {
            if (shard != null) {
                try {
                    shard.close("test", false);
                } finally {
                    IOUtils.close(shard.store());
                }
            }
        }
    }

    protected void recoveryShardFromStore(IndexShard primary) throws IOException {
        primary.markAsRecovering("store", new RecoveryState(primary.routingEntry(),
            getFakeDiscoNode(primary.routingEntry().currentNodeId()),
            null));
        primary.recoverFromStore();
        primary.updateRoutingEntry(ShardRoutingHelper.moveToStarted(primary.routingEntry()));
    }

    protected void recoveryEmptyReplica(IndexShard replica) throws IOException {
        IndexShard primary = null;
        try {
            primary = newStartedShard(true);
            recoverReplica(replica, primary);
        } finally {
            closeShards(primary);
        }
    }

    private DiscoveryNode getFakeDiscoNode(String id) {
        return new DiscoveryNode(id, new LocalTransportAddress("_fake_" + id), Version.CURRENT);
    }

    /** recovers a replica from the given primary */
    protected void recoverReplica(IndexShard replica, IndexShard primary) throws IOException {
        recoverReplica(replica, primary,
            (r, sourceNode) -> new RecoveryTarget(r, sourceNode, recoveryListener, version -> {
            }),
            true);
    }

    /**
     * Recovers a replica from the given primary, allowing the caller to supply a custom recovery target.
     * A typical use of a custom recovery target is to assert things in the various stages of recovery.
     *
     * @param markAsRecovering set to false if you have already marked the replica as recovering
     */
    protected void recoverReplica(IndexShard replica, IndexShard primary,
                                  BiFunction<IndexShard, DiscoveryNode, RecoveryTarget> targetSupplier,
                                  boolean markAsRecovering)
        throws IOException {
        final DiscoveryNode pNode = getFakeDiscoNode(primary.routingEntry().currentNodeId());
        final DiscoveryNode rNode = getFakeDiscoNode(replica.routingEntry().currentNodeId());
        if (markAsRecovering) {
            replica.markAsRecovering("remote",
                new RecoveryState(replica.routingEntry(), pNode, rNode));
        } else {
            assertEquals(replica.state(), IndexShardState.RECOVERING);
        }
        replica.prepareForIndexRecovery();
        RecoveryTarget recoveryTarget = targetSupplier.apply(replica, pNode);
        StartRecoveryRequest request = new StartRecoveryRequest(replica.shardId(), pNode, rNode,
            getMetadataSnapshotOrEmpty(replica), false, 0);
        RecoverySourceHandler recovery = new RecoverySourceHandler(primary, recoveryTarget, request, () -> 0L, e -> () -> {
            },
            (int) ByteSizeUnit.MB.toKB(1), logger);
        recovery.recoverToTarget();
        recoveryTarget.markAsDone();
        replica.updateRoutingEntry(ShardRoutingHelper.moveToStarted(replica.routingEntry()));
    }

    private Store.MetadataSnapshot getMetadataSnapshotOrEmpty(IndexShard replica) throws IOException {
        Store.MetadataSnapshot result;
        try {
            result = replica.snapshotStoreMetadata();
        } catch (IndexNotFoundException e) {
            // OK!
            result = Store.MetadataSnapshot.EMPTY;
        } catch (IOException e) {
            logger.warn("failed read store, treating as empty", e);
            result = Store.MetadataSnapshot.EMPTY;
        }
        return result;
    }

    protected Set<Uid> getShardDocUIDs(final IndexShard shard) throws IOException {
        shard.refresh("get_uids");
        try (Engine.Searcher searcher = shard.acquireSearcher("test")) {
            Set<Uid> ids = new HashSet<>();
            for (LeafReaderContext leafContext : searcher.reader().leaves()) {
                LeafReader reader = leafContext.reader();
                Bits liveDocs = reader.getLiveDocs();
                for (int i = 0; i < reader.maxDoc(); i++) {
                    if (liveDocs == null || liveDocs.get(i)) {
                        Document uuid = reader.document(i, Collections.singleton(UidFieldMapper.NAME));
                        ids.add(Uid.createUid(uuid.get(UidFieldMapper.NAME)));
                    }
                }
            }
            return ids;
        }
    }

    protected void assertDocCount(IndexShard shard, int docCount) throws IOException {
        assertThat(getShardDocUIDs(shard), hasSize(docCount));
    }

    protected void assertDocs(IndexShard shard, Uid... uids) throws IOException {
        final Set<Uid> shardDocUIDs = getShardDocUIDs(shard);
        assertThat(shardDocUIDs, contains(uids));
        assertThat(shardDocUIDs, hasSize(uids.length));
    }

    protected Engine.Index indexDoc(IndexShard shard, String type, String id) {
        return indexDoc(shard, type, id, "{}");
    }

    protected Engine.Index indexDoc(IndexShard shard, String type, String id, String source) {
        final Engine.Index index;
        if (shard.routingEntry().primary()) {
            index = shard.prepareIndexOnPrimary(
                SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)),
                Versions.MATCH_ANY, VersionType.INTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
        } else {
            index = shard.prepareIndexOnReplica(
                SourceToParse.source(SourceToParse.Origin.PRIMARY, shard.shardId().getIndexName(), type, id, new BytesArray(source)),
                1, VersionType.EXTERNAL, IndexRequest.UNSET_AUTO_GENERATED_TIMESTAMP, false);
        }
        shard.index(index);
        return index;
    }

    protected Engine.Delete deleteDoc(IndexShard shard, String type, String id) {
        final Engine.Delete delete;
        if (shard.routingEntry().primary()) {
            delete = shard.prepareDeleteOnPrimary(type, id, Versions.MATCH_ANY, VersionType.INTERNAL);
        } else {
            delete = shard.prepareDeleteOnPrimary(type, id, 1, VersionType.EXTERNAL);
        }
        shard.delete(delete);
        return delete;
    }

    protected void flushShard(IndexShard shard) {
        flushShard(shard, false);
    }

    protected void flushShard(IndexShard shard, boolean force) {
        shard.flush(new FlushRequest(shard.shardId().getIndexName()).force(force));
    }
}
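Since IndexShardTestCase is the new shared test infrastructure, a minimal usage sketch may help. The test class, type and document id below are invented for illustration, but every helper it calls (newStartedShard, indexDoc, assertDocCount, deleteDoc, flushShard, closeShards) is defined in the file above.

package org.elasticsearch.index.shard;

import java.io.IOException;

// Hypothetical example test built on IndexShardTestCase; not part of this change.
public class ExampleShardTests extends IndexShardTestCase {

    public void testIndexThenDelete() throws IOException {
        IndexShard shard = newStartedShard(true);           // primary, recovered from an empty store
        indexDoc(shard, "test", "1", "{\"foo\": \"bar\"}"); // index a single document
        assertDocCount(shard, 1);
        deleteDoc(shard, "test", "1");
        flushShard(shard);
        assertDocCount(shard, 0);
        closeShards(shard);                                 // always close the shard and release its store
    }
}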
@@ -19,18 +19,21 @@

package org.elasticsearch.node;

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.MockBigArrays;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.breaker.CircuitBreakerService;
import org.elasticsearch.node.internal.InternalSettingsPreparer;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.plugins.SearchPlugin;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.MockSearchService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.Collection;
import java.util.List;

/**
 * A node for testing which allows:

@@ -62,11 +65,15 @@ public class MockNode extends Node {
        return new MockBigArrays(settings, circuitBreakerService);
    }

    @Override
    protected Class<? extends SearchService> pickSearchServiceImplementation() {
    protected SearchService newSearchService(ClusterService clusterService, IndicesService indicesService,
                                             ThreadPool threadPool, ScriptService scriptService, BigArrays bigArrays,
                                             FetchPhase fetchPhase) {
        if (getPluginsService().filterPlugins(MockSearchService.TestPlugin.class).isEmpty()) {
            return super.pickSearchServiceImplementation();
            return super.newSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
        }
        return MockSearchService.class;
        return new MockSearchService(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
    }
}
@@ -20,9 +20,6 @@
package org.elasticsearch.search;

import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.MockNode;

@@ -69,11 +66,10 @@ public class MockSearchService extends SearchService {
        ACTIVE_SEARCH_CONTEXTS.remove(context);
    }

    @Inject
    public MockSearchService(Settings settings, ClusterSettings clusterSettings, ClusterService clusterService,
    public MockSearchService(ClusterService clusterService,
                             IndicesService indicesService, ThreadPool threadPool, ScriptService scriptService,
                             BigArrays bigArrays, FetchPhase fetchPhase) {
        super(settings, clusterSettings, clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
        super(clusterService, indicesService, threadPool, scriptService, bigArrays, fetchPhase);
    }

    @Override
@@ -1045,12 +1045,7 @@ public abstract class AbstractQueryTestCase<QB extends AbstractQueryBuilder<QB>>
        scriptSettings.addAll(pluginsService.getPluginSettings());
        scriptSettings.add(InternalSettingsPlugin.VERSION_CREATED);
        SettingsModule settingsModule = new SettingsModule(nodeSettings, scriptSettings, pluginsService.getPluginSettingsFilter());
        searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class)) {
            @Override
            protected void configureSearch() {
                // Skip me
            }
        };
        searchModule = new SearchModule(nodeSettings, false, pluginsService.filterPlugins(SearchPlugin.class));
        IndicesModule indicesModule = new IndicesModule(pluginsService.filterPlugins(MapperPlugin.class)) {
            @Override
            public void configure() {
@@ -42,11 +42,16 @@ import static junit.framework.TestCase.fail;
public class ClusterServiceUtils {

    public static ClusterService createClusterService(ThreadPool threadPool) {
        DiscoveryNode discoveryNode = new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
            new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT);
        return createClusterService(threadPool, discoveryNode);
    }

    public static ClusterService createClusterService(ThreadPool threadPool, DiscoveryNode localNode) {
        ClusterService clusterService = new ClusterService(Settings.builder().put("cluster.name", "ClusterServiceTests").build(),
            new ClusterSettings(Settings.EMPTY, ClusterSettings.BUILT_IN_CLUSTER_SETTINGS),
            threadPool);
        clusterService.setLocalNode(new DiscoveryNode("node", LocalTransportAddress.buildUnique(), Collections.emptyMap(),
            new HashSet<>(Arrays.asList(DiscoveryNode.Role.values())), Version.CURRENT));
        clusterService.setLocalNode(localNode);
        clusterService.setNodeConnectionsService(new NodeConnectionsService(Settings.EMPTY, null, null) {
            @Override
            public void connectToAddedNodes(ClusterChangedEvent event) {
@@ -2064,8 +2064,8 @@ public abstract class ESIntegTestCase extends ESTestCase {
            }
            throw new IllegalStateException(builder.toString());
        }
        Path src = list[0];
        Path dest = dataDir.resolve(internalCluster().getClusterName());
        Path src = list[0].resolve(NodeEnvironment.NODES_FOLDER);
        Path dest = dataDir.resolve(NodeEnvironment.NODES_FOLDER);
        assertTrue(Files.exists(src));
        Files.move(src, dest);
        assertFalse(Files.exists(src));
Some files were not shown because too many files have changed in this diff.