commit a7053afdb9
Merge branch 'master' into feature-suggest-refactoring

Conflicts:
    docs/reference/migration/migrate_5_0.asciidoc
@@ -51,6 +51,11 @@ public class PluginBuildPlugin extends BuildPlugin {
                project.integTest.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
                project.tasks.run.clusterConfig.plugin(name, project.bundlePlugin.outputs.files)
            }

            project.namingConventions {
                // Plugins declare extensions of ESIntegTestCase as "Tests" instead of IT.
                skipIntegTestInDisguise = true
            }
        }
        createIntegTestTask(project)
        createBundleTask(project)

@@ -0,0 +1,89 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.gradle.precommit

import org.elasticsearch.gradle.LoggedExec
import org.elasticsearch.gradle.VersionProperties
import org.gradle.api.file.FileCollection
import org.gradle.api.tasks.Input
import org.gradle.api.tasks.InputFiles
import org.gradle.api.tasks.OutputFile

/**
 * Runs NamingConventionsCheck on a classpath/directory combo to verify that
 * tests are named according to our conventions so they'll be picked up by
 * gradle. Read the Javadoc for NamingConventionsCheck to learn more.
 */
public class NamingConventionsTask extends LoggedExec {
    /**
     * We use a simple "marker" file that we touch when the task succeeds
     * as the task output. This is compared against the modified time of the
     * inputs (ie the jars/class files).
     */
    @OutputFile
    File successMarker = new File(project.buildDir, 'markers/namingConventions')

    /**
     * The classpath to run the naming conventions checks against. Must contain the files in the test
     * output directory and everything required to load those classes.
     *
     * We don't declare the actual test files as a dependency or input because if they change then
     * this will change.
     */
    @InputFiles
    FileCollection classpath = project.sourceSets.test.runtimeClasspath

    /**
     * Should we skip the integ tests in disguise tests? Defaults to false; anything but core should
     * set it to true because only core names its integ tests correctly.
     */
    @Input
    boolean skipIntegTestInDisguise = false

    public NamingConventionsTask() {
        dependsOn(classpath)
        description = "Runs NamingConventionsCheck on ${classpath}"
        executable = new File(project.javaHome, 'bin/java')
        onlyIf { project.sourceSets.test.output.classesDir.exists() }
        /*
         * We build the arguments in a funny afterEvaluate/doFirst closure so that we can wait for the classpath to be
         * ready for us. Strangely neither one on their own are good enough.
         */
        project.afterEvaluate {
            doFirst {
                args('-cp', classpath.asPath, 'org.elasticsearch.test.NamingConventionsCheck')
                if (skipIntegTestInDisguise) {
                    args('--skip-integ-tests-in-disguise')
                }
                /*
                 * The test framework has classes that fail the checks to validate that the checks fail properly.
                 * Since these would cause the build to fail we have to ignore them with this parameter. The
                 * process of ignoring them lets us validate that they were found so this ignore parameter acts
                 * as the test for the NamingConventionsCheck.
                 */
                if (':test:framework'.equals(project.path)) {
                    args('--self-test')
                }
                args('--', project.sourceSets.test.output.classesDir.absolutePath)
            }
        }
        doLast { successMarker.setText("", 'UTF-8') }
    }
}

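An aside on the command line the task above assembles in doFirst: the java launcher consumes -cp and the main class name, so org.elasticsearch.test.NamingConventionsCheck itself only sees the optional flags and the directory after "--". A minimal, hypothetical sketch of that argument handling follows; the real checker lives in the test framework and does far more, and every name here is illustrative, not the shipped implementation:

    // Hypothetical sketch of the CLI contract NamingConventionsTask builds up:
    //   java -cp <test runtime classpath> org.elasticsearch.test.NamingConventionsCheck \
    //       [--skip-integ-tests-in-disguise] [--self-test] -- <test classes directory>
    public class NamingConventionsCheckArgsSketch {
        public static void main(String[] args) {
            boolean skipIntegTestsInDisguise = false;
            boolean selfTest = false;
            java.nio.file.Path classesDir = null;
            for (int i = 0; i < args.length; i++) {
                switch (args[i]) {
                case "--skip-integ-tests-in-disguise":
                    skipIntegTestsInDisguise = true;
                    break;
                case "--self-test":
                    selfTest = true;
                    break;
                case "--":
                    // everything after "--" is the directory to scan
                    classesDir = java.nio.file.Paths.get(args[i + 1]);
                    i++;
                    break;
                default:
                    throw new IllegalArgumentException("unknown argument: " + args[i]);
                }
            }
            System.out.println("would scan " + classesDir + " (skipIntegTestsInDisguise="
                    + skipIntegTestsInDisguise + ", selfTest=" + selfTest + ")");
        }
    }
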
@@ -33,6 +33,7 @@ class PrecommitTasks {
        List<Task> precommitTasks = [
            configureForbiddenApis(project),
            configureCheckstyle(project),
            configureNamingConventions(project),
            project.tasks.create('forbiddenPatterns', ForbiddenPatternsTask.class),
            project.tasks.create('licenseHeaders', LicenseHeadersTask.class),
            project.tasks.create('jarHell', JarHellTask.class),
@@ -109,4 +110,11 @@ class PrecommitTasks {
        }
        return checkstyleTask
    }

    private static Task configureNamingConventions(Project project) {
        if (project.sourceSets.findByName("test")) {
            return project.tasks.create('namingConventions', NamingConventionsTask)
        }
        return null
    }
}

@@ -207,16 +207,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]ShardSearchFailure.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportClearScrollAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportMultiSearchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]TransportSearchScrollAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchDfsQueryAndFetchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchDfsQueryThenFetchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchHelper.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchQueryAndFetchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchQueryThenFetchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchScrollQueryAndFetchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchScrollQueryThenFetchAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]search[/\\]type[/\\]TransportSearchTypeAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]SuggestResponse.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]suggest[/\\]TransportSuggestAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]action[/\\]support[/\\]ActionFilter.java" checks="LineLength" />
@@ -1529,7 +1519,6 @@
  <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IcuNormalizerTokenFilterFactory.java" checks="LineLength" />
  <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]IndexableBinaryStringTools.java" checks="LineLength" />
  <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]AnalysisTestUtils.java" checks="LineLength" />
  <suppress files="plugins[/\\]analysis-icu[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]TestIndexableBinaryStringTools.java" checks="LineLength" />
  <suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]JapaneseStopTokenFilterFactory.java" checks="LineLength" />
  <suppress files="plugins[/\\]analysis-kuromoji[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]KuromojiAnalysisTests.java" checks="LineLength" />
  <suppress files="plugins[/\\]analysis-phonetic[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]analysis[/\\]PhoneticTokenFilterFactory.java" checks="LineLength" />
@@ -1610,7 +1599,6 @@
  <suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]cli[/\\]CheckFileCommandTests.java" checks="LineLength" />
  <suppress files="qa[/\\]evil-tests[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]tribe[/\\]TribeUnitTests.java" checks="LineLength" />
  <suppress files="qa[/\\]smoke-test-client[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]smoketest[/\\]ESSmokeClientTestCase.java" checks="LineLength" />
  <suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]AbstractMustacheTests.java" checks="LineLength" />
  <suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]CombineProcessorsTests.java" checks="LineLength" />
  <suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestDocumentMustacheIT.java" checks="LineLength" />
  <suppress files="qa[/\\]smoke-test-ingest-with-all-dependencies[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]ingest[/\\]IngestMustacheSetProcessorIT.java" checks="LineLength" />

@@ -174,12 +174,6 @@ import org.elasticsearch.action.search.TransportClearScrollAction;
import org.elasticsearch.action.search.TransportMultiSearchAction;
import org.elasticsearch.action.search.TransportSearchAction;
import org.elasticsearch.action.search.TransportSearchScrollAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
import org.elasticsearch.action.suggest.SuggestAction;
import org.elasticsearch.action.suggest.TransportSuggestAction;
import org.elasticsearch.action.support.ActionFilter;
@@ -333,16 +327,8 @@ public class ActionModule extends AbstractModule {
                TransportShardMultiGetAction.class);
        registerAction(BulkAction.INSTANCE, TransportBulkAction.class,
                TransportShardBulkAction.class);
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class,
                TransportSearchDfsQueryThenFetchAction.class,
                TransportSearchQueryThenFetchAction.class,
                TransportSearchDfsQueryAndFetchAction.class,
                TransportSearchQueryAndFetchAction.class
        );
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class,
                TransportSearchScrollQueryThenFetchAction.class,
                TransportSearchScrollQueryAndFetchAction.class
        );
        registerAction(SearchAction.INSTANCE, TransportSearchAction.class);
        registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
        registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
        registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);

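The net effect of the second hunk: the per-search-type transport actions are no longer registered as support classes, because the async logic they carried moves into the package-private *AsyncAction classes added below, which a single transport action can pick per search type. A toy, assumption-labeled model of that dispatch, with illustrative names rather than the actual ActionModule or TransportSearchAction API:

    // Toy dispatch mirroring what replaces the removed per-type transport actions:
    // one transport action selects a package-private async action per search type
    // (assumed simplification; real constructor plumbing is shown in the new files).
    public class DispatchSketch {
        enum SearchType { QUERY_THEN_FETCH, QUERY_AND_FETCH, DFS_QUERY_THEN_FETCH, DFS_QUERY_AND_FETCH }

        interface AsyncAction { void start(); }

        static AsyncAction pick(SearchType type) {
            switch (type) {
                case DFS_QUERY_THEN_FETCH: return () -> System.out.println("dfs query then fetch");
                case DFS_QUERY_AND_FETCH:  return () -> System.out.println("dfs query and fetch");
                case QUERY_AND_FETCH:      return () -> System.out.println("query and fetch");
                default:                   return () -> System.out.println("query then fetch");
            }
        }

        public static void main(String[] args) {
            pick(SearchType.DFS_QUERY_THEN_FETCH).start();
        }
    }
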
@@ -17,12 +17,12 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

/**
 * Base implementation for an async action.
 */
public class AbstractAsyncAction {
abstract class AbstractAsyncAction {

    private final long startTime;

@@ -46,4 +46,5 @@ public class AbstractAsyncAction {
        return Math.max(1, System.currentTimeMillis() - startTime);
    }

    abstract void start();
}

@@ -0,0 +1,393 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalSearchRequest;

abstract class AbstractSearchAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

    protected final ESLogger logger;
    protected final SearchServiceTransportAction searchService;
    private final IndexNameExpressionResolver indexNameExpressionResolver;
    protected final SearchPhaseController searchPhaseController;
    protected final ThreadPool threadPool;
    protected final ActionListener<SearchResponse> listener;
    protected final GroupShardsIterator shardsIts;
    protected final SearchRequest request;
    protected final ClusterState clusterState;
    protected final DiscoveryNodes nodes;
    protected final int expectedSuccessfulOps;
    private final int expectedTotalOps;
    protected final AtomicInteger successfulOps = new AtomicInteger();
    private final AtomicInteger totalOps = new AtomicInteger();
    protected final AtomicArray<FirstResult> firstResults;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    private final Object shardFailuresMutex = new Object();
    protected volatile ScoreDoc[] sortedShardList;

    protected AbstractSearchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService, ClusterService clusterService,
                                        IndexNameExpressionResolver indexNameExpressionResolver,
                                        SearchPhaseController searchPhaseController, ThreadPool threadPool, SearchRequest request,
                                        ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchService = searchService;
        this.indexNameExpressionResolver = indexNameExpressionResolver;
        this.searchPhaseController = searchPhaseController;
        this.threadPool = threadPool;
        this.request = request;
        this.listener = listener;

        this.clusterState = clusterService.state();
        nodes = clusterState.nodes();

        clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

        // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
        // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
        // of just for the _search api
        String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(),
                startTime(), request.indices());

        for (String index : concreteIndices) {
            clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
        }

        Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(),
                request.indices());

        shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
        expectedSuccessfulOps = shardsIts.size();
        // we need to add 1 for non active partition, since we count it in the total!
        expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();

        firstResults = new AtomicArray<>(shardsIts.size());
    }

    public void start() {
        if (expectedSuccessfulOps == 0) {
            // no search shards to search on, bail with empty response
            // (it happens with search across _all with no indices around and consistent with broadcast operations)
            listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(),
                    ShardSearchFailure.EMPTY_ARRAY));
            return;
        }
        int shardIndex = -1;
        for (final ShardIterator shardIt : shardsIts) {
            shardIndex++;
            final ShardRouting shard = shardIt.nextOrNull();
            if (shard != null) {
                performFirstPhase(shardIndex, shardIt, shard);
            } else {
                // really, no shards active in this group
                onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            }
        }
    }

    void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
        if (shard == null) {
            // no more active shards... (we should not really get here, but just for safety)
            onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
        } else {
            final DiscoveryNode node = nodes.get(shard.currentNodeId());
            if (node == null) {
                onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            } else {
                String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState,
                        shard.index().getName(), request.indices());
                sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases,
                        startTime()), new ActionListener<FirstResult>() {
                    @Override
                    public void onResponse(FirstResult result) {
                        onFirstPhaseResult(shardIndex, shard, result, shardIt);
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
                    }
                });
            }
        }
    }

    void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
        result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
        processFirstPhaseResult(shardIndex, result);
        // we need to increment successful ops first before we compare the exit condition otherwise if we
        // are fast we could concurrently update totalOps but then preempt one of the threads which can
        // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc.
        successfulOps.incrementAndGet();
        // increment all the "future" shards to update the total ops since some may work and some may not...
        // and when that happens, we break on total ops, so we must maintain them
        final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
        if (xTotalOps == expectedTotalOps) {
            try {
                innerMoveToSecondPhase();
            } catch (Throwable e) {
                if (logger.isDebugEnabled()) {
                    logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
                }
                raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
            }
        } else if (xTotalOps > expectedTotalOps) {
            raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared " +
                    "to expected [" + expectedTotalOps + "]"));
        }
    }

    void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId,
                            final ShardIterator shardIt, Throwable t) {
        // we always add the shard failure for a specific shard instance
        // we do make sure to clean it on a successful response from a shard
        SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
        addShardFailure(shardIndex, shardTarget, t);

        if (totalOps.incrementAndGet() == expectedTotalOps) {
            if (logger.isDebugEnabled()) {
                if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                    if (shard != null) {
                        logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
                    } else {
                        logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
                    }
                } else if (logger.isTraceEnabled()) {
                    logger.trace("{}: Failed to execute [{}]", t, shard, request);
                }
            }
            final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
            if (successfulOps.get() == 0) {
                if (logger.isDebugEnabled()) {
                    logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
                }

                // no successful ops, raise an exception
                raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
            } else {
                try {
                    innerMoveToSecondPhase();
                } catch (Throwable e) {
                    raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
                }
            }
        } else {
            final ShardRouting nextShard = shardIt.nextOrNull();
            final boolean lastShard = nextShard == null;
            // trace log this exception
            if (logger.isTraceEnabled()) {
                logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
            }
            if (!lastShard) {
                try {
                    performFirstPhase(shardIndex, shardIt, nextShard);
                } catch (Throwable t1) {
                    onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
                }
            } else {
                // no more shards active, add a failure
                if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                    if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                        logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
                    }
                }
            }
        }
    }

    private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request,
                                       boolean lastShard) {
        if (shard != null) {
            return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
        } else {
            return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
        }
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
        // we don't aggregate shard failures on non active shards (but do keep the header counts right)
        if (TransportActions.isShardNotAvailableException(t)) {
            return;
        }

        // lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
        if (shardFailures == null) {
            synchronized (shardFailuresMutex) {
                if (shardFailures == null) {
                    shardFailures = new AtomicArray<>(shardsIts.size());
                }
            }
        }
        ShardSearchFailure failure = shardFailures.get(shardIndex);
        if (failure == null) {
            shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
        } else {
            // the failure is already present, try and not override it with an exception that is less meaningful
            // for example, getting illegal shard state
            if (TransportActions.isReadOverrideException(t)) {
                shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
            }
        }
    }

    private void raiseEarlyFailure(Throwable t) {
        for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
            try {
                DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
                sendReleaseSearchContext(entry.value.id(), node);
            } catch (Throwable t1) {
                logger.trace("failed to release context", t1);
            }
        }
        listener.onFailure(t);
    }

    /**
     * Releases shard targets that are not used in the docsIdsToLoad.
     */
    protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
                                                   AtomicArray<IntArrayList> docIdsToLoad) {
        if (docIdsToLoad == null) {
            return;
        }
        // we only release search context that we did not fetch from if we are not scrolling
        if (request.scroll() == null) {
            for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
                final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
                if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
                        && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
                    try {
                        DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
                        sendReleaseSearchContext(entry.value.queryResult().id(), node);
                    } catch (Throwable t1) {
                        logger.trace("failed to release context", t1);
                    }
                }
            }
        }
    }

    protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
        if (node != null) {
            searchService.sendFreeContext(node, contextId, request);
        }
    }

    protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry,
                                                         ScoreDoc[] lastEmittedDocPerShard) {
        if (lastEmittedDocPerShard != null) {
            ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
            return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
        } else {
            return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
        }
    }

    protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                                  ActionListener<FirstResult> listener);

    protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
        firstResults.set(shardIndex, result);

        if (logger.isTraceEnabled()) {
            logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
        }

        // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
        // so its ok concurrency wise to miss potentially the shard failures being created because of another failure
        // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
        AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
        if (shardFailures != null) {
            shardFailures.set(shardIndex, null);
        }
    }

    final void innerMoveToSecondPhase() throws Exception {
        if (logger.isTraceEnabled()) {
            StringBuilder sb = new StringBuilder();
            boolean hadOne = false;
            for (int i = 0; i < firstResults.length(); i++) {
                FirstResult result = firstResults.get(i);
                if (result == null) {
                    continue; // failure
                }
                if (hadOne) {
                    sb.append(",");
                } else {
                    hadOne = true;
                }
                sb.append(result.shardTarget());
            }

            logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
        }
        moveToSecondPhase();
    }

    protected abstract void moveToSecondPhase() throws Exception;

    protected abstract String firstPhaseName();
}

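A note on the bookkeeping in onFirstPhaseResult above: expectedTotalOps counts every shard copy (plus one for an empty group), a failure consumes one attempt via totalOps.incrementAndGet(), and a success retires the current copy plus every copy that will now never be tried via totalOps.addAndGet(shardIt.remaining() + 1), so the exit condition fires exactly once. A self-contained toy model, with numbers assumed purely for illustration:

    // Toy model of the totalOps accounting (assumed numbers, not from the commit):
    // a group with 3 shard copies contributes 3 ops to the expected total.
    import java.util.concurrent.atomic.AtomicInteger;

    public class TotalOpsSketch {
        public static void main(String[] args) {
            int copiesInGroup = 3;                       // one shard group, 3 copies
            AtomicInteger totalOps = new AtomicInteger();
            int expectedTotalOps = copiesInGroup;

            // copy 0 fails: exactly one attempt consumed
            totalOps.incrementAndGet();
            // copy 1 succeeds with 1 copy left untried: retire it and the untried copy
            int remaining = 1;
            totalOps.addAndGet(remaining + 1);

            // the exit condition fires exactly once
            System.out.println(totalOps.get() == expectedTotalOps); // true
        }
    }
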
@@ -17,14 +17,14 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

import java.util.Map;

/**
 *
 */
public class ParsedScrollId {
class ParsedScrollId {

    public static final String QUERY_THEN_FETCH_TYPE = "queryThenFetch";

@@ -17,9 +17,9 @@
 * under the License.
 */

package org.elasticsearch.action.search.type;
package org.elasticsearch.action.search;

public class ScrollIdForNode {
class ScrollIdForNode {
    private final String node;
    private final long scrollId;

@@ -0,0 +1,142 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchDfsQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

    private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

    SearchDfsQueryAndFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
                                      ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                      SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                      SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
        queryFetchResults = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "dfs";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<DfsSearchResult> listener) {
        searchService.sendExecuteDfs(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() {
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
        final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());

        for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
            executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
        }
    }

    void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                            final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
        searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
            @Override
            public void onResponse(QueryFetchSearchResult result) {
                result.shardTarget(dfsResult.shardTarget());
                queryFetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                } finally {
                    // the query might not have been executed at all (for example because thread pool rejected execution)
                    // and the search context that was created in dfs phase might not be released.
                    // release it again to be on the safe side
                    sendReleaseSearchContext(querySearchRequest.id(), node);
                }
            }
        });
    }

    void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                              AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
        }
        this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                        queryFetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                        buildTookInMillis(), buildShardFailures()));
            }

            @Override
            public void onFailure(Throwable t) {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(t);
            }
        });

    }
}

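The second phase above leans on a simple countdown: every per-shard response, success or failure, decrements counter exactly once, and only the caller that reaches zero runs finishHim(). A minimal standalone sketch of that pattern, with plumbing assumed rather than the Elasticsearch types:

    // Sketch of the per-shard countdown used by executeSecondPhase/finishHim
    // (assumed shapes; the real code counts QueryFetchSearchResult responses).
    import java.util.concurrent.atomic.AtomicInteger;

    public class CountdownToFinishSketch {
        public static void main(String[] args) throws InterruptedException {
            int shards = 3;
            AtomicInteger counter = new AtomicInteger(shards);
            Runnable finishHim = () -> System.out.println("merge results and respond once");

            // every shard response (success or failure) decrements exactly once;
            // only the thread that reaches zero runs the merge
            for (int i = 0; i < shards; i++) {
                new Thread(() -> {
                    if (counter.decrementAndGet() == 0) {
                        finishHim.run();
                    }
                }).start();
            }
            Thread.sleep(100);
        }
    }
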
@@ -0,0 +1,223 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchDfsQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<DfsSearchResult> {

    final AtomicArray<QuerySearchResult> queryResults;
    final AtomicArray<FetchSearchResult> fetchResults;
    final AtomicArray<IntArrayList> docIdsToLoad;

    SearchDfsQueryThenFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
                                       ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                       SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                       SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
        queryResults = new AtomicArray<>(firstResults.length());
        fetchResults = new AtomicArray<>(firstResults.length());
        docIdsToLoad = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "dfs";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<DfsSearchResult> listener) {
        searchService.sendExecuteDfs(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() {
        final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
        final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
        for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
            DfsSearchResult dfsResult = entry.value;
            DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
            QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
            executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
        }
    }

    void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter,
                      final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
        searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
            @Override
            public void onResponse(QuerySearchResult result) {
                result.shardTarget(dfsResult.shardTarget());
                queryResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    executeFetchPhase();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                } finally {
                    // the query might not have been executed at all (for example because thread pool rejected
                    // execution) and the search context that was created in dfs phase might not be released.
                    // release it again to be on the safe side
                    sendReleaseSearchContext(querySearchRequest.id(), node);
                }
            }
        });
    }

    void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult,
                        AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
        }
        this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
            } else {
                executeFetchPhase();
            }
        }
    }

    void executeFetchPhase() {
        try {
            innerExecuteFetchPhase();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
        }
    }

    void innerExecuteFetchPhase() throws Exception {
        boolean useScroll = request.scroll() != null;
        sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
                request, sortedShardList, firstResults.length()
        );
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            QuerySearchResult queryResult = queryResults.get(entry.index);
            DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
            executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
        }
    }

    void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                      final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
        searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                result.shardTarget(shardTarget);
                fetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                // the search context might not be cleared on the node where the fetch was executed for example
                // because the action was rejected by the thread pool. in this case we need to send a dedicated
                // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                docIdsToLoad.set(shardIndex, null);
                onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
            }
        });
    }

    void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex,
                        SearchShardTarget shardTarget, AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
        }
        this.addShardFailure(shardIndex, shardTarget, t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
                        fetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                        buildTookInMillis(), buildShardFailures()));
                releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                } finally {
                    releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
                }
            }
        });
    }
}

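The docIdsToLoad.set(shardIndex, null) move in executeFetch deserves a gloss: nulling the slot makes releaseIrrelevantSearchContexts treat the shard as "had matches but nothing was fetched" and send a free-context request during cleanup. A tiny model of that contract using a plain AtomicReferenceArray, since the real code uses the internal AtomicArray and actual search contexts:

    // Minimal model of the release-on-fetch-failure contract (assumed types).
    import java.util.concurrent.atomic.AtomicReferenceArray;

    public class ReleaseOnFetchFailureSketch {
        public static void main(String[] args) {
            // slot i holds the doc ids we still intend to fetch from shard i
            AtomicReferenceArray<int[]> docIdsToLoad = new AtomicReferenceArray<>(2);
            docIdsToLoad.set(0, new int[] {1, 2});
            docIdsToLoad.set(1, new int[] {3});

            // fetch against shard 1 is rejected: forget its doc ids so the final
            // cleanup pass sees nothing fetched and frees the search context
            docIdsToLoad.set(1, null);

            for (int i = 0; i < docIdsToLoad.length(); i++) {
                if (docIdsToLoad.get(i) == null) {
                    System.out.println("would send a free-context request for shard " + i);
                }
            }
        }
    }
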
@@ -0,0 +1,84 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;

class SearchQueryAndFetchAsyncAction extends AbstractSearchAsyncAction<QueryFetchSearchResult> {

    SearchQueryAndFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
                                   ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                   SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                   SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
    }

    @Override
    protected String firstPhaseName() {
        return "query_fetch";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<QueryFetchSearchResult> listener) {
        searchService.sendExecuteFetch(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() throws Exception {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                boolean useScroll = request.scroll() != null;
                sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                        firstResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(),
                        buildTookInMillis(), buildShardFailures()));
            }

            @Override
            public void onFailure(Throwable t) {
                ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                if (logger.isDebugEnabled()) {
                    logger.debug("failed to reduce search", failure);
                }
                super.onFailure(failure);
            }
        });
    }
}

@ -0,0 +1,157 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

class SearchQueryThenFetchAsyncAction extends AbstractSearchAsyncAction<QuerySearchResultProvider> {

    final AtomicArray<FetchSearchResult> fetchResults;
    final AtomicArray<IntArrayList> docIdsToLoad;

    SearchQueryThenFetchAsyncAction(ESLogger logger, SearchServiceTransportAction searchService,
                                    ClusterService clusterService, IndexNameExpressionResolver indexNameExpressionResolver,
                                    SearchPhaseController searchPhaseController, ThreadPool threadPool,
                                    SearchRequest request, ActionListener<SearchResponse> listener) {
        super(logger, searchService, clusterService, indexNameExpressionResolver, searchPhaseController, threadPool, request, listener);
        fetchResults = new AtomicArray<>(firstResults.length());
        docIdsToLoad = new AtomicArray<>(firstResults.length());
    }

    @Override
    protected String firstPhaseName() {
        return "query";
    }

    @Override
    protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request,
                                         ActionListener<QuerySearchResultProvider> listener) {
        searchService.sendExecuteQuery(node, request, listener);
    }

    @Override
    protected void moveToSecondPhase() throws Exception {
        boolean useScroll = request.scroll() != null;
        sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
            request, sortedShardList, firstResults.length()
        );
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            QuerySearchResultProvider queryResult = firstResults.get(entry.index);
            DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
            ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
            executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
        }
    }

    void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter,
                      final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
        searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
            @Override
            public void onResponse(FetchSearchResult result) {
                result.shardTarget(shardTarget);
                fetchResults.set(shardIndex, result);
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                // the search context might not be cleared on the node where the fetch was executed for example
                // because the action was rejected by the thread pool. in this case we need to send a dedicated
                // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                docIdsToLoad.set(shardIndex, null);
                onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
            }
        });
    }

    void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget,
                        AtomicInteger counter) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
        }
        this.addShardFailure(shardIndex, shardTarget, t);
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            finishHim();
        }
    }

    private void finishHim() {
        threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
            @Override
            public void doRun() throws IOException {
                final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                    fetchResults);
                String scrollId = null;
                if (request.scroll() != null) {
                    scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                }
                listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps,
                    successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
            }

            @Override
            public void onFailure(Throwable t) {
                try {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                } finally {
                    releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                }
            }
        });
    }
}
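Editor's note: every async action added in this commit leans on the same fan-out/fan-in idiom -- one slot per shard in an AtomicArray, one shared AtomicInteger initialized to the number of outstanding shard requests, and a single finishHim() fired by whichever callback decrements the counter to zero. A minimal, self-contained sketch of that idiom follows; the class and method names are illustrative stand-ins, not Elasticsearch API.

import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.function.IntConsumer;

class FanOutFanIn {
    // One slot per shard; AtomicReferenceArray plays the role of AtomicArray here.
    private final AtomicReferenceArray<String> results;
    private final AtomicInteger counter;

    FanOutFanIn(int shards) {
        results = new AtomicReferenceArray<>(shards);
        counter = new AtomicInteger(shards);
    }

    // 'sendToShard' is a hypothetical stand-in for searchService.sendExecuteFetch(...).
    void run(IntConsumer sendToShard) {
        for (int i = 0; i < results.length(); i++) {
            sendToShard.accept(i);
        }
    }

    // Called from each shard callback, success or failure alike: the counter must
    // reach zero exactly once, no matter how responses interleave across threads.
    void onShardDone(int shardIndex, String result) {
        results.set(shardIndex, result); // null on failure, mirroring AtomicArray usage
        if (counter.decrementAndGet() == 0) {
            merge(); // finishHim() in the actions above
        }
    }

    void merge() { /* reduce per-shard results into one response */ }
}

Decrementing on failure as well as success is what guarantees the merge step runs exactly once even when shards fail or their tasks are rejected.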
@@ -0,0 +1,181 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;

class SearchScrollQueryAndFetchAsyncAction extends AbstractAsyncAction {

    private final ESLogger logger;
    private final SearchPhaseController searchPhaseController;
    private final SearchServiceTransportAction searchService;
    private final SearchScrollRequest request;
    private final ActionListener<SearchResponse> listener;
    private final ParsedScrollId scrollId;
    private final DiscoveryNodes nodes;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    private final AtomicArray<QueryFetchSearchResult> queryFetchResults;
    private final AtomicInteger successfulOps;
    private final AtomicInteger counter;

    SearchScrollQueryAndFetchAsyncAction(ESLogger logger, ClusterService clusterService,
                                         SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                         SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchPhaseController = searchPhaseController;
        this.searchService = searchService;
        this.request = request;
        this.listener = listener;
        this.scrollId = scrollId;
        this.nodes = clusterService.state().nodes();
        this.successfulOps = new AtomicInteger(scrollId.getContext().length);
        this.counter = new AtomicInteger(scrollId.getContext().length);

        this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    // we do our best to return the shard failures, but its ok if its not fully concurrently safe
    // we simply try and return as much as possible
    protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
        if (shardFailures == null) {
            shardFailures = new AtomicArray<>(scrollId.getContext().length);
        }
        shardFailures.set(shardIndex, failure);
    }

    public void start() {
        if (scrollId.getContext().length == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
            return;
        }

        ScrollIdForNode[] context = scrollId.getContext();
        for (int i = 0; i < context.length; i++) {
            ScrollIdForNode target = context[i];
            DiscoveryNode node = nodes.get(target.getNode());
            if (node != null) {
                executePhase(i, node, target.getScrollId());
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }
        }
    }

    void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
        InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
        searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
            @Override
            public void onResponse(ScrollQueryFetchSearchResult result) {
                queryFetchResults.set(shardIndex, result.result());
                if (counter.decrementAndGet() == 0) {
                    finishHim();
                }
            }

            @Override
            public void onFailure(Throwable t) {
                onPhaseFailure(t, searchId, shardIndex);
            }
        });
    }

    private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, searchId);
        }
        addShardFailure(shardIndex, new ShardSearchFailure(t));
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
            } else {
                finishHim();
            }
        }
    }

    private void finishHim() {
        try {
            innerFinishHim();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
        }
    }

    private void innerFinishHim() throws Exception {
        ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
        final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
            queryFetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));
    }
}
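The lazily allocated shardFailures array above is a deliberate trade-off: successful searches never pay for the allocation, and the accompanying comment concedes the null check is not fully thread-safe. A condensed sketch of the same idea, under the assumption that best-effort failure reporting is acceptable (names are illustrative):

import java.util.concurrent.atomic.AtomicReferenceArray;

class LazyShardFailures {
    private final int numShards;
    private volatile AtomicReferenceArray<Throwable> failures; // stays null on the happy path

    LazyShardFailures(int numShards) {
        this.numShards = numShards;
    }

    // Mirrors addShardFailure() above: two racing threads may each allocate an array
    // and one failure can be dropped -- tolerable, because failures here are
    // best-effort diagnostics and the allocation is skipped entirely on success.
    void record(int shardIndex, Throwable t) {
        if (failures == null) {
            failures = new AtomicReferenceArray<>(numShards);
        }
        failures.set(shardIndex, t);
    }

    Throwable[] snapshot() {
        AtomicReferenceArray<Throwable> local = failures; // read the volatile once
        if (local == null) {
            return new Throwable[0];
        }
        Throwable[] out = new Throwable[local.length()];
        for (int i = 0; i < out.length; i++) {
            out[i] = local.get(i); // entries stay null for shards that succeeded
        }
        return out;
    }
}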

@@ -0,0 +1,226 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.TransportSearchHelper.internalScrollSearchRequest;

class SearchScrollQueryThenFetchAsyncAction extends AbstractAsyncAction {

    private final ESLogger logger;
    private final SearchServiceTransportAction searchService;
    private final SearchPhaseController searchPhaseController;
    private final SearchScrollRequest request;
    private final ActionListener<SearchResponse> listener;
    private final ParsedScrollId scrollId;
    private final DiscoveryNodes nodes;
    private volatile AtomicArray<ShardSearchFailure> shardFailures;
    final AtomicArray<QuerySearchResult> queryResults;
    final AtomicArray<FetchSearchResult> fetchResults;
    private volatile ScoreDoc[] sortedShardList;
    private final AtomicInteger successfulOps;

    SearchScrollQueryThenFetchAsyncAction(ESLogger logger, ClusterService clusterService,
                                          SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                          SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        this.logger = logger;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
        this.request = request;
        this.listener = listener;
        this.scrollId = scrollId;
        this.nodes = clusterService.state().nodes();
        this.successfulOps = new AtomicInteger(scrollId.getContext().length);
        this.queryResults = new AtomicArray<>(scrollId.getContext().length);
        this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
    }

    protected final ShardSearchFailure[] buildShardFailures() {
        if (shardFailures == null) {
            return ShardSearchFailure.EMPTY_ARRAY;
        }
        List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
        ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
        for (int i = 0; i < failures.length; i++) {
            failures[i] = entries.get(i).value;
        }
        return failures;
    }

    // we do our best to return the shard failures, but its ok if its not fully concurrently safe
    // we simply try and return as much as possible
    protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
        if (shardFailures == null) {
            shardFailures = new AtomicArray<>(scrollId.getContext().length);
        }
        shardFailures.set(shardIndex, failure);
    }

    public void start() {
        if (scrollId.getContext().length == 0) {
            listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
            return;
        }
        final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);

        ScrollIdForNode[] context = scrollId.getContext();
        for (int i = 0; i < context.length; i++) {
            ScrollIdForNode target = context[i];
            DiscoveryNode node = nodes.get(target.getNode());
            if (node != null) {
                executeQueryPhase(i, counter, node, target.getScrollId());
            } else {
                if (logger.isDebugEnabled()) {
                    logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                }
                successfulOps.decrementAndGet();
                if (counter.decrementAndGet() == 0) {
                    try {
                        executeFetchPhase();
                    } catch (Throwable e) {
                        listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                        return;
                    }
                }
            }
        }
    }

    private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
        InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
        searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
            @Override
            public void onResponse(ScrollQuerySearchResult result) {
                queryResults.set(shardIndex, result.queryResult());
                if (counter.decrementAndGet() == 0) {
                    try {
                        executeFetchPhase();
                    } catch (Throwable e) {
                        onFailure(e);
                    }
                }
            }

            @Override
            public void onFailure(Throwable t) {
                onQueryPhaseFailure(shardIndex, counter, searchId, t);
            }
        });
    }

    void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
        if (logger.isDebugEnabled()) {
            logger.debug("[{}] Failed to execute query phase", t, searchId);
        }
        addShardFailure(shardIndex, new ShardSearchFailure(t));
        successfulOps.decrementAndGet();
        if (counter.decrementAndGet() == 0) {
            if (successfulOps.get() == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
            } else {
                try {
                    executeFetchPhase();
                } catch (Throwable e) {
                    listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                }
            }
        }
    }

    private void executeFetchPhase() throws Exception {
        sortedShardList = searchPhaseController.sortDocs(true, queryResults);
        AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
        searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

        if (docIdsToLoad.asList().isEmpty()) {
            finishHim();
            return;
        }

        final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
        final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
        for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
            IntArrayList docIds = entry.value;
            final QuerySearchResult querySearchResult = queryResults.get(entry.index);
            ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
            ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
            DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
            searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    result.shardTarget(querySearchResult.shardTarget());
                    fetchResults.set(entry.index, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Failed to execute fetch phase", t);
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }
            });
        }
    }

    private void finishHim() {
        try {
            innerFinishHim();
        } catch (Throwable e) {
            listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
        }
    }

    private void innerFinishHim() {
        InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
        String scrollId = null;
        if (request.scroll() != null) {
            scrollId = request.scrollId();
        }
        listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
            buildTookInMillis(), buildShardFailures()));
    }
}
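The fetch phase above only contacts shards that contributed documents to the merged top-N, and for scroll it additionally ships each shard the last ScoreDoc it already emitted so the next round can resume past it. The grouping step, sketched with a hypothetical Hit type standing in for Lucene's ScoreDoc:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class DocIdsToLoad {
    // A stripped-down stand-in for ScoreDoc: which shard a hit came from and the
    // shard-local doc id that would have to be fetched.
    static class Hit {
        final int shardIndex;
        final int doc;
        Hit(int shardIndex, int doc) { this.shardIndex = shardIndex; this.doc = doc; }
    }

    // After the query-phase results are merged into a global top-N, group the
    // surviving doc ids back by shard; shards absent from the map get no fetch
    // request at all (the docIdsToLoad.asList().isEmpty() early-out above).
    static Map<Integer, List<Integer>> group(List<Hit> mergedTopDocs) {
        Map<Integer, List<Integer>> perShard = new HashMap<>();
        for (Hit hit : mergedTopDocs) {
            perShard.computeIfAbsent(hit.shardIndex, k -> new ArrayList<>()).add(hit.doc);
        }
        return perShard;
    }
}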

@@ -20,7 +20,6 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.search.type.ScrollIdForNode;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
@@ -41,7 +40,7 @@ import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;

-import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
+import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

/**
 */

@@ -20,10 +20,6 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
-import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
-import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
-import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
@@ -33,13 +29,14 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.IndexClosedException;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Map;
import java.util.Set;

import static org.elasticsearch.action.search.SearchType.DFS_QUERY_THEN_FETCH;
import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;

/**
@@ -48,25 +45,18 @@ import static org.elasticsearch.action.search.SearchType.QUERY_AND_FETCH;
public class TransportSearchAction extends HandledTransportAction<SearchRequest, SearchResponse> {

    private final ClusterService clusterService;
-    private final TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction;
-    private final TransportSearchQueryThenFetchAction queryThenFetchAction;
-    private final TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction;
-    private final TransportSearchQueryAndFetchAction queryAndFetchAction;
+    private final SearchServiceTransportAction searchService;
+    private final SearchPhaseController searchPhaseController;

    @Inject
-    public TransportSearchAction(Settings settings, ThreadPool threadPool,
-                                 TransportService transportService, ClusterService clusterService,
-                                 TransportSearchDfsQueryThenFetchAction dfsQueryThenFetchAction,
-                                 TransportSearchQueryThenFetchAction queryThenFetchAction,
-                                 TransportSearchDfsQueryAndFetchAction dfsQueryAndFetchAction,
-                                 TransportSearchQueryAndFetchAction queryAndFetchAction,
-                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+    public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
+                                 TransportService transportService, SearchServiceTransportAction searchService,
+                                 ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
+                                         indexNameExpressionResolver) {
        super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
+        this.searchPhaseController = searchPhaseController;
+        this.searchService = searchService;
        this.clusterService = clusterService;
-        this.dfsQueryThenFetchAction = dfsQueryThenFetchAction;
-        this.queryThenFetchAction = queryThenFetchAction;
-        this.dfsQueryAndFetchAction = dfsQueryAndFetchAction;
-        this.queryAndFetchAction = queryAndFetchAction;
    }

    @Override
@@ -75,7 +65,8 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
        try {
            ClusterState clusterState = clusterService.state();
            String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, searchRequest);
-            Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, searchRequest.routing(), searchRequest.indices());
+            Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState,
+                searchRequest.routing(), searchRequest.indices());
            int shardCount = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, routingMap);
            if (shardCount == 1) {
                // if we only have one group, then we always want Q_A_F, no need for DFS, and no need to do THEN since we hit one shard
@@ -86,16 +77,28 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
        } catch (Exception e) {
            logger.debug("failed to optimize search type, continue as normal", e);
        }
-        if (searchRequest.searchType() == DFS_QUERY_THEN_FETCH) {
-            dfsQueryThenFetchAction.execute(searchRequest, listener);
-        } else if (searchRequest.searchType() == SearchType.QUERY_THEN_FETCH) {
-            queryThenFetchAction.execute(searchRequest, listener);
-        } else if (searchRequest.searchType() == SearchType.DFS_QUERY_AND_FETCH) {
-            dfsQueryAndFetchAction.execute(searchRequest, listener);
-        } else if (searchRequest.searchType() == SearchType.QUERY_AND_FETCH) {
-            queryAndFetchAction.execute(searchRequest, listener);
-        } else {
-            throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
+
+        AbstractSearchAsyncAction searchAsyncAction;
+        switch(searchRequest.searchType()) {
+            case DFS_QUERY_THEN_FETCH:
+                searchAsyncAction = new SearchDfsQueryThenFetchAsyncAction(logger, searchService, clusterService,
+                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
+                break;
+            case QUERY_THEN_FETCH:
+                searchAsyncAction = new SearchQueryThenFetchAsyncAction(logger, searchService, clusterService,
+                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
+                break;
+            case DFS_QUERY_AND_FETCH:
+                searchAsyncAction = new SearchDfsQueryAndFetchAsyncAction(logger, searchService, clusterService,
+                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
+                break;
+            case QUERY_AND_FETCH:
+                searchAsyncAction = new SearchQueryAndFetchAsyncAction(logger, searchService, clusterService,
+                    indexNameExpressionResolver, searchPhaseController, threadPool, searchRequest, listener);
+                break;
+            default:
+                throw new IllegalStateException("Unknown search type: [" + searchRequest.searchType() + "]");
        }
+        searchAsyncAction.start();
    }
}
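The diff above replaces four injected, stateless TransportSearch*Action singletons with per-request async action objects selected in a switch: all per-request state (shard result arrays, counters) now lives on the action instance, and supporting a new search type means adding a case rather than a new injectable class. The shape of that dispatch, reduced to an illustrative sketch with stand-in types:

enum Type { DFS_QUERY_THEN_FETCH, QUERY_THEN_FETCH, DFS_QUERY_AND_FETCH, QUERY_AND_FETCH }

interface AsyncAction { void start(); }

class Dispatcher {
    // Each call builds a fresh, stateful action and starts it immediately;
    // nothing is shared between requests except the injected services.
    static void dispatch(Type type) {
        AsyncAction action;
        switch (type) {
            case DFS_QUERY_THEN_FETCH:
            case DFS_QUERY_AND_FETCH:
                action = () -> { /* run the dfs phase first, then query/fetch */ };
                break;
            case QUERY_THEN_FETCH:
            case QUERY_AND_FETCH:
                action = () -> { /* run the query (and possibly fetch) directly */ };
                break;
            default:
                throw new IllegalStateException("Unknown search type: [" + type + "]");
        }
        action.start();
    }
}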
@@ -17,13 +17,10 @@
 * under the License.
 */

-package org.elasticsearch.action.search.type;
+package org.elasticsearch.action.search;

import org.apache.lucene.util.BytesRef;
import org.apache.lucene.util.CharsRefBuilder;
-import org.elasticsearch.action.search.SearchRequest;
-import org.elasticsearch.action.search.SearchScrollRequest;
-import org.elasticsearch.action.search.SearchType;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
@@ -42,17 +39,19 @@ import static java.util.Collections.emptyMap;
/**
 *
 */
-public abstract class TransportSearchHelper {
+final class TransportSearchHelper {

-    public static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request, String[] filteringAliases, long nowInMillis) {
+    static ShardSearchTransportRequest internalSearchRequest(ShardRouting shardRouting, int numberOfShards, SearchRequest request,
+                                                             String[] filteringAliases, long nowInMillis) {
        return new ShardSearchTransportRequest(request, shardRouting, numberOfShards, filteringAliases, nowInMillis);
    }

-    public static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
+    static InternalScrollSearchRequest internalScrollSearchRequest(long id, SearchScrollRequest request) {
        return new InternalScrollSearchRequest(request, id);
    }

-    public static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
+    static String buildScrollId(SearchType searchType, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
+                                @Nullable Map<String, String> attributes) throws IOException {
        if (searchType == SearchType.DFS_QUERY_THEN_FETCH || searchType == SearchType.QUERY_THEN_FETCH) {
            return buildScrollId(ParsedScrollId.QUERY_THEN_FETCH_TYPE, searchPhaseResults, attributes);
        } else if (searchType == SearchType.QUERY_AND_FETCH || searchType == SearchType.DFS_QUERY_AND_FETCH) {
@@ -62,7 +61,8 @@ public abstract class TransportSearchHelper {
        }
    }

-    public static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults, @Nullable Map<String, String> attributes) throws IOException {
+    static String buildScrollId(String type, AtomicArray<? extends SearchPhaseResult> searchPhaseResults,
+                                @Nullable Map<String, String> attributes) throws IOException {
        StringBuilder sb = new StringBuilder().append(type).append(';');
        sb.append(searchPhaseResults.asList().size()).append(';');
        for (AtomicArray.Entry<? extends SearchPhaseResult> entry : searchPhaseResults.asList()) {
@@ -81,7 +81,7 @@ public abstract class TransportSearchHelper {
        return Base64.encodeBytes(bytesRef.bytes, bytesRef.offset, bytesRef.length, Base64.URL_SAFE);
    }

-    public static ParsedScrollId parseScrollId(String scrollId) {
+    static ParsedScrollId parseScrollId(String scrollId) {
        CharsRefBuilder spare = new CharsRefBuilder();
        try {
            byte[] decode = Base64.decode(scrollId, Base64.URL_SAFE);
@@ -128,5 +128,4 @@ public abstract class TransportSearchHelper {
    private TransportSearchHelper() {

    }
-
}
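buildScrollId above serializes a scroll id as a type tag, a context count, and, judging from the surrounding code, one colon-separated context entry per shard, before Base64-encoding the whole string URL-safely; parseScrollId reverses the process. A hedged roundtrip sketch using only java.util.Base64 -- the real helper also carries optional attributes and uses Lucene byte utilities:

import java.nio.charset.StandardCharsets;
import java.util.Base64;

class ScrollIdCodec {
    // Encodes "type;count;id1:node1;id2:node2;..." and makes it URL-safe, so the
    // opaque id can travel in query strings without escaping.
    static String encode(String type, long[] ids, String[] nodes) {
        StringBuilder sb = new StringBuilder().append(type).append(';').append(ids.length).append(';');
        for (int i = 0; i < ids.length; i++) {
            sb.append(ids[i]).append(':').append(nodes[i]).append(';');
        }
        return Base64.getUrlEncoder().encodeToString(sb.toString().getBytes(StandardCharsets.UTF_8));
    }

    // Decodes back to the raw "type;count;..." string for parsing.
    static String decode(String scrollId) {
        return new String(Base64.getUrlDecoder().decode(scrollId), StandardCharsets.UTF_8);
    }
}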
@@ -20,51 +20,60 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
-import org.elasticsearch.action.search.type.ParsedScrollId;
-import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
-import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.search.action.SearchServiceTransportAction;
+import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

-import static org.elasticsearch.action.search.type.ParsedScrollId.QUERY_AND_FETCH_TYPE;
-import static org.elasticsearch.action.search.type.ParsedScrollId.QUERY_THEN_FETCH_TYPE;
-import static org.elasticsearch.action.search.type.TransportSearchHelper.parseScrollId;
+import static org.elasticsearch.action.search.ParsedScrollId.QUERY_AND_FETCH_TYPE;
+import static org.elasticsearch.action.search.ParsedScrollId.QUERY_THEN_FETCH_TYPE;
+import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

/**
 *
 */
public class TransportSearchScrollAction extends HandledTransportAction<SearchScrollRequest, SearchResponse> {

-    private final TransportSearchScrollQueryThenFetchAction queryThenFetchAction;
-    private final TransportSearchScrollQueryAndFetchAction queryAndFetchAction;
+    private final ClusterService clusterService;
+    private final SearchServiceTransportAction searchService;
+    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
-                                       TransportSearchScrollQueryThenFetchAction queryThenFetchAction,
-                                       TransportSearchScrollQueryAndFetchAction queryAndFetchAction,
-                                       ActionFilters actionFilters,
-                                       IndexNameExpressionResolver indexNameExpressionResolver) {
-        super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchScrollRequest::new);
-        this.queryThenFetchAction = queryThenFetchAction;
-        this.queryAndFetchAction = queryAndFetchAction;
+                                       ClusterService clusterService, SearchServiceTransportAction searchService,
+                                       SearchPhaseController searchPhaseController,
+                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+        super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
+            SearchScrollRequest::new);
+        this.clusterService = clusterService;
+        this.searchService = searchService;
+        this.searchPhaseController = searchPhaseController;
    }

    @Override
    protected void doExecute(SearchScrollRequest request, ActionListener<SearchResponse> listener) {
        try {
            ParsedScrollId scrollId = parseScrollId(request.scrollId());
-            if (scrollId.getType().equals(QUERY_THEN_FETCH_TYPE)) {
-                queryThenFetchAction.execute(request, scrollId, listener);
-            } else if (scrollId.getType().equals(QUERY_AND_FETCH_TYPE)) {
-                queryAndFetchAction.execute(request, scrollId, listener);
-            } else {
-                throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
+            AbstractAsyncAction action;
+            switch (scrollId.getType()) {
+                case QUERY_THEN_FETCH_TYPE:
+                    action = new SearchScrollQueryThenFetchAsyncAction(logger, clusterService, searchService,
+                        searchPhaseController, request, scrollId, listener);
+                    break;
+                case QUERY_AND_FETCH_TYPE:
+                    action = new SearchScrollQueryAndFetchAsyncAction(logger, clusterService, searchService,
+                        searchPhaseController, request, scrollId, listener);
+                    break;
+                default:
+                    throw new IllegalArgumentException("Scroll id type [" + scrollId.getType() + "] unrecognized");
            }
+            action.start();
        } catch (Throwable e) {
            listener.onFailure(e);
        }
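doExecute above wraps both the scroll-id parse and the action start in a single try/catch that routes any synchronous failure to listener.onFailure: once a caller has handed over a listener, an exception thrown out of the transport layer would otherwise be lost. The pattern in isolation, with hypothetical types:

interface Listener<T> {
    void onResponse(T response);
    void onFailure(Throwable t);
}

class SafeDispatch {
    // Any failure before the async machinery takes over (bad scroll id, unknown
    // type) must still reach the listener, never escape as a thrown exception.
    static <T> void execute(Runnable riskyStart, Listener<T> listener) {
        try {
            riskyStart.run(); // parse the request and start the async action
        } catch (Throwable t) {
            listener.onFailure(t); // report instead of throwing into the void
        }
    }
}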

@@ -1,158 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

/**
 *
 */
public class TransportSearchDfsQueryAndFetchAction extends TransportSearchTypeAction {

    @Inject
    public TransportSearchDfsQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                                 SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                                 ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        new AsyncAction(searchRequest, listener).start();
    }

    private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {

        private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

        private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            super(request, listener);
            queryFetchResults = new AtomicArray<>(firstResults.length());
        }

        @Override
        protected String firstPhaseName() {
            return "dfs";
        }

        @Override
        protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
            searchService.sendExecuteDfs(node, request, listener);
        }

        @Override
        protected void moveToSecondPhase() {
            final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
            final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());

            for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
                DfsSearchResult dfsResult = entry.value;
                DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
                QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
                executeSecondPhase(entry.index, dfsResult, counter, node, querySearchRequest);
            }
        }

        void executeSecondPhase(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final DiscoveryNode node, final QuerySearchRequest querySearchRequest) {
            searchService.sendExecuteFetch(node, querySearchRequest, new ActionListener<QueryFetchSearchResult>() {
                @Override
                public void onResponse(QueryFetchSearchResult result) {
                    result.shardTarget(dfsResult.shardTarget());
                    queryFetchResults.set(shardIndex, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        onSecondPhaseFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                    } finally {
                        // the query might not have been executed at all (for example because thread pool rejected execution)
                        // and the search context that was created in dfs phase might not be released.
                        // release it again to be in the safe side
                        sendReleaseSearchContext(querySearchRequest.id(), node);
                    }
                }
            });
        }

        void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
            }
            this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                finishHim();
            }
        }

        private void finishHim() {
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                        queryFetchResults);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                    }
                    listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                }

                @Override
                public void onFailure(Throwable t) {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("query_fetch", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(t);
                }
            });

        }
    }
}
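Both the deleted action above and its replacement pair every second-phase failure with an explicit context release, since a rejected or failed query can leave the search context created in the dfs phase alive on the data node. The shape of that cleanup, sketched with a hypothetical Transport interface:

class SecondPhase {
    interface Transport {
        void query(long contextId) throws Exception;
        void release(long contextId);
    }

    static void execute(Transport transport, long contextId) {
        try {
            transport.query(contextId);
        } catch (Exception t) {
            try {
                recordFailure(t);
            } finally {
                // the query may never have run (e.g. thread pool rejection), so the
                // context created in the dfs phase must be freed defensively
                transport.release(contextId);
            }
        }
    }

    static void recordFailure(Exception t) { /* addShardFailure + counter bookkeeping */ }
}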
@@ -1,239 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.dfs.AggregatedDfs;
import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

/**
 *
 */
public class TransportSearchDfsQueryThenFetchAction extends TransportSearchTypeAction {

    @Inject
    public TransportSearchDfsQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                                  SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                                  ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        new AsyncAction(searchRequest, listener).start();
    }

    private class AsyncAction extends BaseAsyncAction<DfsSearchResult> {

        final AtomicArray<QuerySearchResult> queryResults;
        final AtomicArray<FetchSearchResult> fetchResults;
        final AtomicArray<IntArrayList> docIdsToLoad;

        private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            super(request, listener);
            queryResults = new AtomicArray<>(firstResults.length());
            fetchResults = new AtomicArray<>(firstResults.length());
            docIdsToLoad = new AtomicArray<>(firstResults.length());
        }

        @Override
        protected String firstPhaseName() {
            return "dfs";
        }

        @Override
        protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<DfsSearchResult> listener) {
            searchService.sendExecuteDfs(node, request, listener);
        }

        @Override
        protected void moveToSecondPhase() {
            final AggregatedDfs dfs = searchPhaseController.aggregateDfs(firstResults);
            final AtomicInteger counter = new AtomicInteger(firstResults.asList().size());
            for (final AtomicArray.Entry<DfsSearchResult> entry : firstResults.asList()) {
                DfsSearchResult dfsResult = entry.value;
                DiscoveryNode node = nodes.get(dfsResult.shardTarget().nodeId());
                QuerySearchRequest querySearchRequest = new QuerySearchRequest(request, dfsResult.id(), dfs);
                executeQuery(entry.index, dfsResult, counter, querySearchRequest, node);
            }
        }

        void executeQuery(final int shardIndex, final DfsSearchResult dfsResult, final AtomicInteger counter, final QuerySearchRequest querySearchRequest, final DiscoveryNode node) {
            searchService.sendExecuteQuery(node, querySearchRequest, new ActionListener<QuerySearchResult>() {
                @Override
                public void onResponse(QuerySearchResult result) {
                    result.shardTarget(dfsResult.shardTarget());
                    queryResults.set(shardIndex, result);
                    if (counter.decrementAndGet() == 0) {
                        executeFetchPhase();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        onQueryFailure(t, querySearchRequest, shardIndex, dfsResult, counter);
                    } finally {
                        // the query might not have been executed at all (for example because thread pool rejected execution)
                        // and the search context that was created in dfs phase might not be released.
                        // release it again to be in the safe side
                        sendReleaseSearchContext(querySearchRequest.id(), node);
                    }
                }
            });
        }

        void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shardIndex, DfsSearchResult dfsResult, AtomicInteger counter) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
            }
            this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                if (successfulOps.get() == 0) {
                    listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", buildShardFailures()));
                } else {
                    executeFetchPhase();
                }
            }
        }

        void executeFetchPhase() {
            try {
                innerExecuteFetchPhase();
            } catch (Throwable e) {
                listener.onFailure(new ReduceSearchPhaseException("query", "", e, buildShardFailures()));
            }
        }

        void innerExecuteFetchPhase() throws Exception {
            boolean useScroll = request.scroll() != null;
            sortedShardList = searchPhaseController.sortDocs(useScroll, queryResults);
            searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

            if (docIdsToLoad.asList().isEmpty()) {
                finishHim();
                return;
            }

            final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
                request, sortedShardList, firstResults.length()
            );
            final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
            for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
                QuerySearchResult queryResult = queryResults.get(entry.index);
                DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
                ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult, entry, lastEmittedDocPerShard);
                executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
            }
        }

        void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
            searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    result.shardTarget(shardTarget);
                    fetchResults.set(shardIndex, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    // the search context might not be cleared on the node where the fetch was executed for example
                    // because the action was rejected by the thread pool. in this case we need to send a dedicated
                    // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                    // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                    docIdsToLoad.set(shardIndex, null);
                    onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
                }
            });
        }

        void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
            }
            this.addShardFailure(shardIndex, shardTarget, t);
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                finishHim();
            }
        }

        private void finishHim() {
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults,
                        fetchResults);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                    }
                    listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                    releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                        if (logger.isDebugEnabled()) {
                            logger.debug("failed to reduce search", failure);
                        }
                        super.onFailure(failure);
                    } finally {
                        releaseIrrelevantSearchContexts(queryResults, docIdsToLoad);
                    }
                }
            });

        }
    }
}
|
@@ -1,104 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;

import static org.elasticsearch.action.search.type.TransportSearchHelper.buildScrollId;

/**
 *
 */
public class TransportSearchQueryAndFetchAction extends TransportSearchTypeAction {

    @Inject
    public TransportSearchQueryAndFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                              SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                              ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        new AsyncAction(searchRequest, listener).start();
    }

    private class AsyncAction extends BaseAsyncAction<QueryFetchSearchResult> {

        private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            super(request, listener);
        }

        @Override
        protected String firstPhaseName() {
            return "query_fetch";
        }

        @Override
        protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QueryFetchSearchResult> listener) {
            searchService.sendExecuteFetch(node, request, listener);
        }

        @Override
        protected void moveToSecondPhase() throws Exception {
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    boolean useScroll = request.scroll() != null;
                    sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                            firstResults);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = buildScrollId(request.searchType(), firstResults, null);
                    }
                    listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                }

                @Override
                public void onFailure(Throwable t) {
                    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
                    if (logger.isDebugEnabled()) {
                        logger.debug("failed to reduce search", failure);
                    }
                    super.onFailure(failure);
                }
            });
        }
    }
}
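A detail that is easy to miss in the class above: moveToSecondPhase() passes firstResults to merge() twice, once in the query-results position and once in the fetch-results position. That works because each QueryFetchSearchResult carries both halves, which is exactly what makes QUERY_AND_FETCH a single network round trip per shard. A hedged sketch of the idea with made-up types (QueryPart, FetchPart, and Combined are illustrative only, not Elasticsearch API):

import java.util.List;

// Illustrative only: a result type exposing both phases lets one shard
// response serve as input to both sides of the final merge.
interface QueryPart { int hitCount(); }
interface FetchPart { List<String> documents(); }

record Combined(int hitCount, List<String> documents) implements QueryPart, FetchPart {}

public class QueryAndFetchSketch {
    // The merge consumes query metadata and fetched documents; with combined
    // per-shard results, both arguments can be the very same objects.
    static String merge(List<? extends QueryPart> query, List<? extends FetchPart> fetch) {
        int hits = query.stream().mapToInt(QueryPart::hitCount).sum();
        long docs = fetch.stream().mapToLong(f -> f.documents().size()).sum();
        return hits + " hits, " + docs + " docs fetched";
    }

    public static void main(String[] args) {
        List<Combined> perShard = List.of(
                new Combined(2, List.of("a", "b")),
                new Combined(1, List.of("c")));
        System.out.println(merge(perShard, perShard)); // same list passed twice
    }
}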
@@ -1,173 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.threadpool.ThreadPool;

import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;

/**
 *
 */
public class TransportSearchQueryThenFetchAction extends TransportSearchTypeAction {

    @Inject
    public TransportSearchQueryThenFetchAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                               SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                               ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, threadPool, clusterService, searchService, searchPhaseController, actionFilters, indexNameExpressionResolver);
    }

    @Override
    protected void doExecute(SearchRequest searchRequest, ActionListener<SearchResponse> listener) {
        new AsyncAction(searchRequest, listener).start();
    }

    private class AsyncAction extends BaseAsyncAction<QuerySearchResultProvider> {

        final AtomicArray<FetchSearchResult> fetchResults;
        final AtomicArray<IntArrayList> docIdsToLoad;

        private AsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            super(request, listener);
            fetchResults = new AtomicArray<>(firstResults.length());
            docIdsToLoad = new AtomicArray<>(firstResults.length());
        }

        @Override
        protected String firstPhaseName() {
            return "query";
        }

        @Override
        protected void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<QuerySearchResultProvider> listener) {
            searchService.sendExecuteQuery(node, request, listener);
        }

        @Override
        protected void moveToSecondPhase() throws Exception {
            boolean useScroll = request.scroll() != null;
            sortedShardList = searchPhaseController.sortDocs(useScroll, firstResults);
            searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

            if (docIdsToLoad.asList().isEmpty()) {
                finishHim();
                return;
            }

            final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(
                    request, sortedShardList, firstResults.length()
            );
            final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
            for (AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
                QuerySearchResultProvider queryResult = firstResults.get(entry.index);
                DiscoveryNode node = nodes.get(queryResult.shardTarget().nodeId());
                ShardFetchSearchRequest fetchSearchRequest = createFetchRequest(queryResult.queryResult(), entry, lastEmittedDocPerShard);
                executeFetch(entry.index, queryResult.shardTarget(), counter, fetchSearchRequest, node);
            }
        }

        void executeFetch(final int shardIndex, final SearchShardTarget shardTarget, final AtomicInteger counter, final ShardFetchSearchRequest fetchSearchRequest, DiscoveryNode node) {
            searchService.sendExecuteFetch(node, fetchSearchRequest, new ActionListener<FetchSearchResult>() {
                @Override
                public void onResponse(FetchSearchResult result) {
                    result.shardTarget(shardTarget);
                    fetchResults.set(shardIndex, result);
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    // the search context might not be cleared on the node where the fetch was executed for example
                    // because the action was rejected by the thread pool. in this case we need to send a dedicated
                    // request to clear the search context. by setting docIdsToLoad to null, the context will be cleared
                    // in TransportSearchTypeAction.releaseIrrelevantSearchContexts() after the search request is done.
                    docIdsToLoad.set(shardIndex, null);
                    onFetchFailure(t, fetchSearchRequest, shardIndex, shardTarget, counter);
                }
            });
        }

        void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
            }
            this.addShardFailure(shardIndex, shardTarget, t);
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                finishHim();
            }
        }

        private void finishHim() {
            threadPool.executor(ThreadPool.Names.SEARCH).execute(new ActionRunnable<SearchResponse>(listener) {
                @Override
                public void doRun() throws IOException {
                    final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, firstResults,
                            fetchResults);
                    String scrollId = null;
                    if (request.scroll() != null) {
                        scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
                    }
                    listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
                    releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                }

                @Override
                public void onFailure(Throwable t) {
                    try {
                        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", t, buildShardFailures());
                        if (logger.isDebugEnabled()) {
                            logger.debug("failed to reduce search", failure);
                        }
                        super.onFailure(failure);
                    } finally {
                        releaseIrrelevantSearchContexts(firstResults, docIdsToLoad);
                    }
                }
            });
        }
    }
}
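The query-then-fetch flow above hinges on fillDocIdsToLoad(): after the global sort, each surviving ScoreDoc is routed back to the shard it came from, so the fetch phase only requests documents that made the global cut. A rough sketch of that grouping step under assumed types (ShardDoc here stands in for Lucene's ScoreDoc annotated with a shard index):

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Sketch: route globally sorted hits back to per-shard doc-id lists, so the
// fetch phase only asks each shard for the documents that made the cut.
public class DocIdsToLoadSketch {
    record ShardDoc(int shardIndex, int doc) {}

    static Map<Integer, List<Integer>> fillDocIdsToLoad(ShardDoc[] sortedDocs) {
        Map<Integer, List<Integer>> docIdsToLoad = new HashMap<>();
        for (ShardDoc d : sortedDocs) {
            docIdsToLoad.computeIfAbsent(d.shardIndex(), k -> new ArrayList<>()).add(d.doc());
        }
        return docIdsToLoad; // shards absent from the map need no fetch at all
    }

    public static void main(String[] args) {
        ShardDoc[] top = { new ShardDoc(0, 7), new ShardDoc(2, 1), new ShardDoc(0, 3) };
        System.out.println(fillDocIdsToLoad(top)); // e.g. {0=[7, 3], 2=[1]}
    }
}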
@@ -1,205 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.QueryFetchSearchResult;
import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;

/**
 *
 */
public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent {

    private final ClusterService clusterService;
    private final SearchServiceTransportAction searchService;
    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchScrollQueryAndFetchAction(Settings settings, ClusterService clusterService,
                                                    SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
        super(settings);
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
    }

    public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        new AsyncAction(request, scrollId, listener).start();
    }

    private class AsyncAction extends AbstractAsyncAction {

        private final SearchScrollRequest request;
        private final ActionListener<SearchResponse> listener;
        private final ParsedScrollId scrollId;
        private final DiscoveryNodes nodes;

        private volatile AtomicArray<ShardSearchFailure> shardFailures;
        private final AtomicArray<QueryFetchSearchResult> queryFetchResults;

        private final AtomicInteger successfulOps;
        private final AtomicInteger counter;

        private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
            this.request = request;
            this.listener = listener;
            this.scrollId = scrollId;
            this.nodes = clusterService.state().nodes();
            this.successfulOps = new AtomicInteger(scrollId.getContext().length);
            this.counter = new AtomicInteger(scrollId.getContext().length);

            this.queryFetchResults = new AtomicArray<>(scrollId.getContext().length);
        }

        protected final ShardSearchFailure[] buildShardFailures() {
            if (shardFailures == null) {
                return ShardSearchFailure.EMPTY_ARRAY;
            }
            List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
            ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
            for (int i = 0; i < failures.length; i++) {
                failures[i] = entries.get(i).value;
            }
            return failures;
        }

        // we do our best to return the shard failures, but its ok if its not fully concurrently safe
        // we simply try and return as much as possible
        protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
            if (shardFailures == null) {
                shardFailures = new AtomicArray<>(scrollId.getContext().length);
            }
            shardFailures.set(shardIndex, failure);
        }

        public void start() {
            if (scrollId.getContext().length == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
                return;
            }

            ScrollIdForNode[] context = scrollId.getContext();
            for (int i = 0; i < context.length; i++) {
                ScrollIdForNode target = context[i];
                DiscoveryNode node = nodes.get(target.getNode());
                if (node != null) {
                    executePhase(i, node, target.getScrollId());
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }
            }

            for (ScrollIdForNode target : scrollId.getContext()) {
                DiscoveryNode node = nodes.get(target.getNode());
                if (node == null) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }
            }
        }

        void executePhase(final int shardIndex, DiscoveryNode node, final long searchId) {
            InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
            searchService.sendExecuteFetch(node, internalRequest, new ActionListener<ScrollQueryFetchSearchResult>() {
                @Override
                public void onResponse(ScrollQueryFetchSearchResult result) {
                    queryFetchResults.set(shardIndex, result.result());
                    if (counter.decrementAndGet() == 0) {
                        finishHim();
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    onPhaseFailure(t, searchId, shardIndex);
                }
            });
        }

        private void onPhaseFailure(Throwable t, long searchId, int shardIndex) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, searchId);
            }
            addShardFailure(shardIndex, new ShardSearchFailure(t));
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                if (successfulOps.get() == 0) {
                    listener.onFailure(new SearchPhaseExecutionException("query_fetch", "all shards failed", t, buildShardFailures()));
                } else {
                    finishHim();
                }
            }
        }

        private void finishHim() {
            try {
                innerFinishHim();
            } catch (Throwable e) {
                listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
            }
        }

        private void innerFinishHim() throws Exception {
            ScoreDoc[] sortedShardList = searchPhaseController.sortDocs(true, queryFetchResults);
            final InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryFetchResults,
                    queryFetchResults);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = request.scrollId();
            }
            listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
        }
    }
}
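The scroll actions never re-route a request: the scroll id itself records which node holds each open search context, and start() simply resolves every recorded node again. The real wire format lives in TransportSearchHelper and is not shown in this diff; the sketch below only illustrates the shape of the data, with an assumed delimiter-based encoding that is not the actual one:

import java.util.Arrays;
import java.util.Base64;

// Sketch: a scroll id as an opaque token that round-trips an array of
// (nodeId, searchContextId) pairs. The real encoding differs; this only
// shows why a scroll request needs no shard routing of its own.
public class ScrollIdSketch {
    record Target(String nodeId, long contextId) {}

    static String encode(Target[] targets) {
        StringBuilder sb = new StringBuilder();
        for (Target t : targets) {
            sb.append(t.nodeId()).append(':').append(t.contextId()).append(';');
        }
        return Base64.getEncoder().encodeToString(sb.toString().getBytes());
    }

    static Target[] decode(String scrollId) {
        String raw = new String(Base64.getDecoder().decode(scrollId));
        return Arrays.stream(raw.split(";"))
                .filter(s -> !s.isEmpty())
                .map(s -> s.split(":"))
                .map(p -> new Target(p[0], Long.parseLong(p[1])))
                .toArray(Target[]::new);
    }

    public static void main(String[] args) {
        Target[] targets = { new Target("node_1", 42L), new Target("node_2", 7L) };
        System.out.println(Arrays.toString(decode(encode(targets))));
    }
}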
@@ -1,255 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.SearchScrollRequest;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.FetchSearchResult;
import org.elasticsearch.search.fetch.ShardFetchRequest;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.ScrollQuerySearchResult;

import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.type.TransportSearchHelper.internalScrollSearchRequest;

/**
 *
 */
public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent {

    private final ClusterService clusterService;
    private final SearchServiceTransportAction searchService;
    private final SearchPhaseController searchPhaseController;

    @Inject
    public TransportSearchScrollQueryThenFetchAction(Settings settings, ClusterService clusterService,
                                                     SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController) {
        super(settings);
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
    }

    public void execute(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
        new AsyncAction(request, scrollId, listener).start();
    }

    private class AsyncAction extends AbstractAsyncAction {

        private final SearchScrollRequest request;

        private final ActionListener<SearchResponse> listener;

        private final ParsedScrollId scrollId;

        private final DiscoveryNodes nodes;

        private volatile AtomicArray<ShardSearchFailure> shardFailures;
        final AtomicArray<QuerySearchResult> queryResults;
        final AtomicArray<FetchSearchResult> fetchResults;

        private volatile ScoreDoc[] sortedShardList;

        private final AtomicInteger successfulOps;

        private AsyncAction(SearchScrollRequest request, ParsedScrollId scrollId, ActionListener<SearchResponse> listener) {
            this.request = request;
            this.listener = listener;
            this.scrollId = scrollId;
            this.nodes = clusterService.state().nodes();
            this.successfulOps = new AtomicInteger(scrollId.getContext().length);

            this.queryResults = new AtomicArray<>(scrollId.getContext().length);
            this.fetchResults = new AtomicArray<>(scrollId.getContext().length);
        }

        protected final ShardSearchFailure[] buildShardFailures() {
            if (shardFailures == null) {
                return ShardSearchFailure.EMPTY_ARRAY;
            }
            List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
            ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
            for (int i = 0; i < failures.length; i++) {
                failures[i] = entries.get(i).value;
            }
            return failures;
        }

        // we do our best to return the shard failures, but its ok if its not fully concurrently safe
        // we simply try and return as much as possible
        protected final void addShardFailure(final int shardIndex, ShardSearchFailure failure) {
            if (shardFailures == null) {
                shardFailures = new AtomicArray<>(scrollId.getContext().length);
            }
            shardFailures.set(shardIndex, failure);
        }

        public void start() {
            if (scrollId.getContext().length == 0) {
                listener.onFailure(new SearchPhaseExecutionException("query", "no nodes to search on", ShardSearchFailure.EMPTY_ARRAY));
                return;
            }
            final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);

            ScrollIdForNode[] context = scrollId.getContext();
            for (int i = 0; i < context.length; i++) {
                ScrollIdForNode target = context[i];
                DiscoveryNode node = nodes.get(target.getNode());
                if (node != null) {
                    executeQueryPhase(i, counter, node, target.getScrollId());
                } else {
                    if (logger.isDebugEnabled()) {
                        logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
                    }
                    successfulOps.decrementAndGet();
                    if (counter.decrementAndGet() == 0) {
                        try {
                            executeFetchPhase();
                        } catch (Throwable e) {
                            listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                            return;
                        }
                    }
                }
            }
        }

        private void executeQueryPhase(final int shardIndex, final AtomicInteger counter, DiscoveryNode node, final long searchId) {
            InternalScrollSearchRequest internalRequest = internalScrollSearchRequest(searchId, request);
            searchService.sendExecuteQuery(node, internalRequest, new ActionListener<ScrollQuerySearchResult>() {
                @Override
                public void onResponse(ScrollQuerySearchResult result) {
                    queryResults.set(shardIndex, result.queryResult());
                    if (counter.decrementAndGet() == 0) {
                        try {
                            executeFetchPhase();
                        } catch (Throwable e) {
                            onFailure(e);
                        }
                    }
                }

                @Override
                public void onFailure(Throwable t) {
                    onQueryPhaseFailure(shardIndex, counter, searchId, t);
                }
            });
        }

        void onQueryPhaseFailure(final int shardIndex, final AtomicInteger counter, final long searchId, Throwable t) {
            if (logger.isDebugEnabled()) {
                logger.debug("[{}] Failed to execute query phase", t, searchId);
            }
            addShardFailure(shardIndex, new ShardSearchFailure(t));
            successfulOps.decrementAndGet();
            if (counter.decrementAndGet() == 0) {
                if (successfulOps.get() == 0) {
                    listener.onFailure(new SearchPhaseExecutionException("query", "all shards failed", t, buildShardFailures()));
                } else {
                    try {
                        executeFetchPhase();
                    } catch (Throwable e) {
                        listener.onFailure(new SearchPhaseExecutionException("query", "Fetch failed", e, ShardSearchFailure.EMPTY_ARRAY));
                    }
                }
            }
        }

        private void executeFetchPhase() throws Exception {
            sortedShardList = searchPhaseController.sortDocs(true, queryResults);
            AtomicArray<IntArrayList> docIdsToLoad = new AtomicArray<>(queryResults.length());
            searchPhaseController.fillDocIdsToLoad(docIdsToLoad, sortedShardList);

            if (docIdsToLoad.asList().isEmpty()) {
                finishHim();
                return;
            }

            final ScoreDoc[] lastEmittedDocPerShard = searchPhaseController.getLastEmittedDocPerShard(sortedShardList, queryResults.length());
            final AtomicInteger counter = new AtomicInteger(docIdsToLoad.asList().size());
            for (final AtomicArray.Entry<IntArrayList> entry : docIdsToLoad.asList()) {
                IntArrayList docIds = entry.value;
                final QuerySearchResult querySearchResult = queryResults.get(entry.index);
                ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
                ShardFetchRequest shardFetchRequest = new ShardFetchRequest(querySearchResult.id(), docIds, lastEmittedDoc);
                DiscoveryNode node = nodes.get(querySearchResult.shardTarget().nodeId());
                searchService.sendExecuteFetchScroll(node, shardFetchRequest, new ActionListener<FetchSearchResult>() {
                    @Override
                    public void onResponse(FetchSearchResult result) {
                        result.shardTarget(querySearchResult.shardTarget());
                        fetchResults.set(entry.index, result);
                        if (counter.decrementAndGet() == 0) {
                            finishHim();
                        }
                    }

                    @Override
                    public void onFailure(Throwable t) {
                        if (logger.isDebugEnabled()) {
                            logger.debug("Failed to execute fetch phase", t);
                        }
                        successfulOps.decrementAndGet();
                        if (counter.decrementAndGet() == 0) {
                            finishHim();
                        }
                    }
                });
            }
        }

        private void finishHim() {
            try {
                innerFinishHim();
            } catch (Throwable e) {
                listener.onFailure(new ReduceSearchPhaseException("fetch", "", e, buildShardFailures()));
            }
        }

        private void innerFinishHim() {
            InternalSearchResponse internalResponse = searchPhaseController.merge(sortedShardList, queryResults, fetchResults);
            String scrollId = null;
            if (request.scroll() != null) {
                scrollId = request.scrollId();
            }
            listener.onResponse(new SearchResponse(internalResponse, scrollId, this.scrollId.getContext().length, successfulOps.get(),
                    buildTookInMillis(), buildShardFailures()));
        }
    }
}
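The ShardFetchRequest above carries a lastEmittedDoc per shard: on each scroll page a shard resumes strictly after the last document it already returned, which is what keeps deep scrolling cheap compared to from+size paging. A toy per-shard cursor under assumed, heavily simplified types (the real implementation breaks score ties by doc id, which this sketch ignores):

import java.util.List;

// Sketch: resume a shard's sorted hit stream after the last emitted position,
// instead of recomputing and skipping `from` documents on every page.
public class LastEmittedDocSketch {
    // Scores sorted descending, as a stand-in for a shard's sorted hits.
    static List<Double> nextPage(List<Double> sortedScores, Double lastEmitted, int size) {
        int start = 0;
        if (lastEmitted != null) {
            // Advance to the first hit strictly below the cursor score.
            while (start < sortedScores.size() && sortedScores.get(start) >= lastEmitted) {
                start++;
            }
        }
        return sortedScores.subList(start, Math.min(start + size, sortedScores.size()));
    }

    public static void main(String[] args) {
        List<Double> hits = List.of(9.0, 8.5, 7.2, 6.9, 3.1);
        List<Double> page1 = nextPage(hits, null, 2); // [9.0, 8.5]
        List<Double> page2 = nextPage(hits, 8.5, 2);  // [7.2, 6.9]
        System.out.println(page1 + " then " + page2);
    }
}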
@@ -1,406 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.search.type;

import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.search.ReduceSearchPhaseException;
import org.elasticsearch.action.search.SearchAction;
import org.elasticsearch.action.search.SearchPhaseExecutionException;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportAction;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.SearchShardTarget;
import org.elasticsearch.search.action.SearchServiceTransportAction;
import org.elasticsearch.search.controller.SearchPhaseController;
import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
import org.elasticsearch.search.internal.InternalSearchResponse;
import org.elasticsearch.search.internal.ShardSearchTransportRequest;
import org.elasticsearch.search.query.QuerySearchResult;
import org.elasticsearch.search.query.QuerySearchResultProvider;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

import static org.elasticsearch.action.search.type.TransportSearchHelper.internalSearchRequest;

/**
 *
 */
public abstract class TransportSearchTypeAction extends TransportAction<SearchRequest, SearchResponse> {

    protected final ClusterService clusterService;

    protected final SearchServiceTransportAction searchService;

    protected final SearchPhaseController searchPhaseController;

    public TransportSearchTypeAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                     SearchServiceTransportAction searchService, SearchPhaseController searchPhaseController,
                                     ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, SearchAction.NAME, threadPool, actionFilters, indexNameExpressionResolver, clusterService.getTaskManager());
        this.clusterService = clusterService;
        this.searchService = searchService;
        this.searchPhaseController = searchPhaseController;
    }

    protected abstract class BaseAsyncAction<FirstResult extends SearchPhaseResult> extends AbstractAsyncAction {

        protected final ActionListener<SearchResponse> listener;

        protected final GroupShardsIterator shardsIts;

        protected final SearchRequest request;

        protected final ClusterState clusterState;
        protected final DiscoveryNodes nodes;

        protected final int expectedSuccessfulOps;
        private final int expectedTotalOps;

        protected final AtomicInteger successfulOps = new AtomicInteger();
        private final AtomicInteger totalOps = new AtomicInteger();

        protected final AtomicArray<FirstResult> firstResults;
        private volatile AtomicArray<ShardSearchFailure> shardFailures;
        private final Object shardFailuresMutex = new Object();
        protected volatile ScoreDoc[] sortedShardList;

        protected BaseAsyncAction(SearchRequest request, ActionListener<SearchResponse> listener) {
            this.request = request;
            this.listener = listener;

            this.clusterState = clusterService.state();
            nodes = clusterState.nodes();

            clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);

            // TODO: I think startTime() should become part of ActionRequest and that should be used both for index name
            // date math expressions and $now in scripts. This way all apis will deal with now in the same way instead
            // of just for the _search api
            String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request.indicesOptions(), startTime(), request.indices());

            for (String index : concreteIndices) {
                clusterState.blocks().indexBlockedRaiseException(ClusterBlockLevel.READ, index);
            }

            Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());

            shardsIts = clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
            expectedSuccessfulOps = shardsIts.size();
            // we need to add 1 for non active partition, since we count it in the total!
            expectedTotalOps = shardsIts.totalSizeWith1ForEmpty();

            firstResults = new AtomicArray<>(shardsIts.size());
        }

        public void start() {
            if (expectedSuccessfulOps == 0) {
                // no search shards to search on, bail with empty response (it happens with search across _all with no indices around and consistent with broadcast operations)
                listener.onResponse(new SearchResponse(InternalSearchResponse.empty(), null, 0, 0, buildTookInMillis(), ShardSearchFailure.EMPTY_ARRAY));
                return;
            }
            int shardIndex = -1;
            for (final ShardIterator shardIt : shardsIts) {
                shardIndex++;
                final ShardRouting shard = shardIt.nextOrNull();
                if (shard != null) {
                    performFirstPhase(shardIndex, shardIt, shard);
                } else {
                    // really, no shards active in this group
                    onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
                }
            }
        }

        void performFirstPhase(final int shardIndex, final ShardIterator shardIt, final ShardRouting shard) {
            if (shard == null) {
                // no more active shards... (we should not really get here, but just for safety)
                onFirstPhaseResult(shardIndex, null, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
            } else {
                final DiscoveryNode node = nodes.get(shard.currentNodeId());
                if (node == null) {
                    onFirstPhaseResult(shardIndex, shard, null, shardIt, new NoShardAvailableActionException(shardIt.shardId()));
                } else {
                    String[] filteringAliases = indexNameExpressionResolver.filteringAliases(clusterState, shard.index().getName(), request.indices());
                    sendExecuteFirstPhase(node, internalSearchRequest(shard, shardsIts.size(), request, filteringAliases, startTime()), new ActionListener<FirstResult>() {
                        @Override
                        public void onResponse(FirstResult result) {
                            onFirstPhaseResult(shardIndex, shard, result, shardIt);
                        }

                        @Override
                        public void onFailure(Throwable t) {
                            onFirstPhaseResult(shardIndex, shard, node.id(), shardIt, t);
                        }
                    });
                }
            }
        }

        void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
            result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
            processFirstPhaseResult(shardIndex, result);
            // we need to increment successful ops first before we compare the exit condition otherwise if we
            // are fast we could concurrently update totalOps but then preempt one of the threads which can
            // cause the successor to read a wrong value from successfulOps if second phase is very fast ie. count etc.
            successfulOps.incrementAndGet();
            // increment all the "future" shards to update the total ops since we some may work and some may not...
            // and when that happens, we break on total ops, so we must maintain them
            final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
            if (xTotalOps == expectedTotalOps) {
                try {
                    innerMoveToSecondPhase();
                } catch (Throwable e) {
                    if (logger.isDebugEnabled()) {
                        logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "] while moving to second phase", e);
                    }
                    raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, buildShardFailures()));
                }
            } else if (xTotalOps > expectedTotalOps) {
                raiseEarlyFailure(new IllegalStateException("unexpected higher total ops [" + xTotalOps + "] compared to expected [" + expectedTotalOps + "]"));
            }
        }

        void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nullable String nodeId, final ShardIterator shardIt, Throwable t) {
            // we always add the shard failure for a specific shard instance
            // we do make sure to clean it on a successful response from a shard
            SearchShardTarget shardTarget = new SearchShardTarget(nodeId, shardIt.shardId().getIndex(), shardIt.shardId().getId());
            addShardFailure(shardIndex, shardTarget, t);

            if (totalOps.incrementAndGet() == expectedTotalOps) {
                if (logger.isDebugEnabled()) {
                    if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                        if (shard != null) {
                            logger.debug(shard.shortSummary() + ": Failed to execute [" + request + "]", t);
                        } else {
                            logger.debug(shardIt.shardId() + ": Failed to execute [" + request + "]", t);
                        }
                    } else if (logger.isTraceEnabled()) {
                        logger.trace("{}: Failed to execute [{}]", t, shard, request);
                    }
                }
                final ShardSearchFailure[] shardSearchFailures = buildShardFailures();
                if (successfulOps.get() == 0) {
                    if (logger.isDebugEnabled()) {
                        logger.debug("All shards failed for phase: [{}]", t, firstPhaseName());
                    }

                    // no successful ops, raise an exception
                    raiseEarlyFailure(new SearchPhaseExecutionException(firstPhaseName(), "all shards failed", t, shardSearchFailures));
                } else {
                    try {
                        innerMoveToSecondPhase();
                    } catch (Throwable e) {
                        raiseEarlyFailure(new ReduceSearchPhaseException(firstPhaseName(), "", e, shardSearchFailures));
                    }
                }
            } else {
                final ShardRouting nextShard = shardIt.nextOrNull();
                final boolean lastShard = nextShard == null;
                // trace log this exception
                if (logger.isTraceEnabled()) {
                    logger.trace(executionFailureMsg(shard, shardIt, request, lastShard), t);
                }
                if (!lastShard) {
                    try {
                        performFirstPhase(shardIndex, shardIt, nextShard);
                    } catch (Throwable t1) {
                        onFirstPhaseResult(shardIndex, shard, shard.currentNodeId(), shardIt, t1);
                    }
                } else {
                    // no more shards active, add a failure
                    if (logger.isDebugEnabled() && !logger.isTraceEnabled()) { // do not double log this exception
                        if (t != null && !TransportActions.isShardNotAvailableException(t)) {
                            logger.debug(executionFailureMsg(shard, shardIt, request, lastShard), t);
                        }
                    }
                }
            }
        }

        private String executionFailureMsg(@Nullable ShardRouting shard, final ShardIterator shardIt, SearchRequest request, boolean lastShard) {
            if (shard != null) {
                return shard.shortSummary() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
            } else {
                return shardIt.shardId() + ": Failed to execute [" + request + "] lastShard [" + lastShard + "]";
            }
        }

        protected final ShardSearchFailure[] buildShardFailures() {
            AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
            if (shardFailures == null) {
                return ShardSearchFailure.EMPTY_ARRAY;
            }
            List<AtomicArray.Entry<ShardSearchFailure>> entries = shardFailures.asList();
            ShardSearchFailure[] failures = new ShardSearchFailure[entries.size()];
            for (int i = 0; i < failures.length; i++) {
                failures[i] = entries.get(i).value;
            }
            return failures;
        }

        protected final void addShardFailure(final int shardIndex, @Nullable SearchShardTarget shardTarget, Throwable t) {
            // we don't aggregate shard failures on non active shards (but do keep the header counts right)
            if (TransportActions.isShardNotAvailableException(t)) {
                return;
            }

            // lazily create shard failures, so we can early build the empty shard failure list in most cases (no failures)
            if (shardFailures == null) {
                synchronized (shardFailuresMutex) {
                    if (shardFailures == null) {
                        shardFailures = new AtomicArray<>(shardsIts.size());
                    }
                }
            }
            ShardSearchFailure failure = shardFailures.get(shardIndex);
            if (failure == null) {
                shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
            } else {
                // the failure is already present, try and not override it with an exception that is less meaningless
                // for example, getting illegal shard state
                if (TransportActions.isReadOverrideException(t)) {
                    shardFailures.set(shardIndex, new ShardSearchFailure(t, shardTarget));
                }
            }
        }

        private void raiseEarlyFailure(Throwable t) {
            for (AtomicArray.Entry<FirstResult> entry : firstResults.asList()) {
                try {
                    DiscoveryNode node = nodes.get(entry.value.shardTarget().nodeId());
                    sendReleaseSearchContext(entry.value.id(), node);
                } catch (Throwable t1) {
                    logger.trace("failed to release context", t1);
                }
            }
            listener.onFailure(t);
        }

        /**
         * Releases shard targets that are not used in the docsIdsToLoad.
         */
        protected void releaseIrrelevantSearchContexts(AtomicArray<? extends QuerySearchResultProvider> queryResults,
                                                       AtomicArray<IntArrayList> docIdsToLoad) {
            if (docIdsToLoad == null) {
                return;
            }
            // we only release search context that we did not fetch from if we are not scrolling
            if (request.scroll() == null) {
                for (AtomicArray.Entry<? extends QuerySearchResultProvider> entry : queryResults.asList()) {
                    final TopDocs topDocs = entry.value.queryResult().queryResult().topDocs();
                    if (topDocs != null && topDocs.scoreDocs.length > 0 // the shard had matches
                            && docIdsToLoad.get(entry.index) == null) { // but none of them made it to the global top docs
                        try {
                            DiscoveryNode node = nodes.get(entry.value.queryResult().shardTarget().nodeId());
                            sendReleaseSearchContext(entry.value.queryResult().id(), node);
                        } catch (Throwable t1) {
                            logger.trace("failed to release context", t1);
                        }
                    }
                }
            }
        }

        protected void sendReleaseSearchContext(long contextId, DiscoveryNode node) {
            if (node != null) {
                searchService.sendFreeContext(node, contextId, request);
            }
        }

        protected ShardFetchSearchRequest createFetchRequest(QuerySearchResult queryResult, AtomicArray.Entry<IntArrayList> entry, ScoreDoc[] lastEmittedDocPerShard) {
            if (lastEmittedDocPerShard != null) {
                ScoreDoc lastEmittedDoc = lastEmittedDocPerShard[entry.index];
                return new ShardFetchSearchRequest(request, queryResult.id(), entry.value, lastEmittedDoc);
            } else {
                return new ShardFetchSearchRequest(request, queryResult.id(), entry.value);
            }
        }

        protected abstract void sendExecuteFirstPhase(DiscoveryNode node, ShardSearchTransportRequest request, ActionListener<FirstResult> listener);

        protected final void processFirstPhaseResult(int shardIndex, FirstResult result) {
            firstResults.set(shardIndex, result);

            if (logger.isTraceEnabled()) {
                logger.trace("got first-phase result from {}", result != null ? result.shardTarget() : null);
            }

            // clean a previous error on this shard group (note, this code will be serialized on the same shardIndex value level
            // so its ok concurrency wise to miss potentially the shard failures being created because of another failure
            // in the #addShardFailure, because by definition, it will happen on *another* shardIndex
            AtomicArray<ShardSearchFailure> shardFailures = this.shardFailures;
            if (shardFailures != null) {
                shardFailures.set(shardIndex, null);
            }
        }

        final void innerMoveToSecondPhase() throws Exception {
            if (logger.isTraceEnabled()) {
                StringBuilder sb = new StringBuilder();
                boolean hadOne = false;
                for (int i = 0; i < firstResults.length(); i++) {
                    FirstResult result = firstResults.get(i);
                    if (result == null) {
                        continue; // failure
                    }
                    if (hadOne) {
                        sb.append(",");
                    } else {
                        hadOne = true;
                    }
                    sb.append(result.shardTarget());
                }

                logger.trace("Moving to second phase, based on results from: {} (cluster state version: {})", sb, clusterState.version());
            }
            moveToSecondPhase();
        }

        protected abstract void moveToSecondPhase() throws Exception;

        protected abstract String firstPhaseName();
    }
}
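One pattern in TransportSearchTypeAction deserves a call-out: shardFailures is volatile and created lazily under shardFailuresMutex, so the common all-shards-succeed path never allocates a failure array. A condensed, self-contained sketch of that double-checked initialization (field and types simplified; AtomicReferenceArray stands in for Elasticsearch's AtomicArray):

import java.util.concurrent.atomic.AtomicReferenceArray;

// Sketch: allocate the failure array only when the first failure arrives.
// volatile + synchronized double-check keeps this safe and allocation-free
// on the happy path.
public class LazyFailuresSketch {
    private final int shardCount;
    private final Object mutex = new Object();
    private volatile AtomicReferenceArray<Throwable> shardFailures;

    public LazyFailuresSketch(int shardCount) {
        this.shardCount = shardCount;
    }

    public void addShardFailure(int shardIndex, Throwable t) {
        if (shardFailures == null) {
            synchronized (mutex) {
                if (shardFailures == null) {
                    shardFailures = new AtomicReferenceArray<>(shardCount);
                }
            }
        }
        shardFailures.set(shardIndex, t);
    }

    public boolean hasFailures() {
        return shardFailures != null;
    }

    public static void main(String[] args) {
        LazyFailuresSketch s = new LazyFailuresSketch(3);
        System.out.println(s.hasFailures()); // false, nothing allocated yet
        s.addShardFailure(1, new RuntimeException("boom"));
        System.out.println(s.hasFailures()); // true
    }
}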
@@ -56,6 +56,11 @@ public interface ClusterService extends LifecycleComponent<ClusterService> {
     */
    void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException;

+    /**
+     * Remove an initial block to be set on the first cluster state created.
+     */
+    void removeInitialStateBlock(int blockId) throws IllegalStateException;
+
    /**
     * The operation routing.
     */
@@ -51,7 +51,6 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.publish.PublishClusterStateAction;
@@ -70,7 +69,7 @@ import java.util.Set;
 * and cluster state {@link #status}, which is updated during cluster state publishing and applying
 * processing. The cluster state can be updated only on the master node. All updates are performed by on a
 * single thread and controlled by the {@link InternalClusterService}. After every update the
- * {@link DiscoveryService#publish} method publishes new version of the cluster state to all other nodes in the
+ * {@link Discovery#publish} method publishes new version of the cluster state to all other nodes in the
 * cluster. The actual publishing mechanism is delegated to the {@link Discovery#publish} method and depends on
 * the type of discovery. For example, for local discovery it is implemented by the {@link LocalDiscovery#publish}
 * method. In the Zen Discovery it is handled in the {@link PublishClusterStateAction#publish} method. The
@@ -340,6 +340,12 @@ public class ClusterBlocks extends AbstractDiffable<ClusterBlocks> {
            return this;
        }

+        public Builder removeGlobalBlock(int blockId) {
+            global.removeIf(block -> block.id() == blockId);
+            return this;
+        }
+

        public Builder addIndexBlock(String index, ClusterBlock block) {
            if (!indices.containsKey(index)) {
                indices.put(index, new HashSet<>());
@@ -44,6 +44,7 @@ import org.elasticsearch.cluster.routing.OperationRouting;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Priority;
+import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
@@ -64,7 +65,6 @@ import org.elasticsearch.common.util.concurrent.PrioritizedEsThreadPoolExecutor;
import org.elasticsearch.common.util.concurrent.PrioritizedRunnable;
import org.elasticsearch.common.util.iterable.Iterables;
import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
@@ -76,7 +76,9 @@ import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
+import java.util.Objects;
import java.util.Queue;
+import java.util.Random;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.Executor;
@@ -84,6 +86,7 @@ import java.util.concurrent.Future;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
+import java.util.function.BiConsumer;
import java.util.stream.Collectors;

import static org.elasticsearch.common.util.concurrent.EsExecutors.daemonThreadFactory;
@@ -97,9 +100,12 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
    public static final Setting<TimeValue> CLUSTER_SERVICE_RECONNECT_INTERVAL_SETTING = Setting.positiveTimeSetting("cluster.service.reconnect_interval", TimeValue.timeValueSeconds(10), false, Setting.Scope.CLUSTER);

    public static final String UPDATE_THREAD_NAME = "clusterService#updateTask";
+    public static final Setting<Long> NODE_ID_SEED_SETTING =
+            // don't use node.id.seed so it won't be seen as an attribute
+            Setting.longSetting("node_id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
    private final ThreadPool threadPool;

-    private final DiscoveryService discoveryService;
+    private BiConsumer<ClusterChangedEvent, Discovery.AckListener> clusterStatePublisher;

    private final OperationRouting operationRouting;
@@ -139,12 +145,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
    private volatile ScheduledFuture reconnectToNodes;

    @Inject
-    public InternalClusterService(Settings settings, DiscoveryService discoveryService, OperationRouting operationRouting, TransportService transportService,
+    public InternalClusterService(Settings settings, OperationRouting operationRouting, TransportService transportService,
                                  ClusterSettings clusterSettings, ThreadPool threadPool, ClusterName clusterName, DiscoveryNodeService discoveryNodeService, Version version) {
        super(settings);
        this.operationRouting = operationRouting;
        this.transportService = transportService;
-        this.discoveryService = discoveryService;
        this.threadPool = threadPool;
        this.clusterSettings = clusterSettings;
        this.discoveryNodeService = discoveryNodeService;
@ -161,7 +166,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
localNodeMasterListeners = new LocalNodeMasterListeners(threadPool);
|
||||
|
||||
initialBlocks = ClusterBlocks.builder().addGlobalBlock(discoveryService.getNoMasterBlock());
|
||||
initialBlocks = ClusterBlocks.builder();
|
||||
|
||||
taskManager = transportService.getTaskManager();
|
||||
}
|
||||
|
@ -170,6 +175,10 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
this.slowTaskLoggingThreshold = slowTaskLoggingThreshold;
|
||||
}
|
||||
|
||||
public void setClusterStatePublisher(BiConsumer<ClusterChangedEvent, Discovery.AckListener> publisher) {
|
||||
clusterStatePublisher = publisher;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void addInitialStateBlock(ClusterBlock block) throws IllegalStateException {
|
||||
if (lifecycle.started()) {
|
||||
|
@ -180,14 +189,20 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
|
||||
@Override
|
||||
public void removeInitialStateBlock(ClusterBlock block) throws IllegalStateException {
|
||||
removeInitialStateBlock(block.id());
|
||||
}
|
||||
|
||||
@Override
|
||||
public void removeInitialStateBlock(int blockId) throws IllegalStateException {
|
||||
if (lifecycle.started()) {
|
||||
throw new IllegalStateException("can't set initial block when started");
|
||||
}
|
||||
initialBlocks.removeGlobalBlock(block);
|
||||
initialBlocks.removeGlobalBlock(blockId);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected void doStart() {
|
||||
Objects.requireNonNull(clusterStatePublisher, "please set a cluster state publisher before starting");
|
||||
add(localNodeMasterListeners);
|
||||
add(taskManager);
|
||||
this.clusterState = ClusterState.builder(clusterState).blocks(initialBlocks).build();
|
||||
|
@ -195,7 +210,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
this.reconnectToNodes = threadPool.schedule(reconnectInterval, ThreadPool.Names.GENERIC, new ReconnectToNodes());
|
||||
Map<String, String> nodeAttributes = discoveryNodeService.buildAttributes();
|
||||
// note, we rely on the fact that its a new id each time we start, see FD and "kill -9" handling
|
||||
final String nodeId = DiscoveryService.generateNodeId(settings);
|
||||
final String nodeId = generateNodeId(settings);
|
||||
final TransportAddress publishAddress = transportService.boundAddress().publishAddress();
|
||||
DiscoveryNode localNode = new DiscoveryNode(settings.get("node.name"), nodeId, publishAddress, nodeAttributes, version);
|
||||
DiscoveryNodes.Builder nodeBuilder = DiscoveryNodes.builder().put(localNode).localNodeId(localNode.id());
|
||||
|
@ -572,7 +587,7 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
if (newClusterState.nodes().localNodeMaster()) {
|
||||
logger.debug("publishing cluster state version [{}]", newClusterState.version());
|
||||
try {
|
||||
discoveryService.publish(clusterChangedEvent, ackListener);
|
||||
clusterStatePublisher.accept(clusterChangedEvent, ackListener);
|
||||
} catch (Discovery.FailedToCommitClusterStateException t) {
|
||||
logger.warn("failing [{}]: failed to commit cluster state version [{}]", t, source, newClusterState.version());
|
||||
proccessedListeners.forEach(task -> task.listener.onFailure(task.source, t));
|
||||
|
@ -853,6 +868,11 @@ public class InternalClusterService extends AbstractLifecycleComponent<ClusterSe
|
|||
}
|
||||
}
|
||||
|
||||
public static String generateNodeId(Settings settings) {
|
||||
Random random = Randomness.get(settings, NODE_ID_SEED_SETTING);
|
||||
return Strings.randomBase64UUID(random);
|
||||
}
|
||||
|
||||
private boolean nodeRequiresConnection(DiscoveryNode node) {
|
||||
return localNode().shouldConnectTo(node);
|
||||
}
|
||||
|
|
|
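(A sketch of what the new node_id.seed setting buys, assuming, per the Randomness javadoc touched below, that Randomness.get returns a Random seeded from the setting when it is set. Settings.settingsBuilder and the 42L value are illustrative only.)

    Settings settings = Settings.settingsBuilder().put("node_id.seed", 42L).build();
    // same seed, same Random, same base64 UUID
    String idA = InternalClusterService.generateNodeId(settings);
    String idB = InternalClusterService.generateNodeId(settings);
    assert idA.equals(idB);
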
@@ -41,7 +41,7 @@ import java.util.concurrent.ThreadLocalRandom;
  * setting a reproducible seed. When running the Elasticsearch server
  * process, non-reproducible sources of randomness are provided (unless
  * a setting is provided for a module that exposes a seed setting (e.g.,
- * DiscoveryService#DISCOVERY_SEED_SETTING)).
+ * DiscoveryService#NODE_ID_SEED_SETTING)).
  */
 public final class Randomness {
     private static final Method currentMethod;

@@ -48,7 +48,6 @@ import org.elasticsearch.common.network.NetworkService;
 import org.elasticsearch.common.util.concurrent.EsExecutors;
 import org.elasticsearch.common.util.concurrent.ThreadContext;
 import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.discovery.DiscoveryService;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.zen.ZenDiscovery;
 import org.elasticsearch.discovery.zen.elect.ElectMasterService;
@@ -62,11 +61,11 @@ import org.elasticsearch.http.HttpTransportSettings;
 import org.elasticsearch.http.netty.NettyHttpServerTransport;
 import org.elasticsearch.index.IndexSettings;
 import org.elasticsearch.index.store.IndexStoreConfig;
+import org.elasticsearch.indices.IndicesQueryCache;
+import org.elasticsearch.indices.IndicesRequestCache;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.analysis.HunspellService;
 import org.elasticsearch.indices.breaker.HierarchyCircuitBreakerService;
-import org.elasticsearch.indices.IndicesQueryCache;
-import org.elasticsearch.indices.IndicesRequestCache;
 import org.elasticsearch.indices.fielddata.cache.IndicesFieldDataCache;
 import org.elasticsearch.indices.recovery.RecoverySettings;
 import org.elasticsearch.indices.store.IndicesStore;
@@ -324,8 +323,8 @@ public final class ClusterSettings extends AbstractScopedSettings {
                     Environment.PATH_SCRIPTS_SETTING,
                     Environment.PATH_SHARED_DATA_SETTING,
                     Environment.PIDFILE_SETTING,
-                    DiscoveryService.DISCOVERY_SEED_SETTING,
-                    DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING,
+                    InternalClusterService.NODE_ID_SEED_SETTING,
+                    DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING,
                     DiscoveryModule.DISCOVERY_TYPE_SETTING,
                     DiscoveryModule.ZEN_MASTER_SERVICE_TYPE_SETTING,
                     FaultDetection.PING_RETRIES_SETTING,

@@ -39,10 +39,6 @@ public interface Discovery extends LifecycleComponent<Discovery> {
 
     DiscoveryNode localNode();
 
-    void addListener(InitialStateDiscoveryListener listener);
-
-    void removeListener(InitialStateDiscoveryListener listener);
-
     String nodeDescription();
 
     /**
@@ -93,13 +89,13 @@ public interface Discovery extends LifecycleComponent<Discovery> {
      */
     DiscoveryStats stats();
 
+    DiscoverySettings getDiscoverySettings();
 
     /**
      * Triggers the first join cycle
      */
     void startInitialJoin();
 
-
     /***
      * @return the current value of minimum master nodes, or -1 for not set
      */

@@ -34,6 +34,7 @@ import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
 import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
 
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -50,7 +51,7 @@ public class DiscoveryModule extends AbstractModule {
             "zen", Function.identity(), false, Setting.Scope.CLUSTER);
 
     private final Settings settings;
-    private final List<Class<? extends UnicastHostsProvider>> unicastHostProviders = new ArrayList<>();
+    private final Map<String, List<Class<? extends UnicastHostsProvider>>> unicastHostProviders = new HashMap<>();
     private final ExtensionPoint.ClassSet<ZenPing> zenPings = new ExtensionPoint.ClassSet<>("zen_ping", ZenPing.class);
     private final Map<String, Class<? extends Discovery>> discoveryTypes = new HashMap<>();
     private final Map<String, Class<? extends ElectMasterService>> masterServiceType = new HashMap<>();
@@ -66,9 +67,17 @@ public class DiscoveryModule extends AbstractModule {
 
     /**
     * Adds a custom unicast hosts provider to build a dynamic list of unicast hosts list when doing unicast discovery.
+     *
+     * @param type discovery for which this provider is relevant
+     * @param unicastHostProvider the host provider
      */
-    public void addUnicastHostProvider(Class<? extends UnicastHostsProvider> unicastHostProvider) {
-        unicastHostProviders.add(unicastHostProvider);
+    public void addUnicastHostProvider(String type, Class<? extends UnicastHostsProvider> unicastHostProvider) {
+        List<Class<? extends UnicastHostsProvider>> providerList = unicastHostProviders.get(type);
+        if (providerList == null) {
+            providerList = new ArrayList<>();
+            unicastHostProviders.put(type, providerList);
+        }
+        providerList.add(unicastHostProvider);
     }
 
     /**
@@ -116,12 +125,12 @@ public class DiscoveryModule extends AbstractModule {
             }
             bind(ZenPingService.class).asEagerSingleton();
             Multibinder<UnicastHostsProvider> unicastHostsProviderMultibinder = Multibinder.newSetBinder(binder(), UnicastHostsProvider.class);
-            for (Class<? extends UnicastHostsProvider> unicastHostProvider : unicastHostProviders) {
+            for (Class<? extends UnicastHostsProvider> unicastHostProvider :
+                    unicastHostProviders.getOrDefault(discoveryType, Collections.emptyList())) {
                 unicastHostsProviderMultibinder.addBinding().to(unicastHostProvider);
             }
             zenPings.bind(binder());
         }
         bind(Discovery.class).to(discoveryClass).asEagerSingleton();
-        bind(DiscoveryService.class).asEagerSingleton();
     }
 }

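(A sketch of the new registration call from a plugin's point of view, assuming the usual onModule extension hook of this era; MyUnicastHostsProvider is hypothetical.)

    public void onModule(DiscoveryModule discoveryModule) {
        // the provider is only bound when discovery.type resolves to "zen"; other types ignore it
        discoveryModule.addUnicastHostProvider("zen", MyUnicastHostsProvider.class);
    }
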
@@ -1,140 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.discovery;
-
-import org.elasticsearch.ElasticsearchTimeoutException;
-import org.elasticsearch.cluster.ClusterChangedEvent;
-import org.elasticsearch.cluster.block.ClusterBlock;
-import org.elasticsearch.cluster.node.DiscoveryNode;
-import org.elasticsearch.common.Randomness;
-import org.elasticsearch.common.Strings;
-import org.elasticsearch.common.component.AbstractLifecycleComponent;
-import org.elasticsearch.common.inject.Inject;
-import org.elasticsearch.common.settings.Setting;
-import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.common.unit.TimeValue;
-
-import java.util.Random;
-import java.util.concurrent.CountDownLatch;
-import java.util.concurrent.TimeUnit;
-
-/**
- *
- */
-public class DiscoveryService extends AbstractLifecycleComponent<DiscoveryService> {
-
-    public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
-    public static final Setting<Long> DISCOVERY_SEED_SETTING = Setting.longSetting("discovery.id.seed", 0L, Long.MIN_VALUE, false, Setting.Scope.CLUSTER);
-
-    private static class InitialStateListener implements InitialStateDiscoveryListener {
-
-        private final CountDownLatch latch = new CountDownLatch(1);
-        private volatile boolean initialStateReceived;
-
-        @Override
-        public void initialStateProcessed() {
-            initialStateReceived = true;
-            latch.countDown();
-        }
-
-        public boolean waitForInitialState(TimeValue timeValue) throws InterruptedException {
-            if (timeValue.millis() > 0) {
-                latch.await(timeValue.millis(), TimeUnit.MILLISECONDS);
-            }
-            return initialStateReceived;
-        }
-    }
-
-    private final TimeValue initialStateTimeout;
-    private final Discovery discovery;
-    private InitialStateListener initialStateListener;
-    private final DiscoverySettings discoverySettings;
-
-    @Inject
-    public DiscoveryService(Settings settings, DiscoverySettings discoverySettings, Discovery discovery) {
-        super(settings);
-        this.discoverySettings = discoverySettings;
-        this.discovery = discovery;
-        this.initialStateTimeout = INITIAL_STATE_TIMEOUT_SETTING.get(settings);
-    }
-
-    public ClusterBlock getNoMasterBlock() {
-        return discoverySettings.getNoMasterBlock();
-    }
-
-    @Override
-    protected void doStart() {
-        initialStateListener = new InitialStateListener();
-        discovery.addListener(initialStateListener);
-        discovery.start();
-        logger.info(discovery.nodeDescription());
-    }
-
-    public void joinClusterAndWaitForInitialState() {
-        try {
-            discovery.startInitialJoin();
-            if (!initialStateListener.waitForInitialState(initialStateTimeout)) {
-                logger.warn("waited for {} and no initial state was set by the discovery", initialStateTimeout);
-            }
-        } catch (InterruptedException e) {
-            Thread.currentThread().interrupt();
-            throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
-        }
-    }
-
-    @Override
-    protected void doStop() {
-        if (initialStateListener != null) {
-            discovery.removeListener(initialStateListener);
-        }
-        discovery.stop();
-    }
-
-    @Override
-    protected void doClose() {
-        discovery.close();
-    }
-
-    public DiscoveryNode localNode() {
-        return discovery.localNode();
-    }
-
-    public String nodeDescription() {
-        return discovery.nodeDescription();
-    }
-
-    /**
-     * Publish all the changes to the cluster from the master (can be called just by the master). The publish
-     * process should not publish this state to the master as well! (the master is sending it...).
-     * <p>
-     * The {@link org.elasticsearch.discovery.Discovery.AckListener} allows to acknowledge the publish
-     * event based on the response gotten from all nodes
-     */
-    public void publish(ClusterChangedEvent clusterChangedEvent, Discovery.AckListener ackListener) {
-        if (lifecycle.started()) {
-            discovery.publish(clusterChangedEvent, ackListener);
-        }
-    }
-
-    public static String generateNodeId(Settings settings) {
-        Random random = Randomness.get(settings, DiscoveryService.DISCOVERY_SEED_SETTING);
-        return Strings.randomBase64UUID(random);
-    }
-}

@@ -22,7 +22,6 @@ package org.elasticsearch.discovery;
 import org.elasticsearch.cluster.block.ClusterBlock;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
@@ -52,6 +51,7 @@ public class DiscoverySettings extends AbstractComponent {
     public static final Setting<TimeValue> COMMIT_TIMEOUT_SETTING = new Setting<>("discovery.zen.commit_timeout", (s) -> PUBLISH_TIMEOUT_SETTING.getRaw(s), (s) -> TimeValue.parseTimeValue(s, TimeValue.timeValueSeconds(30), "discovery.zen.commit_timeout"), true, Setting.Scope.CLUSTER);
     public static final Setting<ClusterBlock> NO_MASTER_BLOCK_SETTING = new Setting<>("discovery.zen.no_master_block", "write", DiscoverySettings::parseNoMasterBlock, true, Setting.Scope.CLUSTER);
     public static final Setting<Boolean> PUBLISH_DIFF_ENABLE_SETTING = Setting.boolSetting("discovery.zen.publish_diff.enable", true, true, Setting.Scope.CLUSTER);
+    public static final Setting<TimeValue> INITIAL_STATE_TIMEOUT_SETTING = Setting.positiveTimeSetting("discovery.initial_state_timeout", TimeValue.timeValueSeconds(30), false, Setting.Scope.CLUSTER);
 
     private volatile ClusterBlock noMasterBlock;
     private volatile TimeValue publishTimeout;
@@ -59,7 +59,6 @@ public class DiscoverySettings extends AbstractComponent {
     private volatile TimeValue commitTimeout;
     private volatile boolean publishDiff;
 
-    @Inject
     public DiscoverySettings(Settings settings, ClusterSettings clusterSettings) {
         super(settings);
         clusterSettings.addSettingsUpdateConsumer(NO_MASTER_BLOCK_SETTING, this::setNoMasterBlock);

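(With the @Inject constructor gone, discovery implementations build their own instance, as LocalDiscovery and ZenDiscovery do in the hunks below; a minimal sketch, assuming settings and clusterSettings are in scope.)

    DiscoverySettings discoverySettings = new DiscoverySettings(settings, clusterSettings);
    TimeValue initialStateTimeout = DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings);
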
@@ -36,6 +36,7 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.inject.internal.Nullable;
 import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.StreamInput;
+import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
@@ -44,14 +45,12 @@ import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.InitialStateDiscoveryListener;
 import org.elasticsearch.node.service.NodeService;
 
 import java.util.HashSet;
 import java.util.Queue;
 import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
-import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
 
 import static org.elasticsearch.cluster.ClusterState.Builder;
@@ -73,19 +72,16 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
 
-    private final AtomicBoolean initialStateSent = new AtomicBoolean();
 
-    private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<>();
 
     private static final ConcurrentMap<ClusterName, ClusterGroup> clusterGroups = ConcurrentCollections.newConcurrentMap();
 
     private volatile ClusterState lastProcessedClusterState;
 
     @Inject
-    public LocalDiscovery(Settings settings, ClusterName clusterName, ClusterService clusterService,
-                          DiscoverySettings discoverySettings) {
+    public LocalDiscovery(Settings settings, ClusterName clusterName, ClusterService clusterService, ClusterSettings clusterSettings) {
         super(settings);
         this.clusterName = clusterName;
         this.clusterService = clusterService;
-        this.discoverySettings = discoverySettings;
+        this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
     }
 
     @Override
@@ -150,11 +146,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
                 public void onFailure(String source, Throwable t) {
                     logger.error("unexpected failure during [{}]", t, source);
                 }
-
-                @Override
-                public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    sendInitialStateEventIfNeeded();
-                }
             });
         } else if (firstMaster != null) {
             // tell the master to send the fact that we are here
@@ -182,7 +173,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
 
                 @Override
                 public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                    sendInitialStateEventIfNeeded();
                     // we reroute not in the same cluster state update since in certain areas we rely on
                     // the node to be in the cluster state (sampled from ClusterService#state) to be there, also
                     // shard transitions need to better be handled in such cases
@@ -266,16 +256,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
         return clusterService.localNode();
     }
 
-    @Override
-    public void addListener(InitialStateDiscoveryListener listener) {
-        this.initialStateListeners.add(listener);
-    }
-
-    @Override
-    public void removeListener(InitialStateDiscoveryListener listener) {
-        this.initialStateListeners.remove(listener);
-    }
-
     @Override
     public String nodeDescription() {
         return clusterName.value() + "/" + localNode().id();
@@ -304,6 +284,11 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
         return new DiscoveryStats(null);
     }
 
+    @Override
+    public DiscoverySettings getDiscoverySettings() {
+        return discoverySettings;
+    }
+
     @Override
     public int getMinimumMasterNodes() {
         return -1;
@@ -403,7 +388,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
 
                     @Override
                     public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                        sendInitialStateEventIfNeeded();
                         publishResponseHandler.onResponse(discovery.localNode());
                     }
                 });
@@ -436,14 +420,6 @@ public class LocalDiscovery extends AbstractLifecycleComponent<Discovery> implem
         }
     }
 
-    private void sendInitialStateEventIfNeeded() {
-        if (initialStateSent.compareAndSet(false, true)) {
-            for (InitialStateDiscoveryListener listener : initialStateListeners) {
-                listener.initialStateProcessed();
-            }
-        }
-    }
-
     private class ClusterGroup {
 
         private Queue<LocalDiscovery> members = ConcurrentCollections.newQueue();

@@ -51,7 +51,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.InitialStateDiscoveryListener;
 import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
 import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
@@ -76,7 +75,6 @@ import java.util.ArrayList;
 import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
-import java.util.concurrent.CopyOnWriteArrayList;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.atomic.AtomicLong;
@@ -134,13 +132,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
     private final boolean masterElectionFilterDataNodes;
     private final TimeValue masterElectionWaitForJoinsTimeout;
 
-
-    private final CopyOnWriteArrayList<InitialStateDiscoveryListener> initialStateListeners = new CopyOnWriteArrayList<>();
-
     private final JoinThreadControl joinThreadControl;
 
-    private final AtomicBoolean initialStateSent = new AtomicBoolean();
-
     /** counts the time this node has joined the cluster or have elected it self as master */
     private final AtomicLong clusterJoinsCounter = new AtomicLong();
 
@@ -154,13 +147,12 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
     @Inject
     public ZenDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool,
                         TransportService transportService, final ClusterService clusterService, ClusterSettings clusterSettings,
-                        ZenPingService pingService, ElectMasterService electMasterService,
-                        DiscoverySettings discoverySettings) {
+                        ZenPingService pingService, ElectMasterService electMasterService) {
         super(settings);
         this.clusterName = clusterName;
         this.clusterService = clusterService;
         this.transportService = transportService;
-        this.discoverySettings = discoverySettings;
+        this.discoverySettings = new DiscoverySettings(settings, clusterSettings);
         this.pingService = pingService;
         this.electMaster = electMasterService;
         this.pingTimeout = PING_TIMEOUT_SETTING.get(settings);
@@ -248,7 +240,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
         pingService.stop();
         masterFD.stop("zen disco stop");
         nodesFD.stop();
-        initialStateSent.set(false);
         DiscoveryNodes nodes = nodes();
         if (sendLeaveRequest) {
             if (nodes.masterNode() == null) {
@@ -290,16 +281,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
         return clusterService.localNode();
     }
 
-    @Override
-    public void addListener(InitialStateDiscoveryListener listener) {
-        this.initialStateListeners.add(listener);
-    }
-
-    @Override
-    public void removeListener(InitialStateDiscoveryListener listener) {
-        this.initialStateListeners.remove(listener);
-    }
-
     @Override
     public String nodeDescription() {
         return clusterName.value() + "/" + clusterService.localNode().id();
@@ -357,6 +338,11 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
         return new DiscoveryStats(queueStats);
     }
 
+    @Override
+    public DiscoverySettings getDiscoverySettings() {
+        return discoverySettings;
+    }
+
     @Override
     public int getMinimumMasterNodes() {
         return electMaster.minimumMasterNodes();
@@ -403,7 +389,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
                 joinThreadControl.markThreadAsDone(currentThread);
                 // we only starts nodesFD if we are master (it may be that we received a cluster state while pinging)
                 nodesFD.updateNodesAndPing(state); // start the nodes FD
-                sendInitialStateEventIfNeeded();
                 long count = clusterJoinsCounter.incrementAndGet();
                 logger.trace("cluster joins counter set to [{}] (elected as master)", count);
             }
@@ -591,7 +576,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 
             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                sendInitialStateEventIfNeeded();
             }
         });
     }
@@ -630,7 +614,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 
             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                sendInitialStateEventIfNeeded();
             }
         });
     }
@@ -679,7 +662,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
 
             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
-                sendInitialStateEventIfNeeded();
             }
 
         });
@@ -773,7 +755,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
             @Override
             public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
                 try {
-                    sendInitialStateEventIfNeeded();
                     if (newClusterState != null) {
                         publishClusterState.pendingStatesQueue().markAsProcessed(newClusterState);
                     }
@@ -1005,14 +986,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent<Discovery> implemen
         }
     }
 
-    private void sendInitialStateEventIfNeeded() {
-        if (initialStateSent.compareAndSet(false, true)) {
-            for (InitialStateDiscoveryListener listener : initialStateListeners) {
-                listener.initialStateProcessed();
-            }
-        }
-    }
-
     private class NewPendingClusterStateListener implements PublishClusterStateAction.NewPendingClusterStateListener {
 
         @Override

@@ -40,7 +40,6 @@ import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
-import org.elasticsearch.discovery.DiscoveryService;
 import org.elasticsearch.env.NodeEnvironment;
 import org.elasticsearch.rest.RestStatus;
 import org.elasticsearch.threadpool.ThreadPool;
@@ -79,8 +78,6 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
 
     private final ClusterService clusterService;
 
-    private final DiscoveryService discoveryService;
-
     private final TimeValue recoverAfterTime;
     private final int recoverAfterNodes;
     private final int expectedNodes;
@@ -95,14 +92,12 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
 
     @Inject
     public GatewayService(Settings settings, AllocationService allocationService, ClusterService clusterService,
-                          DiscoveryService discoveryService, ThreadPool threadPool,
-                          NodeEnvironment nodeEnvironment, GatewayMetaState metaState,
+                          ThreadPool threadPool, NodeEnvironment nodeEnvironment, GatewayMetaState metaState,
                           TransportNodesListGatewayMetaState listGatewayMetaState, Discovery discovery) {
         super(settings);
         this.gateway = new Gateway(settings, clusterService, nodeEnvironment, metaState, listGatewayMetaState, discovery);
         this.allocationService = allocationService;
         this.clusterService = clusterService;
-        this.discoveryService = discoveryService;
         this.threadPool = threadPool;
         // allow to control a delay of when indices will get created
         this.expectedNodes = EXPECTED_NODES_SETTING.get(this.settings);
@@ -162,7 +157,7 @@ public class GatewayService extends AbstractLifecycleComponent<GatewayService> i
         }
 
         DiscoveryNodes nodes = state.nodes();
-        if (state.blocks().hasGlobalBlock(discoveryService.getNoMasterBlock())) {
+        if (state.nodes().masterNodeId() == null) {
             logger.debug("not recovering from gateway, no master elected yet");
         } else if (recoverAfterNodes != -1 && (nodes.masterAndDataNodes().size()) < recoverAfterNodes) {
             logger.debug("not recovering from gateway, nodes_size (data+master) [" + nodes.masterAndDataNodes().size() + "] < recover_after_nodes [" + recoverAfterNodes + "]");

@@ -187,12 +187,14 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
             }
 
             if (nodeShardState.storeException() == null) {
-                if (allocationId == null && nodeShardState.legacyVersion() != ShardStateMetaData.NO_VERSION) {
-                    // old shard with no allocation id, assign dummy value so that it gets added below in case of matchAnyShard
-                    allocationId = "_n/a_";
+                if (allocationId == null && nodeShardState.legacyVersion() == ShardStateMetaData.NO_VERSION) {
+                    logger.trace("[{}] on node [{}] has no shard state information", shard, nodeShardState.getNode());
+                } else if (allocationId != null) {
+                    assert nodeShardState.legacyVersion() == ShardStateMetaData.NO_VERSION : "Allocation id and legacy version cannot be both present";
+                    logger.trace("[{}] on node [{}] has allocation id [{}]", shard, nodeShardState.getNode(), allocationId);
+                } else {
+                    logger.trace("[{}] on node [{}] has no allocation id, out-dated shard (shard state version: [{}])", shard, nodeShardState.getNode(), nodeShardState.legacyVersion());
                 }
-
-                logger.trace("[{}] on node [{}] has allocation id [{}] of shard", shard, nodeShardState.getNode(), allocationId);
             } else {
                 logger.trace("[{}] on node [{}] has allocation id [{}] but the store can not be opened, treating as no allocation id", nodeShardState.storeException(), shard, nodeShardState.getNode(), allocationId);
                 allocationId = null;
@@ -299,9 +301,20 @@ public abstract class PrimaryShardAllocator extends AbstractComponent {
                 continue;
             }
 
-            // no version means it does not exists, which is what the API returns, and what we expect to
             if (nodeShardState.storeException() == null) {
-                logger.trace("[{}] on node [{}] has version [{}] of shard", shard, nodeShardState.getNode(), version);
+                if (version == ShardStateMetaData.NO_VERSION && nodeShardState.allocationId() == null) {
+                    logger.trace("[{}] on node [{}] has no shard state information", shard, nodeShardState.getNode());
+                } else if (version != ShardStateMetaData.NO_VERSION) {
+                    assert nodeShardState.allocationId() == null : "Allocation id and legacy version cannot be both present";
+                    logger.trace("[{}] on node [{}] has version [{}] of shard", shard, nodeShardState.getNode(), version);
+                } else {
+                    // shard was already selected in a 5.x cluster as primary for recovery, was initialized (and wrote a new state file) but
+                    // did not make it to STARTED state before the cluster crashed (otherwise list of active allocation ids would be
+                    // non-empty and allocation id - based allocation mode would be chosen).
+                    // Prefer this shard copy again.
+                    version = Long.MAX_VALUE;
+                    logger.trace("[{}] on node [{}] has allocation id [{}]", shard, nodeShardState.getNode(), nodeShardState.allocationId());
+                }
             } else {
                 // when there is an store exception, we disregard the reported version and assign it as no version (same as shard does not exist)
                 logger.trace("[{}] on node [{}] has version [{}] but the store can not be opened, treating no version", nodeShardState.storeException(), shard, nodeShardState.getNode(), version);

@@ -215,7 +215,7 @@ public final class BitsetFilterCache extends AbstractIndexComponent implements L
 
         @Override
         public IndexWarmer.TerminationHandle warmNewReaders(final IndexShard indexShard, final Engine.Searcher searcher) {
-            if (indexSettings.getIndex().equals(indexShard.getIndexSettings().getIndex()) == false) {
+            if (indexSettings.getIndex().equals(indexShard.indexSettings().getIndex()) == false) {
                 // this is from a different index
                 return TerminationHandle.NO_WAIT;
             }

@@ -153,7 +153,6 @@ public class IndexShard extends AbstractIndexShardComponent {
     private final EngineConfig engineConfig;
     private final TranslogConfig translogConfig;
     private final IndexEventListener indexEventListener;
-    private final IndexSettings idxSettings;
 
     /** How many bytes we are currently moving to disk, via either IndexWriter.flush or refresh. IndexingMemoryController polls this
      * across all shards to decide if throttling is necessary because moving bytes to disk is falling behind vs incoming documents
@@ -205,7 +204,6 @@ public class IndexShard extends AbstractIndexShardComponent {
                       IndexEventListener indexEventListener, IndexSearcherWrapper indexSearcherWrapper, NodeServicesProvider provider, SearchSlowLog slowLog, Engine.Warmer warmer, IndexingOperationListener... listeners) {
         super(shardId, indexSettings);
         final Settings settings = indexSettings.getSettings();
-        this.idxSettings = indexSettings;
         this.codecService = new CodecService(mapperService, logger);
         this.warmer = warmer;
         this.deletionPolicy = new SnapshotDeletionPolicy(new KeepOnlyLastCommitDeletionPolicy());
@@ -248,7 +246,7 @@ public class IndexShard extends AbstractIndexShardComponent {
         this.engineConfig = newEngineConfig(translogConfig, cachingPolicy);
         this.suspendableRefContainer = new SuspendableRefContainer();
         this.searcherWrapper = indexSearcherWrapper;
-        QueryShardContext queryShardContext = new QueryShardContext(idxSettings, indexCache.bitsetFilterCache(), indexFieldDataService, mapperService, similarityService, provider.getScriptService(), provider.getIndicesQueriesRegistry());
+        QueryShardContext queryShardContext = new QueryShardContext(indexSettings, indexCache.bitsetFilterCache(), indexFieldDataService, mapperService, similarityService, provider.getScriptService(), provider.getIndicesQueriesRegistry());
         this.percolatorQueriesRegistry = new PercolatorQueriesRegistry(shardId, indexSettings, queryShardContext);
     }
 
@@ -256,10 +254,6 @@ public class IndexShard extends AbstractIndexShardComponent {
         return this.store;
     }
 
-    public IndexSettings getIndexSettings() {
-        return idxSettings;
-    }
-
     /** returns true if this shard supports indexing (i.e., write) operations. */
     public boolean canIndex() {
         return true;
@@ -319,8 +313,9 @@ public class IndexShard extends AbstractIndexShardComponent {
      * unless explicitly disabled.
      *
      * @throws IndexShardRelocatedException if shard is marked as relocated and relocation aborted
+     * @throws IOException if shard state could not be persisted
      */
-    public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) {
+    public void updateRoutingEntry(final ShardRouting newRouting, final boolean persistState) throws IOException {
         final ShardRouting currentRouting = this.shardRouting;
         if (!newRouting.shardId().equals(shardId())) {
             throw new IllegalArgumentException("Trying to set a routing entry with shardId [" + newRouting.shardId() + "] on a shard with shardId [" + shardId() + "]");
@@ -328,57 +323,54 @@ public class IndexShard extends AbstractIndexShardComponent {
         if ((currentRouting == null || newRouting.isSameAllocation(currentRouting)) == false) {
             throw new IllegalArgumentException("Trying to set a routing entry with a different allocation. Current " + currentRouting + ", new " + newRouting);
         }
-        try {
-            if (currentRouting != null) {
-                if (!newRouting.primary() && currentRouting.primary()) {
-                    logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
-                }
-                // if its the same routing, return
-                if (currentRouting.equals(newRouting)) {
-                    return;
-                }
+        if (currentRouting != null) {
+            if (!newRouting.primary() && currentRouting.primary()) {
+                logger.warn("suspect illegal state: trying to move shard from primary mode to replica mode");
             }
+            // if its the same routing, return
+            if (currentRouting.equals(newRouting)) {
+                return;
+            }
+        }
 
-            if (state == IndexShardState.POST_RECOVERY) {
-                // if the state is started or relocating (cause it might move right away from started to relocating)
-                // then move to STARTED
-                if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
-                    // we want to refresh *before* we move to internal STARTED state
-                    try {
-                        getEngine().refresh("cluster_state_started");
-                    } catch (Throwable t) {
-                        logger.debug("failed to refresh due to move to cluster wide started", t);
-                    }
+        if (state == IndexShardState.POST_RECOVERY) {
+            // if the state is started or relocating (cause it might move right away from started to relocating)
+            // then move to STARTED
+            if (newRouting.state() == ShardRoutingState.STARTED || newRouting.state() == ShardRoutingState.RELOCATING) {
+                // we want to refresh *before* we move to internal STARTED state
+                try {
+                    getEngine().refresh("cluster_state_started");
+                } catch (Throwable t) {
+                    logger.debug("failed to refresh due to move to cluster wide started", t);
+                }
 
-                    boolean movedToStarted = false;
-                    synchronized (mutex) {
-                        // do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
-                        if (state == IndexShardState.POST_RECOVERY) {
-                            changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
-                            movedToStarted = true;
-                        } else {
-                            logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
-                        }
-                    }
-                    if (movedToStarted) {
-                        indexEventListener.afterIndexShardStarted(this);
+                boolean movedToStarted = false;
+                synchronized (mutex) {
+                    // do the check under a mutex, so we make sure to only change to STARTED if in POST_RECOVERY
+                    if (state == IndexShardState.POST_RECOVERY) {
+                        changeState(IndexShardState.STARTED, "global state is [" + newRouting.state() + "]");
+                        movedToStarted = true;
+                    } else {
+                        logger.debug("state [{}] not changed, not in POST_RECOVERY, global state is [{}]", state, newRouting.state());
                     }
                 }
+                if (movedToStarted) {
+                    indexEventListener.afterIndexShardStarted(this);
+                }
             }
+        }
 
-            if (state == IndexShardState.RELOCATED &&
-                (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
-                // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
-                // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
-                // active primaries.
-                throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
-            }
-            this.shardRouting = newRouting;
-            indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
-        } finally {
-            if (persistState) {
-                persistMetadata(newRouting, currentRouting);
-            }
+        if (state == IndexShardState.RELOCATED &&
+            (newRouting.relocating() == false || newRouting.equalsIgnoringMetaData(currentRouting) == false)) {
+            // if the shard is marked as RELOCATED we have to fail when any changes in shard routing occur (e.g. due to recovery
+            // failure / cancellation). The reason is that at the moment we cannot safely move back to STARTED without risking two
+            // active primaries.
+            throw new IndexShardRelocatedException(shardId(), "Shard is marked as relocated, cannot safely move to state " + newRouting.state());
+        }
+        this.shardRouting = newRouting;
+        indexEventListener.shardRoutingChanged(this, currentRouting, newRouting);
+        if (persistState) {
+            persistMetadata(newRouting, currentRouting);
         }
     }
@@ -733,7 +725,7 @@ public class IndexShard extends AbstractIndexShardComponent {
                 luceneVersion = segment.getVersion();
             }
         }
-        return luceneVersion == null ? idxSettings.getIndexVersionCreated().luceneVersion : luceneVersion;
+        return luceneVersion == null ? indexSettings.getIndexVersionCreated().luceneVersion : luceneVersion;
     }
 
     /**
@@ -1046,18 +1038,6 @@ public class IndexShard extends AbstractIndexShardComponent {
         }
     }
 
-    /**
-     * Deletes the shards metadata state. This method can only be executed if the shard is not active.
-     *
-     * @throws IOException if the delete fails
-     */
-    public void deleteShardState() throws IOException {
-        if (this.routingEntry() != null && this.routingEntry().active()) {
-            throw new IllegalStateException("Can't delete shard state on an active shard");
-        }
-        MetaDataStateFormat.deleteMetaState(shardPath().getDataPath());
-    }
-
     public boolean isActive() {
         return active.get();
     }
@@ -1070,7 +1050,7 @@ public class IndexShard extends AbstractIndexShardComponent {
         // we are the first primary, recover from the gateway
         // if its post api allocation, the index should exists
         assert shardRouting.primary() : "recover from store only makes sense if the shard is a primary shard";
-        boolean shouldExist = shardRouting.allocatedPostIndexCreate(idxSettings.getIndexMetaData());
+        boolean shouldExist = shardRouting.allocatedPostIndexCreate(indexSettings.getIndexMetaData());
 
         StoreRecovery storeRecovery = new StoreRecovery(shardId, logger);
         return storeRecovery.recoverFromStore(this, shouldExist, localNode);
@@ -1344,27 +1324,25 @@ public class IndexShard extends AbstractIndexShardComponent {
     }
 
     // pkg private for testing
-    void persistMetadata(ShardRouting newRouting, ShardRouting currentRouting) {
+    void persistMetadata(ShardRouting newRouting, @Nullable ShardRouting currentRouting) throws IOException {
         assert newRouting != null : "newRouting must not be null";
         if (newRouting.active()) {
-            try {
-                final String writeReason;
-                if (currentRouting == null) {
-                    writeReason = "freshly started, allocation id [" + newRouting.allocationId() + "]";
-                } else if (currentRouting.equals(newRouting) == false) {
-                    writeReason = "routing changed from " + currentRouting + " to " + newRouting;
-                } else {
-                    logger.trace("{} skip writing shard state, has been written before", shardId);
-                    return;
-                }
-                final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.primary(), getIndexUUID(), newRouting.allocationId());
-                logger.trace("{} writing shard state, reason [{}]", shardId, writeReason);
-                ShardStateMetaData.FORMAT.write(newShardStateMetadata, newShardStateMetadata.legacyVersion, shardPath().getShardStatePath());
-            } catch (IOException e) { // this is how we used to handle it.... :(
-                logger.warn("failed to write shard state", e);
-                // we failed to write the shard state, we will try and write
-                // it next time...
+            // only persist metadata if routing information that is persisted in shard state metadata actually changed
+            if (currentRouting == null
+                || currentRouting.primary() != newRouting.primary()
+                || currentRouting.allocationId().equals(newRouting.allocationId()) == false) {
+                assert currentRouting == null || currentRouting.isSameAllocation(newRouting);
+                final String writeReason;
+                if (currentRouting == null) {
+                    writeReason = "initial state with allocation id [" + newRouting.allocationId() + "]";
+                } else {
+                    writeReason = "routing changed from " + currentRouting + " to " + newRouting;
+                }
+                logger.trace("{} writing shard state, reason [{}]", shardId, writeReason);
+                final ShardStateMetaData newShardStateMetadata = new ShardStateMetaData(newRouting.primary(), getIndexUUID(), newRouting.allocationId());
+                ShardStateMetaData.FORMAT.write(newShardStateMetadata, newShardStateMetadata.legacyVersion, shardPath().getShardStatePath());
+            } else {
+                logger.trace("{} skip writing shard state, has been written before", shardId);
             }
         }
@@ -1396,7 +1374,7 @@ public class IndexShard extends AbstractIndexShardComponent {
         return new EngineConfig(shardId,
                 threadPool, indexSettings, warmer, store, deletionPolicy, indexSettings.getMergePolicy(),
                 mapperService.indexAnalyzer(), similarityService.similarity(mapperService), codecService, shardEventListener, translogRecoveryPerformer, indexCache.query(), cachingPolicy, translogConfig,
-                idxSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
+                indexSettings.getSettings().getAsTime(IndexingMemoryController.SHARD_INACTIVE_TIME_SETTING, IndexingMemoryController.SHARD_DEFAULT_INACTIVE_TIME));
     }
 
     public Releasable acquirePrimaryOperationLock() {

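(The rewritten persistMetadata reduces the write decision to one predicate; restated here as a sketch, with names exactly as in the hunk above.)

    boolean shouldWriteShardState = newRouting.active()
            && (currentRouting == null
                || currentRouting.primary() != newRouting.primary()
                || currentRouting.allocationId().equals(newRouting.allocationId()) == false);
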
@@ -51,12 +51,12 @@ public final class ShadowIndexShard extends IndexShard {
 
     /**
      * In addition to the regular accounting done in
-     * {@link IndexShard#updateRoutingEntry(org.elasticsearch.cluster.routing.ShardRouting, boolean)},
+     * {@link IndexShard#updateRoutingEntry(ShardRouting, boolean)},
      * if this shadow replica needs to be promoted to a primary, the shard is
      * failed in order to allow a new primary to be re-allocated.
      */
     @Override
-    public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) {
+    public void updateRoutingEntry(ShardRouting newRouting, boolean persistState) throws IOException {
         if (newRouting.primary() == true) {// becoming a primary
             throw new IllegalStateException("can't promote shard to primary");
         }

@@ -904,7 +904,7 @@ public class IndicesService extends AbstractLifecycleComponent<IndicesService> i
         if (!CACHEABLE_SEARCH_TYPES.contains(context.searchType())) {
             return false;
         }
-        IndexSettings settings = context.indexShard().getIndexSettings();
+        IndexSettings settings = context.indexShard().indexSettings();
         // if not explicitly set in the request, use the index setting, if not, use the request
         if (request.requestCache() == null) {
             if (settings.getValue(IndicesRequestCache.INDEX_CACHE_REQUEST_ENABLED_SETTING) == false) {

@@ -342,8 +342,6 @@ public class RecoveryTarget extends AbstractRefCounted implements RecoveryTarget
             // first, we go and move files that were created with the recovery id suffix to
             // the actual names, its ok if we have a corrupted index here, since we have replicas
             // to recover from in case of a full cluster shutdown just when this code executes...
-            indexShard().deleteShardState(); // we have to delete it first since even if we fail to rename the shard
-            // might be invalid
             renameAllTempFiles();
             final Store store = store();
             // now write checksums

@ -22,6 +22,7 @@ package org.elasticsearch.node;
|
|||
import org.apache.lucene.util.IOUtils;
|
||||
import org.elasticsearch.Build;
|
||||
import org.elasticsearch.ElasticsearchException;
|
||||
import org.elasticsearch.ElasticsearchTimeoutException;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.action.ActionModule;
|
||||
import org.elasticsearch.cache.recycler.PageCacheRecycler;
|
||||
|
@ -30,9 +31,13 @@ import org.elasticsearch.client.node.NodeClientModule;
|
|||
import org.elasticsearch.cluster.ClusterModule;
|
||||
import org.elasticsearch.cluster.ClusterNameModule;
|
||||
import org.elasticsearch.cluster.ClusterService;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.ClusterStateObserver;
|
||||
import org.elasticsearch.cluster.MasterNodeChangePredicate;
|
||||
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.cluster.routing.RoutingService;
|
||||
import org.elasticsearch.cluster.service.InternalClusterService;
|
||||
import org.elasticsearch.common.StopWatch;
|
||||
import org.elasticsearch.common.component.Lifecycle;
|
||||
import org.elasticsearch.common.component.LifecycleComponent;
|
||||
|
@ -52,9 +57,10 @@ import org.elasticsearch.common.settings.Settings;
|
|||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.common.transport.BoundTransportAddress;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
import org.elasticsearch.discovery.Discovery;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.DiscoveryService;
|
||||
import org.elasticsearch.discovery.DiscoverySettings;
|
||||
import org.elasticsearch.env.Environment;
|
||||
import org.elasticsearch.env.EnvironmentModule;
|
||||
import org.elasticsearch.env.NodeEnvironment;
|
||||
|
@ -111,6 +117,7 @@ import java.util.Arrays;
|
|||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.CountDownLatch;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
import java.util.function.Function;
|
||||
|
||||
|
@ -154,7 +161,7 @@ public class Node implements Closeable {
|
|||
|
||||
protected Node(Environment tmpEnv, Version version, Collection<Class<? extends Plugin>> classpathPlugins) {
|
||||
Settings tmpSettings = settingsBuilder().put(tmpEnv.settings())
|
||||
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();
|
||||
.put(Client.CLIENT_TYPE_SETTING_S.getKey(), CLIENT_TYPE).build();
|
||||
tmpSettings = TribeService.processSettings(tmpSettings);
|
||||
|
||||
ESLogger logger = Loggers.getLogger(Node.class, NODE_NAME_SETTING.get(tmpSettings));
|
||||
|
@ -164,7 +171,7 @@ public class Node implements Closeable {
|
|||
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("using config [{}], data [{}], logs [{}], plugins [{}]",
|
||||
tmpEnv.configFile(), Arrays.toString(tmpEnv.dataFiles()), tmpEnv.logsFile(), tmpEnv.pluginsFile());
|
||||
tmpEnv.configFile(), Arrays.toString(tmpEnv.dataFiles()), tmpEnv.logsFile(), tmpEnv.pluginsFile());
|
||||
}
|
||||
|
||||
this.pluginsService = new PluginsService(tmpSettings, tmpEnv.modulesFile(), tmpEnv.pluginsFile(), classpathPlugins);
|
||||
|

@@ -282,28 +289,73 @@ public class Node implements Closeable {
        injector.getInstance(MonitorService.class).start();
        injector.getInstance(RestController.class).start();

        assert injector.getInstance(ClusterService.class) instanceof InternalClusterService :
            "node cluster service implementation must inherit from InternalClusterService";
        final InternalClusterService clusterService = (InternalClusterService) injector.getInstance(ClusterService.class);

        // TODO hack around circular dependencies problems
-       injector.getInstance(GatewayAllocator.class).setReallocation(injector.getInstance(ClusterService.class), injector.getInstance(RoutingService.class));
+       injector.getInstance(GatewayAllocator.class).setReallocation(clusterService, injector.getInstance(RoutingService.class));

        injector.getInstance(ResourceWatcherService.class).start();
        injector.getInstance(GatewayService.class).start();
        Discovery discovery = injector.getInstance(Discovery.class);
        clusterService.addInitialStateBlock(discovery.getDiscoverySettings().getNoMasterBlock());
        clusterService.setClusterStatePublisher(discovery::publish);

        // start before the cluster service since it adds/removes initial Cluster state blocks
        final TribeService tribeService = injector.getInstance(TribeService.class);
        tribeService.start();

        // Start the transport service now so the publish address will be added to the local disco node in ClusterService
        TransportService transportService = injector.getInstance(TransportService.class);
        transportService.start();
-       injector.getInstance(ClusterService.class).start();
+       clusterService.start();

        // start after cluster service so the local disco is known
-       DiscoveryService discoService = injector.getInstance(DiscoveryService.class).start();
+       discovery.start();
        transportService.acceptIncomingRequests();
-       discoService.joinClusterAndWaitForInitialState();
+       discovery.startInitialJoin();

        // tribe nodes don't have a master so we shouldn't register an observer
        if (DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis() > 0) {
            final ThreadPool thread = injector.getInstance(ThreadPool.class);
            ClusterStateObserver observer = new ClusterStateObserver(clusterService, null, logger, thread.getThreadContext());
            final CountDownLatch latch = new CountDownLatch(1);
            if (observer.observedState().nodes().masterNodeId() == null) {
                observer.waitForNextChange(new ClusterStateObserver.Listener() {
                    @Override
                    public void onNewClusterState(ClusterState state) {
                        latch.countDown();
                    }

                    @Override
                    public void onClusterServiceClose() {
                        latch.countDown();
                    }

                    @Override
                    public void onTimeout(TimeValue timeout) {
                        assert false;
                    }
                    // use null timeout as we use timeout on the latch wait
                }, MasterNodeChangePredicate.INSTANCE, null);
            }

            try {
                latch.await(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.get(settings).millis(), TimeUnit.MILLISECONDS);
            } catch (InterruptedException e) {
                throw new ElasticsearchTimeoutException("Interrupted while waiting for initial discovery state");
            }
        }

        if (settings.getAsBoolean("http.enabled", true)) {
            injector.getInstance(HttpServer.class).start();
        }
-       injector.getInstance(TribeService.class).start();

        // start nodes now, after the http server, because it may take some time
+       tribeService.startNodes();

        if (WRITE_PORTS_FIELD_SETTING.get(settings)) {
            if (settings.getAsBoolean("http.enabled", true)) {

@@ -340,7 +392,6 @@ public class Node implements Closeable {
        injector.getInstance(IndicesTTLService.class).stop();
        injector.getInstance(RoutingService.class).stop();
        injector.getInstance(ClusterService.class).stop();
-       injector.getInstance(DiscoveryService.class).stop();
        injector.getInstance(MonitorService.class).stop();
        injector.getInstance(GatewayService.class).stop();
        injector.getInstance(SearchService.class).stop();

@@ -378,50 +429,50 @@ public class Node implements Closeable {
        toClose.add(injector.getInstance(TribeService.class));
        toClose.add(() -> stopWatch.stop().start("node_service"));
        toClose.add(injector.getInstance(NodeService.class));
        toClose.add(() -> stopWatch.stop().start("http"));
        if (settings.getAsBoolean("http.enabled", true)) {
            toClose.add(injector.getInstance(HttpServer.class));
        }
        toClose.add(() -> stopWatch.stop().start("snapshot_service"));
        toClose.add(injector.getInstance(SnapshotsService.class));
        toClose.add(injector.getInstance(SnapshotShardsService.class));
        toClose.add(() -> stopWatch.stop().start("client"));
        Releasables.close(injector.getInstance(Client.class));
        toClose.add(() -> stopWatch.stop().start("indices_cluster"));
        toClose.add(injector.getInstance(IndicesClusterStateService.class));
        toClose.add(() -> stopWatch.stop().start("indices"));
        toClose.add(injector.getInstance(IndicesTTLService.class));
        toClose.add(injector.getInstance(IndicesService.class));
        // close filter/fielddata caches after indices
        toClose.add(injector.getInstance(IndicesStore.class));
        toClose.add(() -> stopWatch.stop().start("routing"));
        toClose.add(injector.getInstance(RoutingService.class));
        toClose.add(() -> stopWatch.stop().start("cluster"));
        toClose.add(injector.getInstance(ClusterService.class));
-       toClose.add(() ->stopWatch.stop().start("discovery"));
-       toClose.add(injector.getInstance(DiscoveryService.class));
-       toClose.add(() ->stopWatch.stop().start("monitor"));
+       toClose.add(() -> stopWatch.stop().start("discovery"));
+       toClose.add(injector.getInstance(Discovery.class));
+       toClose.add(() -> stopWatch.stop().start("monitor"));
        toClose.add(injector.getInstance(MonitorService.class));
        toClose.add(() -> stopWatch.stop().start("gateway"));
        toClose.add(injector.getInstance(GatewayService.class));
        toClose.add(() -> stopWatch.stop().start("search"));
        toClose.add(injector.getInstance(SearchService.class));
        toClose.add(() -> stopWatch.stop().start("rest"));
        toClose.add(injector.getInstance(RestController.class));
        toClose.add(() -> stopWatch.stop().start("transport"));
        toClose.add(injector.getInstance(TransportService.class));
        toClose.add(() -> stopWatch.stop().start("percolator_service"));
        toClose.add(injector.getInstance(PercolatorService.class));

        for (Class<? extends LifecycleComponent> plugin : pluginsService.nodeServices()) {
            toClose.add(() -> stopWatch.stop().start("plugin(" + plugin.getName() + ")"));
            toClose.add(injector.getInstance(plugin));
        }

        toClose.add(() -> stopWatch.stop().start("script"));
        toClose.add(injector.getInstance(ScriptService.class));

        toClose.add(() -> stopWatch.stop().start("thread_pool"));
        // TODO this should really use ThreadPool.terminate()
        toClose.add(() -> injector.getInstance(ThreadPool.class).shutdown());
        toClose.add(() -> {

@@ -432,7 +483,7 @@ public class Node implements Closeable {
            }
        });

        toClose.add(() -> stopWatch.stop().start("thread_pool_force_shutdown"));
        toClose.add(() -> injector.getInstance(ThreadPool.class).shutdownNow());
        toClose.add(() -> stopWatch.stop());

@@ -71,7 +71,7 @@ public class GeoDistanceSortParser implements SortParser {
        MultiValueMode sortMode = null;
        NestedInnerQueryParseSupport nestedHelper = null;

-       final boolean indexCreatedBeforeV2_0 = context.indexShard().getIndexSettings().getIndexVersionCreated().before(Version.V_2_0_0);
+       final boolean indexCreatedBeforeV2_0 = context.indexShard().indexSettings().getIndexVersionCreated().before(Version.V_2_0_0);
        boolean coerce = GeoDistanceSortBuilder.DEFAULT_COERCE;
        boolean ignoreMalformed = GeoDistanceSortBuilder.DEFAULT_IGNORE_MALFORMED;

@@ -20,7 +20,6 @@
package org.elasticsearch.tribe;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterChangedEvent;

@@ -48,7 +47,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.set.Sets;
import org.elasticsearch.discovery.DiscoveryModule;
-import org.elasticsearch.discovery.DiscoveryService;
+import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.env.Environment;
import org.elasticsearch.gateway.GatewayService;
import org.elasticsearch.node.Node;

@@ -62,7 +61,6 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Function;
import java.util.function.Predicate;

import static java.util.Collections.unmodifiableMap;

@@ -112,7 +110,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
        sb.put(Node.NODE_CLIENT_SETTING.getKey(), true); // this node should just act as a node client
        sb.put(DiscoveryModule.DISCOVERY_TYPE_SETTING.getKey(), "local"); // a tribe node should not use zen discovery
        // nothing is going to be discovered, since no master will be elected
-       sb.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
+       sb.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), 0);
        if (sb.get("cluster.name") == null) {
            sb.put("cluster.name", "tribe_" + Strings.randomBase64UUID()); // make sure it won't join other tribe nodes in the same JVM
        }

@@ -138,7 +136,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
                return s;
            }
            throw new IllegalArgumentException(
                "Invalid value for [tribe.on_conflict] must be either [any, drop or start with prefer_] but was: [" + s + "]");
        }
    }, false, Setting.Scope.CLUSTER);

@@ -154,7 +152,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
        Collections.emptyList(), Function.identity(), false, Setting.Scope.CLUSTER);

    public static final Set<String> TRIBE_SETTING_KEYS = Sets.newHashSet(TRIBE_NAME_SETTING.getKey(), ON_CONFLICT_SETTING.getKey(),
        BLOCKS_METADATA_INDICES_SETTING.getKey(), BLOCKS_METADATA_SETTING.getKey(), BLOCKS_READ_INDICES_SETTING.getKey(), BLOCKS_WRITE_INDICES_SETTING.getKey(), BLOCKS_WRITE_SETTING.getKey());

    private final String onConflict;
    private final Set<String> droppedIndices = ConcurrentCollections.newConcurrentSet();

@@ -162,7 +160,7 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
    private final List<Node> nodes = new CopyOnWriteArrayList<>();

    @Inject
-   public TribeService(Settings settings, ClusterService clusterService, DiscoveryService discoveryService) {
+   public TribeService(Settings settings, ClusterService clusterService) {
        super(settings);
        this.clusterService = clusterService;
        Map<String, Settings> nodesSettings = new HashMap<>(settings.getGroups("tribe", true));

@@ -183,38 +181,36 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
            nodes.add(new TribeClientNode(sb.build()));
        }

        String[] blockIndicesWrite = Strings.EMPTY_ARRAY;
        String[] blockIndicesRead = Strings.EMPTY_ARRAY;
        String[] blockIndicesMetadata = Strings.EMPTY_ARRAY;
        this.blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
        this.blockIndicesRead = BLOCKS_READ_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
        this.blockIndicesWrite = BLOCKS_WRITE_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);

        if (!nodes.isEmpty()) {
            // remove the initial election / recovery blocks since we are not going to have a
            // master elected in this single tribe node local "cluster"
            clusterService.removeInitialStateBlock(discoveryService.getNoMasterBlock());
            clusterService.removeInitialStateBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
            if (BLOCKS_WRITE_SETTING.get(settings)) {
                clusterService.addInitialStateBlock(TRIBE_WRITE_BLOCK);
            }
            blockIndicesWrite = BLOCKS_WRITE_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
            if (BLOCKS_METADATA_SETTING.get(settings)) {
                clusterService.addInitialStateBlock(TRIBE_METADATA_BLOCK);
            }
            blockIndicesMetadata = BLOCKS_METADATA_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
            blockIndicesRead = BLOCKS_READ_INDICES_SETTING.get(settings).toArray(Strings.EMPTY_ARRAY);
            for (Node node : nodes) {
                node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
            }
        }
        this.blockIndicesMetadata = blockIndicesMetadata;
        this.blockIndicesRead = blockIndicesRead;
        this.blockIndicesWrite = blockIndicesWrite;

        this.onConflict = ON_CONFLICT_SETTING.get(settings);
    }

    @Override
    protected void doStart() {
        if (nodes.isEmpty() == false) {
            // remove the initial election / recovery blocks since we are not going to have a
            // master elected in this single tribe node local "cluster"
            clusterService.removeInitialStateBlock(DiscoverySettings.NO_MASTER_BLOCK_ID);
            clusterService.removeInitialStateBlock(GatewayService.STATE_NOT_RECOVERED_BLOCK);
        }
    }

    public void startNodes() {
        for (Node node : nodes) {
            try {
                node.injector().getInstance(ClusterService.class).add(new TribeClusterStateListener(node));
                node.start();
            } catch (Throwable e) {
                // calling close is safe for non started nodes, we can just iterate over all

@@ -410,14 +406,14 @@ public class TribeService extends AbstractLifecycleComponent<TribeService> {
    }

    private void removeIndex(ClusterBlocks.Builder blocks, MetaData.Builder metaData, RoutingTable.Builder routingTable,
            IndexMetaData index) {
        metaData.remove(index.getIndex().getName());
        routingTable.remove(index.getIndex().getName());
        blocks.removeIndexBlocks(index.getIndex().getName());
    }

    private void addNewIndex(ClusterState tribeState, ClusterBlocks.Builder blocks, MetaData.Builder metaData,
            RoutingTable.Builder routingTable, IndexMetaData tribeIndex) {
        Settings tribeSettings = Settings.builder().put(tribeIndex.getSettings()).put(TRIBE_NAME_SETTING.getKey(), tribeName).build();
        metaData.put(IndexMetaData.builder(tribeIndex).settings(tribeSettings));
        routingTable.add(tribeState.routingTable().index(tribeIndex.getIndex()));

@@ -1,177 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */
package org.elasticsearch;

import junit.framework.TestCase;
import org.apache.lucene.util.LuceneTestCase;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.ESTokenStreamTestCase;

import java.io.IOException;
import java.lang.reflect.Modifier;
import java.net.URISyntaxException;
import java.nio.file.FileVisitResult;
import java.nio.file.FileVisitor;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.HashSet;
import java.util.Set;
import java.util.stream.Collectors;

/**
 * Simple class that ensures that all concrete subclasses of ESTestCase end with either Test | Tests
 */
public class NamingConventionTests extends ESTestCase {

    // see https://github.com/elasticsearch/elasticsearch/issues/9945
    public void testNamingConventions()
            throws ClassNotFoundException, IOException, URISyntaxException {
        final Set<Class> notImplementing = new HashSet<>();
        final Set<Class> pureUnitTest = new HashSet<>();
        final Set<Class> missingSuffix = new HashSet<>();
        final Set<Class> integTestsInDisguise = new HashSet<>();
        final Set<Class> notRunnable = new HashSet<>();
        final Set<Class> innerClasses = new HashSet<>();
        String[] packages = {"org.elasticsearch", "org.apache.lucene"};
        for (final String packageName : packages) {
            final String path = "/" + packageName.replace('.', '/');
            final Path startPath = getDataPath(path);
            Files.walkFileTree(startPath, new FileVisitor<Path>() {
                private Path pkgPrefix = PathUtils.get(path).getParent();

                @Override
                public FileVisitResult preVisitDirectory(Path dir, BasicFileAttributes attrs) throws IOException {
                    pkgPrefix = pkgPrefix.resolve(dir.getFileName());
                    return FileVisitResult.CONTINUE;
                }

                @Override
                public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                    try {
                        String filename = file.getFileName().toString();
                        if (filename.endsWith(".class")) {
                            Class<?> clazz = loadClass(filename);
                            if (clazz.getName().endsWith("Tests")) { // don't worry about the ones that match the pattern
                                if (ESIntegTestCase.class.isAssignableFrom(clazz)) {
                                    integTestsInDisguise.add(clazz);
                                }
                                if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) {
                                    notRunnable.add(clazz);
                                } else if (isTestCase(clazz) == false) {
                                    notImplementing.add(clazz);
                                } else if (Modifier.isStatic(clazz.getModifiers())) {
                                    innerClasses.add(clazz);
                                }
                            } else if (clazz.getName().endsWith("IT")) {
                                if (isTestCase(clazz) == false) {
                                    notImplementing.add(clazz);
                                }
                                // otherwise fine
                            } else if (Modifier.isAbstract(clazz.getModifiers()) == false && Modifier.isInterface(clazz.getModifiers()) == false) {
                                if (isTestCase(clazz)) {
                                    missingSuffix.add(clazz);
                                } else if (junit.framework.Test.class.isAssignableFrom(clazz)) {
                                    pureUnitTest.add(clazz);
                                }
                            }
                        }
                    } catch (ClassNotFoundException e) {
                        throw new RuntimeException(e);
                    }
                    return FileVisitResult.CONTINUE;
                }

                private boolean isTestCase(Class<?> clazz) {
                    return LuceneTestCase.class.isAssignableFrom(clazz);
                }

                private Class<?> loadClass(String filename) throws ClassNotFoundException {
                    StringBuilder pkg = new StringBuilder();
                    for (Path p : pkgPrefix) {
                        pkg.append(p.getFileName().toString()).append(".");
                    }
                    pkg.append(filename.substring(0, filename.length() - 6));
                    return Thread.currentThread().getContextClassLoader().loadClass(pkg.toString());
                }

                @Override
                public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException {
                    throw exc;
                }

                @Override
                public FileVisitResult postVisitDirectory(Path dir, IOException exc) throws IOException {
                    pkgPrefix = pkgPrefix.getParent();
                    return FileVisitResult.CONTINUE;
                }
            });
        }
        assertTrue(missingSuffix.remove(WrongName.class));
        assertTrue(missingSuffix.remove(WrongNameTheSecond.class));
        assertTrue(notRunnable.remove(DummyAbstractTests.class));
        assertTrue(notRunnable.remove(DummyInterfaceTests.class));
        assertTrue(innerClasses.remove(InnerTests.class));
        assertTrue(notImplementing.remove(NotImplementingTests.class));
        assertTrue(pureUnitTest.remove(PlainUnit.class));

        String classesToSubclass = String.join(
            ",",
            ESTestCase.class.getSimpleName(),
            ESTestCase.class.getSimpleName(),
            ESTokenStreamTestCase.class.getSimpleName(),
            LuceneTestCase.class.getSimpleName()
        );
        assertNoViolations("Not all subclasses of " + ESTestCase.class.getSimpleName() + " match the naming convention. Concrete classes must end with [Tests]:\n", missingSuffix);
        assertNoViolations("Classes ending with [Tests] are abstract or interfaces:\n", notRunnable);
        assertNoViolations("Found inner classes that are tests, which are excluded from the test runner:\n", innerClasses);
        assertNoViolations("Pure Unit-Test found must subclass one of [" + classesToSubclass + "]:\n", pureUnitTest);
        assertNoViolations("Classes ending with [Tests] must subclass [" + classesToSubclass + "]:\n", notImplementing);
        assertNoViolations("Subclasses of ESIntegTestCase should end with IT as they are integration tests:\n", integTestsInDisguise);
    }

    private String join(Set<Class> set) {
        return set.stream().map(Object::toString).collect(Collectors.joining("\n"));
    }

    private void assertNoViolations(String message, Set<Class> set) {
        assertTrue(message + join(set), set.isEmpty());
    }

    /*
     * Some test classes used by the test above
     */

    public static final class NotImplementingTests {}

    public static final class WrongName extends ESTestCase {}

    public static abstract class DummyAbstractTests extends ESTestCase {}

    public interface DummyInterfaceTests {}

    public static final class InnerTests extends ESTestCase {}

    public static final class WrongNameTheSecond extends ESTestCase {}

    public static final class PlainUnit extends TestCase {}
}

@@ -40,8 +40,8 @@ import org.elasticsearch.cluster.routing.allocation.AllocationService;
import org.elasticsearch.cluster.routing.allocation.FailedRerouteAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.decider.ClusterRebalanceAllocationDecider;
+import org.elasticsearch.cluster.service.InternalClusterService;
import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.index.Index;
import org.elasticsearch.test.ESAllocationTestCase;
import org.junit.Before;

@@ -50,7 +50,6 @@ import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.function.Function;
import java.util.stream.Collectors;

@@ -306,7 +305,7 @@ public class ShardFailedClusterStateTaskExecutorTests extends ESAllocationTestCase {
            return randomSubsetOf(1, shards.toArray(new ShardRouting[0])).get(0);
        } else {
            return
-               TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), DiscoveryService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values()));
+               TestShardRouting.newShardRouting(shardRouting.index(), shardRouting.id(), InternalClusterService.generateNodeId(Settings.EMPTY), randomBoolean(), randomFrom(ShardRoutingState.values()));
        }
    }

@@ -26,6 +26,7 @@ import org.elasticsearch.cluster.routing.allocation.decider.EnableAllocationDecider;
import org.elasticsearch.common.logging.ESLoggerFactory;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
+import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.index.store.IndexStoreConfig;
import org.elasticsearch.test.ESIntegTestCase;

@@ -57,7 +58,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
    }

    public void testDeleteIsAppliedFirst() {
-       DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class);
+       DiscoverySettings discoverySettings = getDiscoverySettings();

        assertEquals(discoverySettings.getPublishTimeout(), DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY));
        assertTrue(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY));

@@ -83,7 +84,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {
    }

    public void testResetClusterSetting() {
-       DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class);
+       DiscoverySettings discoverySettings = getDiscoverySettings();

        assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY)));
        assertThat(discoverySettings.getPublishDiff(), equalTo(DiscoverySettings.PUBLISH_DIFF_ENABLE_SETTING.get(Settings.EMPTY)));

@@ -243,7 +244,7 @@ public class ClusterSettingsIT extends ESIntegTestCase {

    public void testUpdateDiscoveryPublishTimeout() {

-       DiscoverySettings discoverySettings = internalCluster().getInstance(DiscoverySettings.class);
+       DiscoverySettings discoverySettings = getDiscoverySettings();

        assertThat(discoverySettings.getPublishTimeout(), equalTo(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.get(Settings.EMPTY)));

@@ -281,6 +282,8 @@ public class ClusterSettingsIT extends ESIntegTestCase {
        assertThat(discoverySettings.getPublishTimeout().seconds(), equalTo(1L));
    }

+   private DiscoverySettings getDiscoverySettings() { return internalCluster().getInstance(Discovery.class).getDiscoverySettings(); }
+
    public void testClusterUpdateSettingsWithBlocks() {
        String key1 = "cluster.routing.allocation.enable";
        Settings transientSettings = Settings.builder().put(key1, EnableAllocationDecider.Allocation.NONE.name()).build();
|
|||
// don't wait for initial state, wat want to add the disruption while the cluster is forming..
|
||||
internalCluster().startNodesAsync(3,
|
||||
Settings.builder()
|
||||
.put(DiscoveryService.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "1ms")
|
||||
.put(DiscoverySettings.INITIAL_STATE_TIMEOUT_SETTING.getKey(), "1ms")
|
||||
.put(DiscoverySettings.PUBLISH_TIMEOUT_SETTING.getKey(), "3s")
|
||||
.build()).get();
|
||||
|
||||
|
|
|

@@ -34,7 +34,7 @@ public class GatewayServiceTests extends ESTestCase {
            .put("http.enabled", "false")
            .put("discovery.type", "local")
            .put(settings.build()).build(),
-           null, new NoopClusterService(), null, null, null, null, null, new NoopDiscovery());
+           null, new NoopClusterService(), null, null, null, null, new NoopDiscovery());
    }

@@ -117,7 +117,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     */
    public void testNoMatchingAllocationIdFound() {
        RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.CURRENT, "id2");
-       testAllocator.addData(node1, 1, "id1", randomBoolean());
+       testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "id1", randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(false));
        assertThat(allocation.routingNodes().unassigned().ignored().size(), equalTo(1));

@@ -145,7 +145,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        final RoutingAllocation allocation;
        if (randomBoolean()) {
            allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
-           testAllocator.addData(node1, 1, "allocId1", randomBoolean(), new CorruptIndexException("test", "test"));
+           testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean(), new CorruptIndexException("test", "test"));
        } else {
            allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_1_1);
            testAllocator.addData(node1, 3, null, randomBoolean(), new CorruptIndexException("test", "test"));

@@ -164,7 +164,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        boolean useAllocationIds = randomBoolean();
        if (useAllocationIds) {
            allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
-           testAllocator.addData(node1, 1, "allocId1", randomBoolean());
+           testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
        } else {
            allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_2_0);
            testAllocator.addData(node1, 3, null, randomBoolean());

@@ -188,8 +188,8 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        String replicaAllocId = Strings.randomBase64UUID();
        RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), primaryAllocId, replicaAllocId);
        boolean node1HasPrimaryShard = randomBoolean();
-       testAllocator.addData(node1, 1, node1HasPrimaryShard ? primaryAllocId : replicaAllocId, node1HasPrimaryShard);
-       testAllocator.addData(node2, 1, node1HasPrimaryShard ? replicaAllocId : primaryAllocId, !node1HasPrimaryShard);
+       testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, node1HasPrimaryShard ? primaryAllocId : replicaAllocId, node1HasPrimaryShard);
+       testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, node1HasPrimaryShard ? replicaAllocId : primaryAllocId, !node1HasPrimaryShard);
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(true));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));

@@ -206,7 +206,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        final RoutingAllocation allocation;
        if (randomBoolean()) {
            allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
-           testAllocator.addData(node1, 1, "allocId1", randomBoolean());
+           testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
        } else {
            allocation = routingAllocationWithOnePrimaryNoReplicas(throttleAllocationDeciders(), false, Version.V_2_2_0);
            testAllocator.addData(node1, 3, null, randomBoolean());

@@ -225,7 +225,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        final RoutingAllocation allocation;
        if (randomBoolean()) {
            allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, randomFrom(Version.V_2_0_0, Version.CURRENT), "allocId1");
-           testAllocator.addData(node1, 1, "allocId1", randomBoolean());
+           testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, "allocId1", randomBoolean());
        } else {
            allocation = routingAllocationWithOnePrimaryNoReplicas(noAllocationDeciders(), false, Version.V_2_0_0);
            testAllocator.addData(node1, 3, null, randomBoolean());

@@ -250,13 +250,36 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
    }

+   /**
+    * Tests that shard with allocation id is chosen if such a shard is available in version-based allocation mode. This happens if a shard
+    * was already selected in a 5.x cluster as primary for recovery, was initialized (and wrote a new state file) but did not make it to
+    * STARTED state before the cluster crashed (otherwise list of active allocation ids would be non-empty and allocation id - based
+    * allocation mode would be chosen).
+    */
+   public void testVersionBasedAllocationPrefersShardWithAllocationId() {
+       RoutingAllocation allocation = routingAllocationWithOnePrimaryNoReplicas(yesAllocationDeciders(), false, Version.V_2_0_0);
+       testAllocator.addData(node1, 10, null, randomBoolean());
+       testAllocator.addData(node2, ShardStateMetaData.NO_VERSION, "some allocId", randomBoolean());
+       testAllocator.addData(node3, 12, null, randomBoolean());
+       boolean changed = testAllocator.allocateUnassigned(allocation);
+       assertThat(changed, equalTo(true));
+       assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));
+       assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).size(), equalTo(1));
+       assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).currentNodeId(), equalTo(node2.id()));
+       assertThat(allocation.routingNodes().shardsWithState(ShardRoutingState.INITIALIZING).get(0).allocationId().getId(), equalTo("some allocId"));
+   }

    /**
     * Tests that when restoring from a snapshot and we find a node with a shard copy and allocation
     * deciders say yes, we allocate to that node.
     */
    public void testRestore() {
-       RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders());
-       testAllocator.addData(node1, 1, randomFrom(null, "allocId"), randomBoolean());
+       boolean shardStateHasAllocationId = randomBoolean();
+       String allocationId = shardStateHasAllocationId ? "some allocId" : null;
+       long legacyVersion = shardStateHasAllocationId ? ShardStateMetaData.NO_VERSION : 1;
+       boolean clusterHasActiveAllocationIds = shardStateHasAllocationId ? randomBoolean() : false;
+       RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), clusterHasActiveAllocationIds);
+       testAllocator.addData(node1, legacyVersion, allocationId, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(true));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));

@@ -268,8 +291,12 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * deciders say throttle, we add it to ignored shards.
     */
    public void testRestoreThrottle() {
-       RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders());
-       testAllocator.addData(node1, 1, randomFrom(null, "allocId"), randomBoolean());
+       boolean shardStateHasAllocationId = randomBoolean();
+       String allocationId = shardStateHasAllocationId ? "some allocId" : null;
+       long legacyVersion = shardStateHasAllocationId ? ShardStateMetaData.NO_VERSION : 1;
+       boolean clusterHasActiveAllocationIds = shardStateHasAllocationId ? randomBoolean() : false;
+       RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders(), clusterHasActiveAllocationIds);
+       testAllocator.addData(node1, legacyVersion, allocationId, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(false));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));

@@ -280,8 +307,12 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * deciders say no, we still allocate to that node.
     */
    public void testRestoreForcesAllocateIfShardAvailable() {
-       RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders());
-       testAllocator.addData(node1, 1, randomFrom(null, "some allocId"), randomBoolean());
+       boolean shardStateHasAllocationId = randomBoolean();
+       String allocationId = shardStateHasAllocationId ? "some allocId" : null;
+       long legacyVersion = shardStateHasAllocationId ? ShardStateMetaData.NO_VERSION : 1;
+       boolean clusterHasActiveAllocationIds = shardStateHasAllocationId ? randomBoolean() : false;
+       RoutingAllocation allocation = getRestoreRoutingAllocation(noAllocationDeciders(), clusterHasActiveAllocationIds);
+       testAllocator.addData(node1, legacyVersion, allocationId, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(true));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));

@@ -293,7 +324,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * the unassigned list to be allocated later.
     */
    public void testRestoreDoesNotAssignIfNoShardAvailable() {
-       RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders());
+       RoutingAllocation allocation = getRestoreRoutingAllocation(yesAllocationDeciders(), randomBoolean());
        testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, null, false);
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(false));

@@ -301,11 +332,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
    }

-   private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders) {
-       Version version = randomFrom(Version.CURRENT, Version.V_2_0_0);
+   private RoutingAllocation getRestoreRoutingAllocation(AllocationDeciders allocationDeciders, boolean hasActiveAllocation) {
+       Version version = hasActiveAllocation ? Version.CURRENT : Version.V_2_0_0;
        MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(version)).numberOfShards(1).numberOfReplicas(0)
-               .putActiveAllocationIds(0, version == Version.CURRENT ? Sets.newHashSet("allocId") : Collections.emptySet()))
+               .putActiveAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
            .build();

        RoutingTable routingTable = RoutingTable.builder()

@@ -323,8 +354,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * deciders say yes, we allocate to that node.
     */
    public void testRecoverOnAnyNode() {
-       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders());
-       testAllocator.addData(node1, 1, randomFrom(null, "allocId"), randomBoolean());
+       boolean hasActiveAllocation = randomBoolean();
+       String allocationId = hasActiveAllocation ? "allocId" : null;
+       long legacyVersion = hasActiveAllocation ? ShardStateMetaData.NO_VERSION : 1;
+       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), hasActiveAllocation);
+       testAllocator.addData(node1, legacyVersion, allocationId, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(true));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));

@@ -336,8 +370,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * deciders say throttle, we add it to ignored shards.
     */
    public void testRecoverOnAnyNodeThrottle() {
-       RoutingAllocation allocation = getRestoreRoutingAllocation(throttleAllocationDeciders());
-       testAllocator.addData(node1, 1, randomFrom(null, "allocId"), randomBoolean());
+       boolean hasActiveAllocation = randomBoolean();
+       String allocationId = hasActiveAllocation ? "allocId" : null;
+       long legacyVersion = hasActiveAllocation ? ShardStateMetaData.NO_VERSION : 1;
+       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(throttleAllocationDeciders(), hasActiveAllocation);
+       testAllocator.addData(node1, legacyVersion, allocationId, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(false));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(false));

@@ -348,8 +385,11 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * deciders say no, we still allocate to that node.
     */
    public void testRecoverOnAnyNodeForcesAllocateIfShardAvailable() {
-       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders());
-       testAllocator.addData(node1, 1, randomFrom(null, "allocId"), randomBoolean());
+       boolean hasActiveAllocation = randomBoolean();
+       String allocationId = hasActiveAllocation ? "allocId" : null;
+       long legacyVersion = hasActiveAllocation ? ShardStateMetaData.NO_VERSION : 1;
+       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(noAllocationDeciders(), hasActiveAllocation);
+       testAllocator.addData(node1, legacyVersion, allocationId, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(true));
        assertThat(allocation.routingNodes().unassigned().ignored().isEmpty(), equalTo(true));

@@ -361,7 +401,7 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
     * BalancedShardAllocator assign the shard
     */
    public void testRecoverOnAnyNodeDoesNotAssignIfNoShardAvailable() {
-       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders());
+       RoutingAllocation allocation = getRecoverOnAnyNodeRoutingAllocation(yesAllocationDeciders(), randomBoolean());
        testAllocator.addData(node1, ShardStateMetaData.NO_VERSION, null, randomBoolean());
        boolean changed = testAllocator.allocateUnassigned(allocation);
        assertThat(changed, equalTo(false));

@@ -369,13 +409,13 @@ public class PrimaryShardAllocatorTests extends ESAllocationTestCase {
        assertThat(allocation.routingNodes().unassigned().size(), equalTo(1));
    }

-   private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders) {
-       Version version = randomFrom(Version.CURRENT, Version.V_2_0_0);
+   private RoutingAllocation getRecoverOnAnyNodeRoutingAllocation(AllocationDeciders allocationDeciders, boolean hasActiveAllocation) {
+       Version version = hasActiveAllocation ? Version.CURRENT : Version.V_2_0_0;
        MetaData metaData = MetaData.builder()
            .put(IndexMetaData.builder(shardId.getIndexName()).settings(settings(version)
                .put(IndexMetaData.SETTING_SHARED_FILESYSTEM, true)
                .put(IndexMetaData.SETTING_SHARED_FS_ALLOW_RECOVERY_ON_ANY_NODE, true))
-               .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, version == Version.CURRENT ? Sets.newHashSet("allocId") : Collections.emptySet()))
+               .numberOfShards(1).numberOfReplicas(0).putActiveAllocationIds(0, hasActiveAllocation ? Sets.newHashSet("allocId") : Collections.emptySet()))
            .build();

        RoutingTable routingTable = RoutingTable.builder()

@@ -204,13 +204,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        assertEquals(shardStateMetaData, getShardStateMetadata(shard));
        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));

-       // test if we still write it even if the shard is not active
-       ShardRouting inactiveRouting = TestShardRouting.newShardRouting(shard.shardRouting.index(), shard.shardRouting.shardId().id(), shard.shardRouting.currentNodeId(), null, null, true, ShardRoutingState.INITIALIZING);
-       shard.persistMetadata(inactiveRouting, shard.shardRouting);
-       shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
-       assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, getShardStateMetadata(shard));
-       assertEquals("inactive shard state shouldn't be persisted", shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));

        // check that we don't write shard state metadata if persist == false
        ShardRouting updatedRouting = new ShardRouting(shard.shardRouting);
        TestShardRouting.relocate(updatedRouting, "some node", 42L);
        shard.updateRoutingEntry(updatedRouting, false);

@@ -218,6 +212,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        assertFalse("shard state persisted despite of persist=false", shardStateMetaData.equals(getShardStateMetadata(shard)));
        assertEquals("shard state persisted despite of persist=false", shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));

        // check that we write shard state metadata if persist == true
+       shard.updateRoutingEntry(routing, false); // move back state in IndexShard
        routing = new ShardRouting(updatedRouting);
        shard.updateRoutingEntry(routing, true);

@@ -226,33 +221,6 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        assertEquals(shardStateMetaData, new ShardStateMetaData(routing.primary(), shard.indexSettings().getUUID(), routing.allocationId()));
    }

-   public void testDeleteShardState() throws IOException {
-       createIndex("test");
-       ensureGreen();
-       IndicesService indicesService = getInstanceFromNode(IndicesService.class);
-       NodeEnvironment env = getInstanceFromNode(NodeEnvironment.class);
-       IndexService test = indicesService.indexService("test");
-       IndexShard shard = test.getShardOrNull(0);
-       try {
-           shard.deleteShardState();
-           fail("shard is active metadata delete must fail");
-       } catch (IllegalStateException ex) {
-           // fine - only delete if non-active
-       }
-
-       ShardRouting routing = shard.routingEntry();
-       ShardStateMetaData shardStateMetaData = load(logger, env.availableShardPaths(shard.shardId));
-       assertEquals(shardStateMetaData, getShardStateMetadata(shard));
-
-       routing = TestShardRouting.newShardRouting(shard.shardId.getIndex(), shard.shardId.id(), routing.currentNodeId(), null, routing.primary(), ShardRoutingState.INITIALIZING, shard.shardRouting.allocationId());
-       shard.updateRoutingEntry(routing, true);
-       shard.deleteShardState();
-
-       assertNull("no shard state expected after delete on initializing", load(logger, env.availableShardPaths(shard.shardId)));
-
-   }

    public void testFailShard() throws Exception {
        createIndex("test");
        ensureGreen();

@@ -973,7 +941,7 @@ public class IndexShardTests extends ESSingleNodeTestCase {
        assertHitCount(client().prepareSearch().get(), 1);
    }

-   public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedException {
+   public void testRecoveryFailsAfterMovingToRelocatedState() throws InterruptedException, IOException {
        createIndex("test");
        ensureGreen();
        IndicesService indicesService = getInstanceFromNode(IndicesService.class);

@@ -30,13 +30,13 @@ import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.admin.indices.stats.IndicesStatsResponse;
import org.elasticsearch.action.index.IndexRequestBuilder;
import org.elasticsearch.action.search.SearchResponse;
+import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.command.MoveAllocationCommand;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeUnit;
import org.elasticsearch.common.unit.ByteSizeValue;
-import org.elasticsearch.discovery.DiscoveryService;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.store.Store;
import org.elasticsearch.indices.IndicesService;

@@ -558,7 +558,7 @@ public class IndexRecoveryIT extends ESIntegTestCase {
        ensureSearchable(indexName);

        ClusterStateResponse stateResponse = client().admin().cluster().prepareState().get();
-       final String blueNodeId = internalCluster().getInstance(DiscoveryService.class, blueNodeName).localNode().id();
+       final String blueNodeId = internalCluster().getInstance(ClusterService.class, blueNodeName).localNode().id();

        assertFalse(stateResponse.getState().getRoutingNodes().node(blueNodeId).isEmpty());

@@ -25,8 +25,8 @@ import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.component.Lifecycle;
import org.elasticsearch.common.component.LifecycleListener;
import org.elasticsearch.discovery.Discovery;
+import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
-import org.elasticsearch.discovery.InitialStateDiscoveryListener;
import org.elasticsearch.node.service.NodeService;

public class NoopDiscovery implements Discovery {

@@ -37,16 +37,6 @@ public class NoopDiscovery implements Discovery {
        return null;
    }

-   @Override
-   public void addListener(InitialStateDiscoveryListener listener) {
-
-   }
-
-   @Override
-   public void removeListener(InitialStateDiscoveryListener listener) {
-
-   }
-
    @Override
    public String nodeDescription() {
        return null;

@@ -72,6 +62,11 @@ public class NoopDiscovery implements Discovery {
        return null;
    }

+   @Override
+   public DiscoverySettings getDiscoverySettings() {
+       return null;
+   }
+
    @Override
    public void startInitialJoin() {
@ -172,7 +172,7 @@ case "$1" in
|
|||
i=0
|
||||
timeout=10
|
||||
# Wait for the process to be properly started before exiting
|
||||
until { cat "$PID_FILE" | xargs kill -0; } >/dev/null 2>&1
|
||||
until { kill -0 `cat "$PID_FILE"`; } >/dev/null 2>&1
|
||||
do
|
||||
sleep 1
|
||||
i=$(($i + 1))
|
||||
|
|
|
@ -118,14 +118,6 @@ if [ "x$JAVA_TOOL_OPTIONS" != "x" ]; then
|
|||
unset JAVA_TOOL_OPTIONS
|
||||
fi
|
||||
|
||||
# Special-case path variables.
|
||||
case `uname` in
|
||||
CYGWIN*)
|
||||
ES_CLASSPATH=`cygpath -p -w "$ES_CLASSPATH"`
|
||||
ES_HOME=`cygpath -p -w "$ES_HOME"`
|
||||
;;
|
||||
esac
|
||||
|
||||
# full hostname passed through cut for portability on systems that do not support hostname -s
|
||||
# export on separate line for shells that do not support combining definition and export
|
||||
HOSTNAME=`hostname | cut -d. -f1`
|
||||
|
|
|

@@ -7,21 +7,12 @@ You can use the *Java client* in multiple ways:
  <<java-docs-delete,delete>> and <<java-search,search>> operations on an
  existing cluster
* Perform administrative tasks on a running cluster
-* Start full nodes when you want to run Elasticsearch embedded in your
-  own application or when you want to launch unit or integration tests

Obtaining an elasticsearch `Client` is simple. The most common way to
-get a client is by:
-
-1. Creating an embedded <<node-client,`Node`>> that acts as a node
-   within a cluster.
-2. Requesting a `Client` from your embedded `Node`.
-
-Another manner is by creating a <<transport-client,`TransportClient`>>
+get a client is by creating a <<transport-client,`TransportClient`>>
that connects to a cluster.

*Important:*

______________________________________________________________________________________________________________________________________________________________
Please note that you are encouraged to use the same version on client
and cluster sides. You may hit some incompatibility issues when mixing
@@ -29,111 +20,6 @@ major versions.
______________________________________________________________________________________________________________________________________________________________

[[node-client]]
=== Node Client

Instantiating a node based client is the simplest way to get a `Client`
that can execute operations against elasticsearch.

[source,java]
--------------------------------------------------
// on startup

Node node = new Node(Settings.EMPTY).start();
Client client = node.client();

// on shutdown

node.close();
--------------------------------------------------

When you start a `Node`, it joins an elasticsearch cluster. You can have
different clusters by simply setting the `cluster.name` setting, or
explicitly using the `clusterName` method on the builder.

You can define `cluster.name` in the `/src/main/resources/elasticsearch.yml`
file in your project. As long as `elasticsearch.yml` is present in the
classpath, it will be used when you start your node.

[source,yaml]
--------------------------------------------------
cluster.name: yourclustername
--------------------------------------------------

Or in Java:

[source,java]
--------------------------------------------------
Node node = nodeBuilder().clusterName("yourclustername").node();
Client client = node.client();
--------------------------------------------------

The benefit of using the `Client` is the fact that operations are
automatically routed to the node(s) the operations need to be executed
on, without performing a "double hop". For example, the index operation
will automatically be executed on the shard that it will end up existing
at.

When you start a `Node`, the most important decision is whether it
should hold data or not. In other words, should indices and shards be
allocated to it. Many times we would like to have the clients just be
clients, without shards being allocated to them. This is simple to
configure by setting either `node.data` setting to `false` or
`node.client` to `true` (the `NodeBuilder` respective helper methods on
it):

[source,java]
--------------------------------------------------
// on startup

// Embedded node clients behave just like standalone nodes,
// which means that they will leave the HTTP port open!
Node node = new Node(Settings.settingsBuilder()
        .put("http.enabled", false)
        .put("node.client", true).build())
    .start();

Client client = node.client();

// on shutdown

node.close();
--------------------------------------------------

Another common usage is to start the `Node` and use the `Client` in
unit/integration tests. In such a case, we would like to start a "local"
`Node` (with a "local" discovery and transport). Again, this is just a
matter of a simple setting when starting the `Node`. Note, "local" here
means local on the JVM (well, actually class loader) level, meaning that
two *local* servers started within the same JVM will discover themselves
and form a cluster.

[source,java]
--------------------------------------------------
// on startup

Node node = new Node(Settings.builder().put("node.local", true).build()).start();
Client client = node.client();

// on shutdown

node.close();
--------------------------------------------------

[[node-client-downsides]]
==== Node Client Downsides

Embedding a node client into your application is the easiest way to connect
to an Elasticsearch cluster, but it carries some downsides.

- Frequently starting and stopping one or more node clients creates unnecessary
  noise across the cluster.
- Embedded node client will respond to outside requests, just like any other client.
  ** You almost always want to disable HTTP for an _embedded_ node client.

[[transport-client]]
=== Transport Client
@ -194,3 +80,12 @@ from a node. Defaults to `5s`.
|
|||
|`client.transport.nodes_sampler_interval` |How often to sample / ping
|
||||
the nodes listed and connected. Defaults to `5s`.
|
||||
|=======================================================================
|
||||
|
||||
|
||||
[[client-connected-to-client-node]]
|
||||
=== Connecting a Client to a Client Node
|
||||
|
||||
You can start locally a {ref}/modules-node.html#client-node[Client Node] and then simply create
|
||||
a <<transport-client,`TransportClient`>> in your application which connects to this Client Node.
|
||||
|
||||
This way, the client node will be able to load whatever plugin you need (think about discovery plugins for example).
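
For illustration, a minimal sketch of that setup (the host, port, and the exact builder API are assumptions and may differ between versions):

[source,java]
--------------------------------------------------
// connect a TransportClient to the locally running client node
TransportClient client = TransportClient.builder().build()
        .addTransportAddress(new InetSocketTransportAddress(
                InetAddress.getByName("localhost"), 9300)); // assumed host/port

// ... use the client ...

client.close();
--------------------------------------------------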
@ -68,6 +68,10 @@ do {
    scrollResp = client.prepareSearchScroll(scrollResp.getScrollId()).setScroll(new TimeValue(60000)).execute().actionGet();
} while(scrollResp.getHits().getHits().length != 0); // Zero hits mark the end of the scroll and the while loop.
--------------------------------------------------

[NOTE]
====
The `size` parameter is per shard, so if you run a query against multiple indices (leading to many shards being involved in the query), the result might contain more documents per scroll execution than you would expect!
====
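
For example (a hedged sketch; the index name and shard count are assumptions), an index with five primary shards and `setSize(100)` may return up to 500 hits per scroll round:

[source,java]
--------------------------------------------------
// size is applied per shard: 5 shards * size 100 = up to 500 hits per round
SearchResponse scrollResp = client.prepareSearch("my_index") // hypothetical index
        .setScroll(new TimeValue(60000))
        .setSize(100)
        .execute().actionGet();
--------------------------------------------------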

[[java-search-msearch]]
=== MultiSearch API

@ -64,8 +64,10 @@ cloud:

`my_account1` is the default account which will be used by a repository unless you set an explicit one.

You can set the timeout to use when making any single request. It can be defined globally, per account or both.
Defaults to `5m`.
You can set the client side timeout to use when making any single request. It can be defined globally, per account or both.
It is not set by default, which means that elasticsearch uses the
http://azure.github.io/azure-storage-java/com/microsoft/azure/storage/RequestOptions.html#setTimeoutIntervalInMs(java.lang.Integer)[default value]
set by the azure client (currently 5 minutes).

[source,yaml]
----

@ -24,7 +24,7 @@ Pipeline aggregations cannot have sub-aggregations but depending on the type it
allowing pipeline aggregations to be chained. For example, you can chain together two derivatives to calculate the second derivative
(i.e. a derivative of a derivative).

NOTE: Because pipeline aggregations only add to the output, when chaining pipeline aggregations the output of each pipeline aggregation
NOTE: Because pipeline aggregations only add to the output, when chaining pipeline aggregations the output of each pipeline aggregation
will be included in the final output.

[[buckets-path-syntax]]

@ -104,7 +104,7 @@ a metric embedded inside a sibling aggregation:
`sales_per_month` date histogram.

[float]
==== Special Paths
=== Special Paths

Instead of pathing to a metric, `buckets_path` can use a special `"_count"` path. This instructs
the pipeline aggregation to use the document count as its input. For example, a moving average can be calculated on the document

@ -128,6 +128,20 @@ count of each bucket, instead of a specific metric:
--------------------------------------------------
<1> By using `_count` instead of a metric name, we can calculate the moving average of document counts in the histogram

[[dots-in-agg-names]]
[float]
=== Dealing with dots in agg names

An alternate syntax is supported to cope with aggregations or metrics which
have dots in the name, such as the ++99.9++th
<<search-aggregations-metrics-percentile-aggregation,percentile>>. This metric
may be referred to as:

[source,js]
---------------
"buckets_path": "my_percentile[99.9]"
---------------

[[gap-policy]]
[float]
=== Dealing with gaps in the data

@ -4,6 +4,16 @@

A char filter of type `mapping` replaces the characters of an analyzed text
with the given mappings.

[horizontal]
`mappings`::

A list of mappings to use.

`mappings_path`::

A path, relative to the `config` directory, to a mappings file
configuration.

Here is a sample configuration:

[source,js]

@ -14,7 +24,10 @@ Here is a sample configuration:
        "char_filter" : {
            "my_mapping" : {
                "type" : "mapping",
                "mappings" : ["ph=>f", "qu=>k"]
                "mappings" : [
                    "ph => f",
                    "qu => k"
                ]
            }
        },
        "analyzer" : {

@ -27,12 +40,3 @@ Here is a sample configuration:
        }
    }
--------------------------------------------------

Otherwise the setting `mappings_path` can specify a file where you can
put the list of char mappings:

[source,js]
--------------------------------------------------
ph => f
qu => k
--------------------------------------------------
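
The same analysis settings can also be supplied through the Java client when creating an index; a minimal sketch (the index and analyzer names are illustrative assumptions):

[source,java]
--------------------------------------------------
// register the mapping char filter from the sample configuration above
client.admin().indices().prepareCreate("my_index") // hypothetical index name
        .setSettings(Settings.settingsBuilder()
                .put("analysis.char_filter.my_mapping.type", "mapping")
                .putArray("analysis.char_filter.my_mapping.mappings", "ph => f", "qu => k")
                .put("analysis.analyzer.my_analyzer.tokenizer", "standard")
                .putArray("analysis.analyzer.my_analyzer.char_filter", "my_mapping"))
        .execute().actionGet();
--------------------------------------------------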

@ -16,7 +16,7 @@ As a general rule:

See <<setup-upgrade>> for more info.
--
include::migrate_3_0.asciidoc[]
include::migrate_5_0.asciidoc[]

include::migrate_2_3.asciidoc[]

@ -191,6 +191,12 @@ to check for a custom config location. The value of the `CONF_DIR`
variable can be set in the environment config file which is located either in
`/etc/default/elasticsearch` or `/etc/sysconfig/elasticsearch`.

==== Custom analysis file paths

It is no longer possible to set a custom file path outside `CONF_DIR` for `*_path` settings
in <<analysis-charfilters,char>> or <<analysis-tokenfilters,token>> filters.
You must specify either a path relative to `CONF_DIR` or an absolute path inside the `CONF_DIR` location.

==== `ES_CLASSPATH` removed

The `ES_CLASSPATH` environment variable is no longer used to set the class

@ -1,28 +1,28 @@
[[breaking-changes-3.0]]
== Breaking changes in 3.0
[[breaking-changes-5.0]]
== Breaking changes in 5.0

This section discusses the changes that you need to be aware of when migrating
your application to Elasticsearch 3.0.
your application to Elasticsearch 5.0.

* <<breaking_30_search_changes>>
* <<breaking_30_rest_api_changes>>
* <<breaking_30_cat_api>>
* <<breaking_30_parent_child_changes>>
* <<breaking_30_settings_changes>>
* <<breaking_30_mapping_changes>>
* <<breaking_30_plugins>>
* <<breaking_30_java_api_changes>>
* <<breaking_30_cache_concurrency>>
* <<breaking_30_non_loopback>>
* <<breaking_30_thread_pool>>
* <<breaking_30_allocation>>
* <<breaking_30_percolator>>
* <<breaking_30_packaging>>
* <<breaking_30_scripting>>
* <<breaking_30_term_vectors>>
* <<breaking_30_security>>
* <<breaking_50_search_changes>>
* <<breaking_50_rest_api_changes>>
* <<breaking_50_cat_api>>
* <<breaking_50_parent_child_changes>>
* <<breaking_50_settings_changes>>
* <<breaking_50_mapping_changes>>
* <<breaking_50_plugins>>
* <<breaking_50_java_api_changes>>
* <<breaking_50_cache_concurrency>>
* <<breaking_50_non_loopback>>
* <<breaking_50_thread_pool>>
* <<breaking_50_allocation>>
* <<breaking_50_percolator>>
* <<breaking_50_packaging>>
* <<breaking_50_scripting>>
* <<breaking_50_term_vectors>>
* <<breaking_50_security>>

[[breaking_30_search_changes]]
[[breaking_50_search_changes]]
=== Warmers

Thanks to several changes like doc values by default or disk-based norms,

@ -31,7 +31,7 @@ API have been removed: it is not possible anymore to register queries that
will run before a new IndexSearcher is published.

Don't worry if you have warmers defined on your indices, they will simply be
ignored when upgrading to 3.0.
ignored when upgrading to 5.0.

=== Search changes

@ -94,7 +94,7 @@ Scroll requests sorted by `_doc` have been optimized to more efficiently resume
from where the previous request stopped, so this will have the same performance
characteristics as the former `scan` search type.
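
In the Java API this amounts to sorting the scroll request on `_doc` (a sketch; the index name is an assumption):

[source,java]
--------------------------------------------------
// sorting on _doc scrolls in the cheapest possible order,
// replacing the removed scan search type
SearchResponse scrollResp = client.prepareSearch("my_index") // hypothetical index
        .addSort("_doc", SortOrder.ASC)
        .setScroll(new TimeValue(60000))
        .execute().actionGet();
--------------------------------------------------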

[[breaking_30_rest_api_changes]]
[[breaking_50_rest_api_changes]]
=== REST API changes

==== id values longer than 512 bytes are rejected

@ -166,7 +166,7 @@ Removed support for the undocumented `query_binary` and `filter_binary` sections

Payloads are now loaded when needed.

[[breaking_30_cat_api]]
[[breaking_50_cat_api]]
=== CAT API changes

==== Use Accept header for specifying response media type

@ -184,14 +184,14 @@ The `host` field has been removed from the cat nodes API as its value
is always equal to the `ip` field. The `name` field is available in the
cat nodes API and should be used instead of the `host` field.

[[breaking_30_parent_child_changes]]
[[breaking_50_parent_child_changes]]
=== Parent/Child changes

The `children` aggregation, parent child inner hits and `has_child` and `has_parent` queries will not work on indices
with `_parent` field mapping created before version `2.0.0`. The data of these indices needs to be re-indexed into a new index.

The format of the join between parent and child documents has changed with the `2.0.0` release. The old
format can't be read from version `3.0.0` onwards. The new format allows for a much more efficient and
format can't be read from version `5.0.0` onwards. The new format allows for a much more efficient and
scalable join between parent and child documents: the join data structures are now stored on
disk, as opposed to before, when the join data structures were kept in the JVM heap space.

@ -213,15 +213,15 @@ is needed then the `max_children` option shouldn't be defined at all on the `has

==== `_parent` field no longer indexed

The join between parent and child documents no longer relies on indexed fields and therefore from `3.0.0` onwards
The join between parent and child documents no longer relies on indexed fields and therefore from `5.0.0` onwards
the `_parent` field won't be indexed. In order to find documents that refer to a specific parent id,
the new `parent_id` query can be used. The get response and the hits inside the search response continue to include
the parent id under the `_parent` key.

[[breaking_30_settings_changes]]
[[breaking_50_settings_changes]]
=== Settings changes

From Elasticsearch 3.0 on all settings are validated before they are applied. Node level and default index
From Elasticsearch 5.0 on all settings are validated before they are applied. Node level and default index
level settings are validated on node startup, dynamic cluster and index settings are validated before they are updated/added
to the cluster state. Every setting must be a _known_ setting, or in other words all settings must be registered with the
node or transport client they are used with. This implies that plugins that define custom settings must register all of their

@ -330,7 +330,7 @@ control the queue implementation used in the cluster service and the
handling of ping responses during discovery. This was an undocumented
setting and has been removed.
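
Following the plugin examples elsewhere in this commit, a plugin registering a custom setting so the validation knows about it could look like this sketch (the plugin and setting names are hypothetical):

[source,java]
--------------------------------------------------
public class MyPlugin extends Plugin { // hypothetical plugin

    // declare the setting so it becomes a _known_ setting
    public static final Setting<Boolean> MY_SETTING =
            Setting.boolSetting("my_plugin.enabled", true, false, Setting.Scope.CLUSTER);

    @Override
    public String name() {
        return "my-plugin";
    }

    @Override
    public String description() {
        return "demonstrates registering a custom setting";
    }

    public void onModule(SettingsModule settingsModule) {
        settingsModule.registerSetting(MY_SETTING);
    }
}
--------------------------------------------------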

[[breaking_30_mapping_changes]]
[[breaking_50_mapping_changes]]
=== Mapping changes

==== Default doc values settings

@ -358,8 +358,8 @@ instead of `not_analyzed`/`no`. The `string` field still accepts

==== ++_source++'s `format` option

The `_source` mapping does not support the `format` option anymore. This option
will still be accepted for indices created before the upgrade to 3.0 for backward
compatibility, but it will have no effect. Indices created on or after 3.0 will
will still be accepted for indices created before the upgrade to 5.0 for backward
compatibility, but it will have no effect. Indices created on or after 5.0 will
reject this option.

==== Object notation

@ -368,12 +368,12 @@ Core types don't support the object notation anymore, which allowed to provide
values as follows:

[source,json]
-----
---------------
{
  "value": "field_value",
  "boost": 42
}
----
---------------

==== `fielddata.format`

@ -382,7 +382,7 @@ enable doc values on a field. This no longer works: the only way to enable or
disable doc values is by using the `doc_values` property of mappings.

[[breaking_30_plugins]]
[[breaking_50_plugins]]
=== Plugin changes

The command `bin/plugin` has been renamed to `bin/elasticsearch-plugin`.

@ -479,7 +479,7 @@ Mapper attachments has been deprecated. Users should use now the {plugins}/inges
plugin.

[[breaking_30_java_api_changes]]
[[breaking_50_java_api_changes]]
=== Java API changes

==== Count API has been removed
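
The typical replacement is a search with `size` set to `0`; a minimal sketch (the index name is an assumption, and this mirrors the commonly documented migration path rather than text preserved in this diff):

[source,java]
--------------------------------------------------
// a size-0 search returns only the total hit count
long count = client.prepareSearch("my_index") // hypothetical index
        .setSize(0)
        .execute().actionGet()
        .getHits().getTotalHits();
--------------------------------------------------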

@ -671,21 +671,28 @@ The inner DirectCandidateGenerator class has been moved out to its own class cal

The `setText` method has been changed to `setGlobalText` to make the intent clearer, and a `getGlobalText` method has been added.
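
A before/after sketch of the rename (the suggest text is illustrative):

[source,java]
--------------------------------------------------
SuggestBuilder suggestBuilder = new SuggestBuilder();
// previously: suggestBuilder.setText("elasticsaerch");
suggestBuilder.setGlobalText("elasticsaerch");      // applies to all suggestions
String globalText = suggestBuilder.getGlobalText(); // newly added accessor
--------------------------------------------------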

[[breaking_30_cache_concurrency]]
==== Elasticsearch will no longer detect logging implementations

Elasticsearch now logs only to log4j 1.2. Previously, if log4j wasn't on the classpath, it made some effort to degrade to
slf4j or java.util.logging. Now it'll fail to work without the log4j 1.2 API. The log4j-over-slf4j bridge ought to work
when using the Java client, as should log4j 2's log4j-1.2-api. The Elasticsearch server now only supports log4j as
configured by logging.yml and it no longer makes any effort to work if log4j isn't present.

[[breaking_50_cache_concurrency]]
=== Cache concurrency level settings removed

The two cache concurrency level settings `indices.requests.cache.concurrency_level` and
`indices.fielddata.cache.concurrency_level` have been removed because they no longer apply to the cache implementation used for the
request cache and the field data cache.

[[breaking_30_non_loopback]]
[[breaking_50_non_loopback]]
=== Remove bind option of `non_loopback`

This setting would arbitrarily pick the first interface not marked as loopback. Instead, specify by address
scope (e.g. `_local_,_site_` for all loopback and private network addresses) or by explicit interface names,
hostnames, or addresses.

[[breaking_30_thread_pool]]
[[breaking_50_thread_pool]]
=== Forbid changing of thread pool types

Previously, <<modules-threadpool,thread pool types>> could be dynamically adjusted. The thread pool type effectively

@ -694,7 +701,7 @@ and high risk of being misused. The ability to change the thread pool type for a
that it is still possible to adjust relevant thread pool parameters for each of the thread pools (e.g., depending on
the thread pool type, `keep_alive`, `queue_size`, etc.).
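
For instance, a queue size could still be adjusted dynamically; a sketch, assuming the 2.x-era `threadpool.search.queue_size` setting key:

[source,java]
--------------------------------------------------
// tune a thread pool parameter without changing the pool type
client.admin().cluster().prepareUpdateSettings()
        .setTransientSettings(Settings.settingsBuilder()
                .put("threadpool.search.queue_size", 2000) // assumed setting key
                .build())
        .execute().actionGet();
--------------------------------------------------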

[[breaking_30_cpu_stats]]
[[breaking_50_cpu_stats]]
=== System CPU stats

The recent CPU usage (as a percent) has been added to the OS stats

@ -725,7 +732,7 @@ CPU usage can be obtained from `OsStats.Cpu#getPercent`.

Only stored fields are retrievable with this option.
The fields option won't be able to load non-stored fields from `_source` anymore.

[[breaking_30_allocation]]
[[breaking_50_allocation]]
=== Primary shard allocation

Previously, primary shards were only assigned if a quorum of shard copies were found (configurable using

@ -762,7 +769,7 @@ in the case where shard copies can be found. Previously, a node not holding the
holding shard copies were satisfying the allocation deciders. Now, the shard will be assigned to a node having a shard copy,
even if none of the nodes holding a shard copy satisfy the allocation deciders.

[[breaking_30_percolator]]
[[breaking_50_percolator]]
=== Percolator

Adding percolator queries and modifications to existing percolator queries are no longer visible in immediately

@ -785,7 +792,7 @@ unmapped fields are not persisted in the mapping.

Percolator documents are no longer excluded from the search response.

[[breaking_30_packaging]]
[[breaking_50_packaging]]
=== Packaging

==== Default logging using systemd (since Elasticsearch 2.2.0)

@ -799,7 +806,7 @@ changed to now route standard output to the journal and standard error
to inherit this setting (these are the defaults for systemd). These
settings can be modified by editing the elasticsearch.service file.

[[breaking_30_scripting]]
[[breaking_50_scripting]]
=== Scripting

==== Script mode settings

@ -811,7 +818,18 @@ values `off`, `false`, `0`, and `no` for disabling a scripting mode.
The variants `on`, `1`, and `yes` for enabling and `off`, `0`,
and `no` for disabling are no longer supported.

[[breaking_30_term_vectors]]
==== Groovy dependencies

In previous versions of Elasticsearch, the Groovy scripting capabilities
depended on the `org.codehaus.groovy:groovy-all` artifact. In addition
to pulling in the Groovy language, this pulls in a very large set of
functionality, none of which is needed for scripting within
Elasticsearch. Aside from the inherent difficulties in managing such a
large set of dependencies, this also increases the surface area for
security issues. This dependency has been reduced to the core Groovy
language `org.codehaus.groovy:groovy` artifact.

[[breaking_50_term_vectors]]
=== Term vectors

The term vectors APIs no longer persist unmapped fields in the mappings.

@ -819,9 +837,8 @@ The term vectors APIs no longer persist unmapped fields in the mappings.

The `dfs` parameter has been removed completely, term vectors don't support
distributed document frequencies anymore.

[[breaking_30_security]]
[[breaking_50_security]]
=== Security

The option to disable the security manager `--security.manager.enabled` has been removed. In order to grant special
permissions to Elasticsearch, users must tweak the local Java Security Policy.

@ -30,8 +30,7 @@ as gossip routers. It provides the following settings with the

|=======================================================================
|Setting |Description
|`hosts` |Either an array setting or a comma delimited setting. Each
value is either in the form of `host:port`, or in the form of
`host:port1-port2`. Note that IPv6 hosts must be bracketed. Defaults to
value should be in the form of `host:port` or `host` (where `port` defaults to `9300`). Note that IPv6 hosts must be bracketed. Defaults to
`127.0.0.1, [::1]`
|=======================================================================

@ -163,9 +163,8 @@ settings, but may be further configured independently:

TCP Transport::

Used for communication between nodes in the cluster and by the Java
{javaclient}/node-client.html[Node client],
{javaclient}/transport-client.html[Transport client], and by the
Used for communication between nodes in the cluster, by the Java
{javaclient}/transport-client.html[Transport client] and by the
<<modules-tribe,Tribe node>>. See the <<modules-transport,Transport module>>
for more information.

@ -79,7 +79,7 @@ sudo /bin/systemctl enable elasticsearch.service
--------------------------------------------------

[float]
=== YUM
=== YUM / DNF

Download and install the public signing key:

@ -95,9 +95,9 @@ in a file with a `.repo` suffix, for example `elasticsearch.repo`
--------------------------------------------------
[elasticsearch-{major-version}]
name=Elasticsearch repository for {major-version} packages
baseurl=http://packages.elastic.co/elasticsearch/{major-version}/centos
baseurl=https://packages.elastic.co/elasticsearch/{major-version}/centos
gpgcheck=1
gpgkey=http://packages.elastic.co/GPG-KEY-elasticsearch
gpgkey=https://packages.elastic.co/GPG-KEY-elasticsearch
enabled=1
--------------------------------------------------

@ -108,6 +108,13 @@ And your repository is ready for use. You can install it with:
yum install elasticsearch
--------------------------------------------------

Or, for newer versions of Fedora and Red Hat:

[source,sh]
--------------------------------------------------
dnf install elasticsearch
--------------------------------------------------

Configure Elasticsearch to automatically start during bootup. If your
distribution is using SysV `init` (check with `ps -p 1`), then you will need to run:
@ -40,7 +40,7 @@ import java.util.Locale;
@ThreadLeakScope(Scope.NONE)
@TimeoutSuite(millis = TimeUnits.HOUR)
@LuceneTestCase.SuppressSysoutChecks(bugUrl = "we log a lot on purpose")
public class TestIndexableBinaryStringTools extends LuceneTestCase {
public class IndexableBinaryStringToolsTests extends LuceneTestCase {
    private static int NUM_RANDOM_TESTS;
    private static int MAX_RANDOM_BINARY_LENGTH;
    private static final String LINE_SEPARATOR = System.lineSeparator();

@ -31,7 +31,7 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.azure.AzureDiscovery;
import org.elasticsearch.plugin.discovery.azure.AzureDiscoveryPlugin;

/**
 * Azure Module

@ -72,8 +72,8 @@ public class AzureDiscoveryModule extends AbstractModule {
     */
    public static boolean isDiscoveryReady(Settings settings, ESLogger logger) {
        // User set discovery.type: azure
        if (!AzureDiscovery.AZURE.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) {
            logger.trace("discovery.type not set to {}", AzureDiscovery.AZURE);
        if (!AzureDiscoveryPlugin.AZURE.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) {
            logger.trace("discovery.type not set to {}", AzureDiscoveryPlugin.AZURE);
            return false;
        }

@ -1,48 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.azure;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 *
 */
public class AzureDiscovery extends ZenDiscovery {

    public static final String AZURE = "azure";

    @Inject
    public AzureDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService,
                          ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService,
                          DiscoverySettings discoverySettings, ElectMasterService electMasterService) {
        super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings,
                pingService, electMasterService, discoverySettings);
    }
}

@ -27,8 +27,8 @@ import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.azure.AzureDiscovery;
import org.elasticsearch.discovery.azure.AzureUnicastHostsProvider;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.Plugin;

import java.util.Collection;

@ -36,6 +36,7 @@ import java.util.Collections;

public class AzureDiscoveryPlugin extends Plugin {

    public static final String AZURE = "azure";
    private final Settings settings;
    protected final ESLogger logger = Loggers.getLogger(AzureDiscoveryPlugin.class);

@ -61,8 +62,8 @@ public class AzureDiscoveryPlugin extends Plugin {

    public void onModule(DiscoveryModule discoveryModule) {
        if (AzureDiscoveryModule.isDiscoveryReady(settings, logger)) {
            discoveryModule.addDiscoveryType("azure", AzureDiscovery.class);
            discoveryModule.addUnicastHostProvider(AzureUnicastHostsProvider.class);
            discoveryModule.addDiscoveryType(AZURE, ZenDiscovery.class);
            discoveryModule.addUnicastHostProvider(AZURE, AzureUnicastHostsProvider.class);
        }
    }

@ -20,10 +20,6 @@
package org.elasticsearch.cloud.aws;

import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.ec2.Ec2Discovery;

public class Ec2Module extends AbstractModule {

@ -32,17 +28,4 @@ public class Ec2Module extends AbstractModule {
        bind(AwsEc2Service.class).to(AwsEc2ServiceImpl.class).asEagerSingleton();
    }

    /**
     * Check if discovery is meant to start
     * @return true if we can start discovery features
     */
    public static boolean isEc2DiscoveryActive(Settings settings, ESLogger logger) {
        // User set discovery.type: ec2
        if (!Ec2Discovery.EC2.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) {
            logger.trace("discovery.type not set to {}", Ec2Discovery.EC2);
            return false;
        }

        return true;
    }
}

@ -1,49 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.ec2;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

/**
 *
 */
public class Ec2Discovery extends ZenDiscovery {

    public static final String EC2 = "ec2";

    @Inject
    public Ec2Discovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService,
                        ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService,
                        DiscoverySettings discoverySettings,
                        ElectMasterService electMasterService) {
        super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings,
                pingService, electMasterService, discoverySettings);
    }
}

@ -32,7 +32,7 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.SettingsModule;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.ec2.AwsEc2UnicastHostsProvider;
import org.elasticsearch.discovery.ec2.Ec2Discovery;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.plugins.Plugin;

import java.security.AccessController;

@ -45,6 +45,8 @@ import java.util.Collection;
 */
public class Ec2DiscoveryPlugin extends Plugin {

    public static final String EC2 = "ec2";

    // ClientConfiguration clinit has some classloader problems
    // TODO: fix that
    static {

@ -98,10 +100,8 @@ public class Ec2DiscoveryPlugin extends Plugin {
    }

    public void onModule(DiscoveryModule discoveryModule) {
        if (Ec2Module.isEc2DiscoveryActive(settings, logger)) {
            discoveryModule.addDiscoveryType("ec2", Ec2Discovery.class);
            discoveryModule.addUnicastHostProvider(AwsEc2UnicastHostsProvider.class);
        }
        discoveryModule.addDiscoveryType(EC2, ZenDiscovery.class);
        discoveryModule.addUnicastHostProvider(EC2, AwsEc2UnicastHostsProvider.class);
    }

    public void onModule(SettingsModule settingsModule) {

@ -21,7 +21,6 @@ package org.elasticsearch.discovery.ec2;

import com.amazonaws.Protocol;
import org.elasticsearch.cloud.aws.AwsEc2Service;
import org.elasticsearch.cloud.aws.Ec2Module;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.test.ESTestCase;

@ -30,21 +29,6 @@ import static org.hamcrest.Matchers.isEmptyString;

public class Ec2DiscoverySettingsTests extends ESTestCase {

    public void testDiscoveryReady() {
        Settings settings = Settings.builder()
                .put("discovery.type", "ec2")
                .build();
        boolean discoveryReady = Ec2Module.isEc2DiscoveryActive(settings, logger);
        assertThat(discoveryReady, is(true));
    }

    public void testDiscoveryNotReady() {
        Settings settings = Settings.EMPTY;
        boolean discoveryReady = Ec2Module.isEc2DiscoveryActive(settings, logger);
        assertThat(discoveryReady, is(false));
    }

    private static final Settings AWS = Settings.builder()
            .put(AwsEc2Service.KEY_SETTING.getKey(), "global-key")
            .put(AwsEc2Service.SECRET_SETTING.getKey(), "global-secret")

@ -1,3 +1,4 @@
import org.elasticsearch.gradle.LoggedExec

esplugin {
    description 'The Google Compute Engine (GCE) Discovery plugin allows to use GCE API for the unicast discovery mechanism.'

@ -21,6 +22,36 @@ dependencies {
    compile "commons-codec:commons-codec:${versions.commonscodec}"
}

// needed to be consistent with ssl host checking
String host = InetAddress.getLoopbackAddress().getHostAddress();

// location of keystore and files to generate it
File keystore = new File(project.buildDir, 'keystore/test-node.jks')

// generate the keystore
task createKey(type: LoggedExec) {
    doFirst {
        project.delete(keystore.parentFile)
        keystore.parentFile.mkdirs()
    }
    executable = 'keytool'
    standardInput = new ByteArrayInputStream('FirstName LastName\nUnit\nOrganization\nCity\nState\nNL\nyes\n\n'.getBytes('UTF-8'))
    args '-genkey',
            '-alias', 'test-node',
            '-keystore', keystore,
            '-keyalg', 'RSA',
            '-keysize', '2048',
            '-validity', '712',
            '-dname', 'CN=' + host,
            '-keypass', 'keypass',
            '-storepass', 'keypass'
}

// add keystore to test classpath: it expects it there
sourceSets.test.resources.srcDir(keystore.parentFile)
processTestResources.dependsOn(createKey)

dependencyLicenses {
    mapping from: /google-.*/, to: 'google'
}
@ -22,10 +22,8 @@ package org.elasticsearch.cloud.gce;
import com.google.api.services.compute.model.Instance;
import org.elasticsearch.common.component.LifecycleComponent;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;

import java.util.Arrays;
import java.io.IOException;
import java.util.Collection;
import java.util.Collections;

@ -25,6 +25,7 @@ import com.google.api.client.http.GenericUrl;
import com.google.api.client.http.HttpHeaders;
import com.google.api.client.http.HttpResponse;
import com.google.api.client.http.HttpTransport;
import com.google.api.client.http.javanet.NetHttpTransport;
import com.google.api.client.json.JsonFactory;
import com.google.api.client.json.jackson2.JacksonFactory;
import com.google.api.services.compute.Compute;

@ -36,12 +37,14 @@ import org.elasticsearch.cloud.gce.network.GceNameResolver;
import org.elasticsearch.common.component.AbstractLifecycleComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.gce.RetryHttpInitializerWrapper;

import java.io.IOException;
import java.net.URL;
import java.nio.file.Files;
import java.security.AccessController;
import java.security.GeneralSecurityException;
import java.security.PrivilegedAction;

@ -51,18 +54,29 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.function.Function;

public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceComputeService>
    implements GceComputeService {

    // all settings just used for testing - not registered by default
    public static final Setting<Boolean> GCE_VALIDATE_CERTIFICATES =
        Setting.boolSetting("cloud.gce.validate_certificates", true, false, Setting.Scope.CLUSTER);
    public static final Setting<String> GCE_HOST =
        new Setting<>("cloud.gce.host", "http://metadata.google.internal", Function.identity(), false, Setting.Scope.CLUSTER);
    public static final Setting<String> GCE_ROOT_URL =
        new Setting<>("cloud.gce.root_url", "https://www.googleapis.com", Function.identity(), false, Setting.Scope.CLUSTER);

    private final String project;
    private final List<String> zones;

    // Forcing Google Token API URL as set in GCE SDK to
    //     http://metadata/computeMetadata/v1/instance/service-accounts/default/token
    // See https://developers.google.com/compute/docs/metadata#metadataserver
    public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/instance";
    public static final String TOKEN_SERVER_ENCODED_URL = GCE_METADATA_URL + "/service-accounts/default/token";
    private final String gceHost;
    private final String metaDataUrl;
    private final String tokenServerEncodedUrl;
    private String gceRootUrl;

    @Override
    public Collection<Instance> instances() {

@ -85,7 +99,7 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute
                // assist type inference
                return instanceList.isEmpty() ? Collections.<Instance>emptyList() : instanceList.getItems();
            } catch (PrivilegedActionException e) {
                logger.warn("Problem fetching instance list for zone {}", zoneId);
                logger.warn("Problem fetching instance list for zone {}", e, zoneId);
                logger.debug("Full exception:", e);
                // assist type inference
                return Collections.<Instance>emptyList();

@ -104,7 +118,7 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute

    @Override
    public String metadata(String metadataPath) throws IOException {
        String urlMetadataNetwork = GCE_METADATA_URL + "/" + metadataPath;
        String urlMetadataNetwork = this.metaDataUrl + "/" + metadataPath;
        logger.debug("get metadata from [{}]", urlMetadataNetwork);
        final URL url = new URL(urlMetadataNetwork);
        HttpHeaders headers;

@ -153,17 +167,28 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute
    /** Global instance of the JSON factory. */
    private JsonFactory gceJsonFactory;

    private final boolean validateCerts;

    @Inject
    public GceComputeServiceImpl(Settings settings, NetworkService networkService) {
        super(settings);
        this.project = PROJECT_SETTING.get(settings);
        this.zones = ZONE_SETTING.get(settings);
        this.gceHost = GCE_HOST.get(settings);
        this.metaDataUrl = gceHost + "/computeMetadata/v1/instance";
        this.gceRootUrl = GCE_ROOT_URL.get(settings);
        tokenServerEncodedUrl = metaDataUrl + "/service-accounts/default/token";
        this.validateCerts = GCE_VALIDATE_CERTIFICATES.get(settings);
        networkService.addCustomNameResolver(new GceNameResolver(settings, this));
    }

    protected synchronized HttpTransport getGceHttpTransport() throws GeneralSecurityException, IOException {
        if (gceHttpTransport == null) {
            gceHttpTransport = GoogleNetHttpTransport.newTrustedTransport();
            if (validateCerts) {
                gceHttpTransport = GoogleNetHttpTransport.newTrustedTransport();
            } else {
                // this is only used for testing - alternatively we could use the default keystore but this requires special configs too..
                gceHttpTransport = new NetHttpTransport.Builder().doNotValidateCertificate().build();
            }
        }
        return gceHttpTransport;
    }

@ -183,7 +208,7 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute

        logger.info("starting GCE discovery service");
        ComputeCredential credential = new ComputeCredential.Builder(getGceHttpTransport(), gceJsonFactory)
            .setTokenServerEncodedUrl(TOKEN_SERVER_ENCODED_URL)
            .setTokenServerEncodedUrl(this.tokenServerEncodedUrl)
            .build();

        // hack around code messiness in GCE code

@ -205,7 +230,9 @@ public class GceComputeServiceImpl extends AbstractLifecycleComponent<GceCompute
            refreshInterval = TimeValue.timeValueSeconds(credential.getExpiresInSeconds() - 1);
        }

        Compute.Builder builder = new Compute.Builder(getGceHttpTransport(), gceJsonFactory, null).setApplicationName(VERSION);

        Compute.Builder builder = new Compute.Builder(getGceHttpTransport(), gceJsonFactory, null).setApplicationName(VERSION)
            .setRootUrl(gceRootUrl);

        if (RETRY_SETTING.exists(settings)) {
            TimeValue maxWait = MAX_WAIT_SETTING.get(settings);

@ -1,59 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.discovery.gce;

import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.Collections;
import java.util.List;

/**
 *
 */
public class GceDiscovery extends ZenDiscovery {

    public static final String GCE = "gce";

    /**
     * discovery.gce.tags: The gce discovery can filter machines to include in the cluster based on tags.
     */
    public static final Setting<List<String>> TAGS_SETTING =
        Setting.listSetting("discovery.gce.tags", Collections.emptyList(), s -> s, false, Setting.Scope.CLUSTER);

    @Inject
    public GceDiscovery(Settings settings, ClusterName clusterName, ThreadPool threadPool, TransportService transportService,
                        ClusterService clusterService, ClusterSettings clusterSettings, ZenPingService pingService,
                        DiscoverySettings discoverySettings,
                        ElectMasterService electMasterService) {
        super(settings, clusterName, threadPool, transportService, clusterService, clusterSettings,
                pingService, electMasterService, discoverySettings);
    }
}
@ -30,6 +30,7 @@ import org.elasticsearch.common.component.AbstractComponent;
|
|||
import org.elasticsearch.common.inject.Inject;
|
||||
import org.elasticsearch.common.network.NetworkAddress;
|
||||
import org.elasticsearch.common.network.NetworkService;
|
||||
import org.elasticsearch.common.settings.Setting;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.transport.TransportAddress;
|
||||
import org.elasticsearch.common.unit.TimeValue;
|
||||
|
@ -40,6 +41,7 @@ import java.io.IOException;
|
|||
import java.net.InetAddress;
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
|
@ -47,6 +49,12 @@ import java.util.List;
|
|||
*/
|
||||
public class GceUnicastHostsProvider extends AbstractComponent implements UnicastHostsProvider {
|
||||
|
||||
/**
|
||||
* discovery.gce.tags: The gce discovery can filter machines to include in the cluster based on tags.
|
||||
*/
|
||||
public static final Setting<List<String>> TAGS_SETTING =
|
||||
Setting.listSetting("discovery.gce.tags", Collections.emptyList(), s -> s, false, Setting.Scope.CLUSTER);
|
||||
|
||||
static final class Status {
|
||||
private static final String TERMINATED = "TERMINATED";
|
||||
}
|
||||
|
@ -79,7 +87,7 @@ public class GceUnicastHostsProvider extends AbstractComponent implements Unicas
|
|||
this.project = GceComputeService.PROJECT_SETTING.get(settings);
|
||||
this.zones = GceComputeService.ZONE_SETTING.get(settings);
|
||||
|
||||
this.tags = GceDiscovery.TAGS_SETTING.get(settings);
|
||||
this.tags = TAGS_SETTING.get(settings);
|
||||
if (logger.isDebugEnabled()) {
|
||||
logger.debug("using tags {}", this.tags);
|
||||
}
|
||||
|
|
|
@ -21,7 +21,6 @@ package org.elasticsearch.plugin.discovery.gce;
|
|||
|
||||
import com.google.api.client.http.HttpHeaders;
|
||||
import com.google.api.client.util.ClassInfo;
|
||||
|
||||
import org.elasticsearch.SpecialPermission;
|
||||
import org.elasticsearch.cloud.gce.GceComputeService;
|
||||
import org.elasticsearch.cloud.gce.GceModule;
|
||||
|
@ -32,8 +31,8 @@ import org.elasticsearch.common.logging.Loggers;
|
|||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.discovery.DiscoveryModule;
|
||||
import org.elasticsearch.discovery.gce.GceDiscovery;
|
||||
import org.elasticsearch.discovery.gce.GceUnicastHostsProvider;
|
||||
import org.elasticsearch.discovery.zen.ZenDiscovery;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
|
||||
import java.security.AccessController;
|
||||
|
@ -42,6 +41,9 @@ import java.util.Collection;
|
|||
import java.util.Collections;
|
||||
|
||||
public class GceDiscoveryPlugin extends Plugin {
|
||||
|
||||
public static final String GCE = "gce";
|
||||
|
||||
static {
|
||||
/*
|
||||
* GCE's http client changes access levels because its silly and we
|
||||
|
@ -93,18 +95,16 @@ public class GceDiscoveryPlugin extends Plugin {
|
|||
}
|
||||
|
||||
public void onModule(DiscoveryModule discoveryModule) {
|
||||
discoveryModule.addDiscoveryType("gce", GceDiscovery.class);
|
||||
discoveryModule.addDiscoveryType(GCE, ZenDiscovery.class);
|
||||
// If discovery.type: gce, we add Gce as a unicast provider
|
||||
if (GceDiscovery.GCE.equalsIgnoreCase(DiscoveryModule.DISCOVERY_TYPE_SETTING.get(settings))) {
|
||||
discoveryModule.addUnicastHostProvider(GceUnicastHostsProvider.class);
|
||||
}
|
||||
discoveryModule.addUnicastHostProvider(GCE, GceUnicastHostsProvider.class);
|
||||
}
|
||||
|
||||
public void onModule(SettingsModule settingsModule) {
|
||||
// Register GCE settings
|
||||
settingsModule.registerSetting(GceComputeService.PROJECT_SETTING);
|
||||
settingsModule.registerSetting(GceComputeService.ZONE_SETTING);
|
||||
settingsModule.registerSetting(GceDiscovery.TAGS_SETTING);
|
||||
settingsModule.registerSetting(GceUnicastHostsProvider.TAGS_SETTING);
|
||||
settingsModule.registerSetting(GceComputeService.REFRESH_SETTING);
|
||||
settingsModule.registerSetting(GceComputeService.RETRY_SETTING);
|
||||
settingsModule.registerSetting(GceComputeService.MAX_WAIT_SETTING);
|
||||
|
|
|
@ -20,5 +20,6 @@
|
|||
grant {
|
||||
// needed because of problems in gce
|
||||
permission java.lang.RuntimePermission "accessDeclaredMembers";
|
||||
permission java.lang.RuntimePermission "setFactory";
|
||||
permission java.lang.reflect.ReflectPermission "suppressAccessChecks";
|
||||
};
|
||||
|
|
|
@ -55,6 +55,8 @@ public class GceComputeServiceMock extends GceComputeServiceImpl {
|
|||
return this.mockHttpTransport;
|
||||
}
|
||||
|
||||
public static final String GCE_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/instance";
|
||||
|
||||
protected HttpTransport configureMock() {
|
||||
return new MockHttpTransport() {
|
||||
@Override
|
||||
|
@ -80,19 +82,18 @@ public class GceComputeServiceMock extends GceComputeServiceImpl {
|
|||
};
|
||||
}
|
||||
|
||||
private String readGoogleInternalJsonResponse(String url) throws IOException {
|
||||
public static String readGoogleInternalJsonResponse(String url) throws IOException {
|
||||
return readJsonResponse(url, "http://metadata.google.internal/");
|
||||
}
|
||||
|
||||
private String readGoogleApiJsonResponse(String url) throws IOException {
|
||||
public static String readGoogleApiJsonResponse(String url) throws IOException {
|
||||
return readJsonResponse(url, "https://www.googleapis.com/");
|
||||
}
|
||||
|
||||
private String readJsonResponse(String url, String urlRoot) throws IOException {
|
||||
private static String readJsonResponse(String url, String urlRoot) throws IOException {
|
||||
// We extract from the url the mock file path we want to use
|
||||
String mockFileName = Strings.replace(url, urlRoot, "");
|
||||
|
||||
logger.debug("--> read mock file from [{}]", mockFileName);
|
||||
URL resource = GceComputeServiceMock.class.getResource(mockFileName);
|
||||
if (resource == null) {
|
||||
throw new IOException("can't read [" + url + "] in src/test/resources/org/elasticsearch/discovery/gce");
|
||||
|
@ -106,7 +107,6 @@ public class GceComputeServiceMock extends GceComputeServiceImpl {
|
|||
}
|
||||
});
|
||||
String response = sb.toString();
|
||||
logger.trace("{}", response);
|
||||
return response;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,215 @@
|
|||
/*
|
||||
* Licensed to Elasticsearch under one or more contributor
|
||||
* license agreements. See the NOTICE file distributed with
|
||||
* this work for additional information regarding copyright
|
||||
* ownership. Elasticsearch licenses this file to you under
|
||||
* the Apache License, Version 2.0 (the "License"); you may
|
||||
* not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing,
|
||||
* software distributed under the License is distributed on an
|
||||
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||
* KIND, either express or implied. See the License for the
|
||||
* specific language governing permissions and limitations
|
||||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.discovery.gce;
|
||||
|
||||
import com.sun.net.httpserver.Headers;
|
||||
import com.sun.net.httpserver.HttpServer;
|
||||
import com.sun.net.httpserver.HttpsConfigurator;
|
||||
import com.sun.net.httpserver.HttpsServer;
|
||||
import org.elasticsearch.cloud.gce.GceComputeServiceImpl;
|
||||
import org.elasticsearch.common.SuppressForbidden;
|
||||
import org.elasticsearch.common.io.FileSystemUtils;
|
||||
import org.elasticsearch.common.logging.ESLogger;
|
||||
import org.elasticsearch.common.logging.Loggers;
|
||||
import org.elasticsearch.common.settings.Settings;
|
||||
import org.elasticsearch.common.settings.SettingsModule;
|
||||
import org.elasticsearch.plugin.discovery.gce.GceDiscoveryPlugin;
|
||||
import org.elasticsearch.plugins.Plugin;
|
||||
import org.elasticsearch.test.ESIntegTestCase;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
||||
import javax.net.ssl.KeyManagerFactory;
|
||||
import javax.net.ssl.SSLContext;
|
||||
import javax.net.ssl.TrustManagerFactory;
|
||||
import java.io.IOException;
|
||||
import java.io.InputStream;
|
||||
import java.io.OutputStream;
|
||||
import java.net.InetAddress;
|
||||
import java.net.InetSocketAddress;
|
||||
import java.nio.charset.StandardCharsets;
|
||||
import java.nio.file.Files;
|
||||
import java.nio.file.Path;
|
||||
import java.security.KeyStore;
|
||||
import java.util.Collection;
|
||||
import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.concurrent.ExecutionException;
|
||||
|
||||
import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoTimeout;
|
||||
|
||||
|
||||
@ESIntegTestCase.SuppressLocalMode
|
||||
@ESIntegTestCase.ClusterScope(numDataNodes = 2, numClientNodes = 0)
|
||||
@SuppressForbidden(reason = "use http server")
|
||||
// TODO this should be a IT but currently all ITs in this project run against a real cluster
|
||||
public class GceDiscoverTests extends ESIntegTestCase {
|
||||
|
||||
public static class TestPlugin extends Plugin {
|
||||
|
||||
@Override
|
||||
public String name() {
|
||||
return "GceDiscoverTests";
|
||||
}
|
||||
|
||||
@Override
|
||||
public String description() {
|
||||
return "GceDiscoverTests";
|
||||
}
|
||||
|
||||
public void onModule(SettingsModule module) {
|
||||
module.registerSetting(GceComputeServiceImpl.GCE_HOST);
|
||||
module.registerSetting(GceComputeServiceImpl.GCE_ROOT_URL);
|
||||
module.registerSetting(GceComputeServiceImpl.GCE_VALIDATE_CERTIFICATES);
|
||||
}
|
||||
}
|
||||
|
||||
    private static HttpsServer httpsServer;
    private static HttpServer httpServer;
    private static Path logDir;

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return pluginList(GceDiscoveryPlugin.class, TestPlugin.class);
    }

    @Override
    protected Settings nodeSettings(int nodeOrdinal) {
        Path resolve = logDir.resolve(Integer.toString(nodeOrdinal));
        try {
            Files.createDirectory(resolve);
        } catch (IOException e) {
            throw new RuntimeException(e);
        }
        return Settings.builder().put(super.nodeSettings(nodeOrdinal))
            .put("discovery.type", "gce")
            .put("path.logs", resolve)
            .put("transport.tcp.port", 0)
            .put("node.portsfile", "true")
            .put("cloud.gce.project_id", "testproject")
            .put("cloud.gce.zone", "primaryzone")
            .put("discovery.initial_state_timeout", "1s")
            .put("cloud.gce.host", "http://" + httpServer.getAddress().getHostName() + ":" + httpServer.getAddress().getPort())
            .put("cloud.gce.root_url", "https://" + httpsServer.getAddress().getHostName() +
                ":" + httpsServer.getAddress().getPort())
            // this is annoying but by default the client pulls a static list of trusted CAs
            .put("cloud.gce.validate_certificates", false)
            .build();
    }

    @BeforeClass
    public static void startHttpd() throws Exception {
        logDir = createTempDir();
        SSLContext sslContext = getSSLContext();
        httpsServer = HttpsServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0);
        httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress().getHostAddress(), 0), 0);
        httpsServer.setHttpsConfigurator(new HttpsConfigurator(sslContext));
        httpServer.createContext("/computeMetadata/v1/instance/service-accounts/default/token", (s) -> {
            String response = GceComputeServiceMock.readGoogleInternalJsonResponse(
                "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token");
            byte[] responseAsBytes = response.getBytes(StandardCharsets.UTF_8);
            s.sendResponseHeaders(200, responseAsBytes.length);
            OutputStream responseBody = s.getResponseBody();
            responseBody.write(responseAsBytes);
            responseBody.close();
        });

        httpsServer.createContext("/compute/v1/projects/testproject/zones/primaryzone/instances", (s) -> {
            Headers headers = s.getResponseHeaders();
            headers.add("Content-Type", "application/json; charset=UTF-8");
            ESLogger logger = Loggers.getLogger(GceDiscoverTests.class);
            try {
                Path[] files = FileSystemUtils.files(logDir);
                StringBuilder builder = new StringBuilder("{\"id\": \"dummy\",\"items\":[");
                int foundFiles = 0;
                for (int i = 0; i < files.length; i++) {
                    Path resolve = files[i].resolve("transport.ports");
                    if (Files.exists(resolve)) {
                        if (foundFiles++ > 0) {
                            builder.append(",");
                        }
                        List<String> addresses = Files.readAllLines(resolve);
                        Collections.shuffle(addresses, random());
                        logger.debug("addresses for node: [{}] published addresses [{}]", files[i].getFileName(), addresses);
                        builder.append("{\"description\": \"ES Node ").append(files[i].getFileName())
                            .append("\",\"networkInterfaces\": [ {");
                        builder.append("\"networkIP\": \"").append(addresses.get(0)).append("\"}],");
                        builder.append("\"status\" : \"RUNNING\"}");
                    }
                }
                builder.append("]}");
                String responseString = builder.toString();
                final byte[] responseAsBytes = responseString.getBytes(StandardCharsets.UTF_8);
                s.sendResponseHeaders(200, responseAsBytes.length);
                OutputStream responseBody = s.getResponseBody();
                responseBody.write(responseAsBytes);
                responseBody.close();
            } catch (Exception e) {
                // surface the failure to the client as a 500 so the test fails fast instead of timing out
                byte[] responseAsBytes = ("{ \"error\" : {\"message\" : \"" + e.toString() + "\" } }").getBytes(StandardCharsets.UTF_8);
                s.sendResponseHeaders(500, responseAsBytes.length);
                OutputStream responseBody = s.getResponseBody();
                responseBody.write(responseAsBytes);
                responseBody.close();
            }
        });
        httpsServer.start();
        httpServer.start();
    }

    private static SSLContext getSSLContext() throws Exception {
        char[] passphrase = "keypass".toCharArray();
        KeyStore ks = KeyStore.getInstance("JKS");
        try (InputStream stream = GceDiscoverTests.class.getResourceAsStream("/test-node.jks")) {
            assertNotNull("can't find keystore file", stream);
            ks.load(stream, passphrase);
        }
        KeyManagerFactory kmf = KeyManagerFactory.getInstance("SunX509");
        kmf.init(ks, passphrase);
        TrustManagerFactory tmf = TrustManagerFactory.getInstance("SunX509");
        tmf.init(ks);
        SSLContext ssl = SSLContext.getInstance("TLS");
        ssl.init(kmf.getKeyManagers(), tmf.getTrustManagers(), null);
        return ssl;
    }

    @AfterClass
    public static void stopHttpd() throws IOException {
        for (int i = 0; i < internalCluster().size(); i++) {
            // shut them all down otherwise we get spammed with connection refused exceptions
            internalCluster().stopRandomDataNode();
        }
        httpsServer.stop(0);
        httpServer.stop(0);
        httpsServer = null;
        httpServer = null;
        logDir = null;
    }

    public void testJoin() throws ExecutionException, InterruptedException {
        // only wait for the cluster to form
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(2)).get());
        // add one more node and wait for it to join
        internalCluster().startDataOnlyNodeAsync().get();
        assertNoTimeout(client().admin().cluster().prepareHealth().setWaitForNodes(Integer.toString(3)).get());
    }
}
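
The class above stands in for the GCE metadata and compute endpoints with the JDK's built-in HTTP server so discovery can be exercised without real cloud infrastructure. A minimal, self-contained sketch of that pattern (the path and JSON payload below are illustrative, not the actual GCE responses):

import com.sun.net.httpserver.HttpServer;

import java.io.OutputStream;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.nio.charset.StandardCharsets;

public class FakeEndpointSketch {
    public static void main(String[] args) throws Exception {
        // bind to an ephemeral loopback port so nothing external is required
        HttpServer server = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0);
        server.createContext("/computeMetadata/v1/instance", exchange -> {
            byte[] body = "{\"id\": \"dummy\"}".getBytes(StandardCharsets.UTF_8);
            exchange.getResponseHeaders().add("Content-Type", "application/json; charset=UTF-8");
            exchange.sendResponseHeaders(200, body.length);
            try (OutputStream out = exchange.getResponseBody()) {
                out.write(body);
            }
        });
        server.start();
        // a client under test would now be pointed at http://127.0.0.1:<port>
        System.out.println("listening on " + server.getAddress());
        server.stop(0);
    }
}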

@@ -131,7 +131,7 @@ public class GceDiscoveryTests extends ESTestCase {
         Settings nodeSettings = Settings.builder()
                 .put(GceComputeService.PROJECT_SETTING.getKey(), projectName)
                 .put(GceComputeService.ZONE_SETTING.getKey(), "europe-west1-b")
-                .putArray(GceDiscovery.TAGS_SETTING.getKey(), "elasticsearch")
+                .putArray(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch")
                 .build();
         mock = new GceComputeServiceMock(nodeSettings, networkService);
         List<DiscoveryNode> discoveryNodes = buildDynamicNodes(mock, nodeSettings);
@@ -143,7 +143,7 @@ public class GceDiscoveryTests extends ESTestCase {
         Settings nodeSettings = Settings.builder()
                 .put(GceComputeService.PROJECT_SETTING.getKey(), projectName)
                 .put(GceComputeService.ZONE_SETTING.getKey(), "europe-west1-b")
-                .putArray(GceDiscovery.TAGS_SETTING.getKey(), "elasticsearch", "dev")
+                .putArray(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch", "dev")
                 .build();
         mock = new GceComputeServiceMock(nodeSettings, networkService);
         List<DiscoveryNode> discoveryNodes = buildDynamicNodes(mock, nodeSettings);
@@ -165,7 +165,7 @@ public class GceDiscoveryTests extends ESTestCase {
         Settings nodeSettings = Settings.builder()
                 .put(GceComputeService.PROJECT_SETTING.getKey(), projectName)
                 .put(GceComputeService.ZONE_SETTING.getKey(), "europe-west1-b")
-                .putArray(GceDiscovery.TAGS_SETTING.getKey(), "elasticsearch")
+                .putArray(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch")
                 .build();
         mock = new GceComputeServiceMock(nodeSettings, networkService);
         List<DiscoveryNode> discoveryNodes = buildDynamicNodes(mock, nodeSettings);
@@ -176,7 +176,7 @@ public class GceDiscoveryTests extends ESTestCase {
         Settings nodeSettings = Settings.builder()
                 .put(GceComputeService.PROJECT_SETTING.getKey(), projectName)
                 .put(GceComputeService.ZONE_SETTING.getKey(), "europe-west1-b")
-                .putArray(GceDiscovery.TAGS_SETTING.getKey(), "elasticsearch", "dev")
+                .putArray(GceUnicastHostsProvider.TAGS_SETTING.getKey(), "elasticsearch", "dev")
                 .build();
         mock = new GceComputeServiceMock(nodeSettings, networkService);
         List<DiscoveryNode> discoveryNodes = buildDynamicNodes(mock, nodeSettings);
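
The four hunks above move TAGS_SETTING from GceDiscovery to GceUnicastHostsProvider; the behavior under test is tag-based filtering of discovery candidates. A dependency-free sketch of the matching rule (class and method names are illustrative, not the plugin's API):

import java.util.Arrays;
import java.util.List;

public class TagFilterSketch {
    // an instance qualifies as a discovery candidate only if it carries every required tag
    static boolean matchesTags(List<String> instanceTags, List<String> requiredTags) {
        return instanceTags.containsAll(requiredTags);
    }

    public static void main(String[] args) {
        List<String> required = Arrays.asList("elasticsearch", "dev");
        System.out.println(matchesTags(Arrays.asList("elasticsearch", "dev", "web"), required)); // true
        System.out.println(matchesTags(Arrays.asList("elasticsearch"), required));               // false
    }
}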

@@ -27,7 +27,7 @@ import org.elasticsearch.indices.IndicesModule;
 import org.elasticsearch.test.ESTestCase;
 import org.junit.Before;
 
-public class AttachmentUnitTestCase extends ESTestCase {
+public abstract class AttachmentUnitTestCase extends ESTestCase {
 
     protected Settings testSettings;
 

@@ -23,7 +23,7 @@ esplugin {
 }
 
 dependencies {
-    compile 'com.microsoft.azure:azure-storage:2.0.0'
+    compile 'com.microsoft.azure:azure-storage:4.0.0'
     compile 'org.apache.commons:commons-lang3:3.3.2'
 }
 

@@ -1 +0,0 @@
-b970c65a38da0569013e0c76de7c404f842496c2
@@ -0,0 +1 @@
+b31504f0fb3f9c4458ad053b426357a9b0df6e08

@@ -41,7 +41,7 @@ public interface AzureStorageService {
 
     final class Storage {
         public static final String PREFIX = "cloud.azure.storage.";
-        public static final Setting<TimeValue> TIMEOUT_SETTING = Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueMinutes(5), false, Setting.Scope.CLUSTER);
+        public static final Setting<TimeValue> TIMEOUT_SETTING = Setting.timeSetting("cloud.azure.storage.timeout", TimeValue.timeValueSeconds(-1), false, Setting.Scope.CLUSTER);
         public static final Setting<String> ACCOUNT_SETTING = Setting.simpleString("repositories.azure.account", false, Setting.Scope.CLUSTER);
         public static final Setting<String> CONTAINER_SETTING = Setting.simpleString("repositories.azure.container", false, Setting.Scope.CLUSTER);
         public static final Setting<String> BASE_PATH_SETTING = Setting.simpleString("repositories.azure.base_path", false, Setting.Scope.CLUSTER);

@@ -121,13 +121,15 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent<AzureSto
         // only one mode per storage account can be active at a time
         client.getDefaultRequestOptions().setLocationMode(mode);
 
-        // Set timeout option. Defaults to 5mn. See cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout
-        try {
-            int timeout = (int) azureStorageSettings.getTimeout().getMillis();
-            client.getDefaultRequestOptions().setMaximumExecutionTimeInMs(timeout);
-        } catch (ClassCastException e) {
-            throw new IllegalArgumentException("Can not convert [" + azureStorageSettings.getTimeout() +
-                    "]. It can not be longer than 2,147,483,647ms.");
+        // Set timeout option if the user sets cloud.azure.storage.timeout or cloud.azure.storage.xxx.timeout (it's negative by default)
+        if (azureStorageSettings.getTimeout().getSeconds() > 0) {
+            try {
+                int timeout = (int) azureStorageSettings.getTimeout().getMillis();
+                client.getDefaultRequestOptions().setTimeoutIntervalInMs(timeout);
+            } catch (ClassCastException e) {
+                throw new IllegalArgumentException("Can not convert [" + azureStorageSettings.getTimeout() +
+                        "]. It can not be longer than 2,147,483,647ms.");
+            }
         }
         return client;
     }
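
The replacement above only forwards a timeout to the Azure client when the user actually configured one, using the negative default of TIMEOUT_SETTING as a "not set" sentinel. A dependency-free sketch of that pattern (the Options class below is a stand-in for the SDK's request options, not the real API):

import java.util.concurrent.TimeUnit;

public class TimeoutSentinelSketch {
    // hypothetical stand-in for the Azure SDK's RequestOptions
    static final class Options {
        Integer timeoutIntervalInMs; // null means "library default"
        void setTimeoutIntervalInMs(int ms) { timeoutIntervalInMs = ms; }
    }

    static Options configure(long timeoutSeconds) {
        Options options = new Options();
        if (timeoutSeconds > 0) { // negative default means "leave the library default alone"
            long millis = TimeUnit.SECONDS.toMillis(timeoutSeconds);
            if (millis > Integer.MAX_VALUE) {
                throw new IllegalArgumentException("Can not convert [" + timeoutSeconds +
                        "s]. It can not be longer than 2,147,483,647ms.");
            }
            options.setTimeoutIntervalInMs((int) millis);
        }
        return options;
    }

    public static void main(String[] args) {
        System.out.println(configure(-1).timeoutIntervalInMs); // null -> SDK default
        System.out.println(configure(30).timeoutIntervalInMs); // 30000
    }
}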

@@ -273,7 +275,7 @@ public class AzureStorageServiceImpl extends AbstractLifecycleComponent<AzureSto
         CloudBlockBlob blobSource = blob_container.getBlockBlobReference(sourceBlob);
         if (blobSource.exists()) {
             CloudBlockBlob blobTarget = blob_container.getBlockBlobReference(targetBlob);
-            blobTarget.startCopyFromBlob(blobSource);
+            blobTarget.startCopy(blobSource);
             blobSource.delete();
             logger.debug("moveBlob container [{}], sourceBlob [{}], targetBlob [{}] -> done", container, sourceBlob, targetBlob);
         }

@@ -27,6 +27,8 @@ import org.elasticsearch.test.ESTestCase;
 import java.net.URI;
 
 import static org.hamcrest.Matchers.is;
+import static org.hamcrest.Matchers.lessThan;
+import static org.hamcrest.Matchers.nullValue;
 
 public class AzureStorageServiceTests extends ESTestCase {
     final static Settings settings = Settings.builder()
@@ -126,18 +128,30 @@ public class AzureStorageServiceTests extends ESTestCase {
         AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings);
         azureStorageService.doStart();
         CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
-        assertThat(client1.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(10 * 1000));
+        assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(10 * 1000));
         CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY);
-        assertThat(client3.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(30 * 1000));
+        assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000));
     }
 
     public void testGetSelectedClientDefaultTimeout() {
         AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(settings);
         azureStorageService.doStart();
         CloudBlobClient client1 = azureStorageService.getSelectedClient("azure1", LocationMode.PRIMARY_ONLY);
-        assertThat(client1.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(5 * 60 * 1000));
+        assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), nullValue());
         CloudBlobClient client3 = azureStorageService.getSelectedClient("azure3", LocationMode.PRIMARY_ONLY);
-        assertThat(client3.getDefaultRequestOptions().getMaximumExecutionTimeInMs(), is(30 * 1000));
+        assertThat(client3.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(30 * 1000));
     }
 
+    public void testGetSelectedClientNoTimeout() {
+        Settings timeoutSettings = Settings.builder()
+                .put("cloud.azure.storage.azure.account", "myaccount")
+                .put("cloud.azure.storage.azure.key", "mykey")
+                .build();
+
+        AzureStorageServiceImpl azureStorageService = new AzureStorageServiceMock(timeoutSettings);
+        azureStorageService.doStart();
+        CloudBlobClient client1 = azureStorageService.getSelectedClient("azure", LocationMode.PRIMARY_ONLY);
+        assertThat(client1.getDefaultRequestOptions().getTimeoutIntervalInMs(), is(nullValue()));
+    }
+
     /**

@@ -23,9 +23,9 @@ import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.client.Client;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.InternalClusterService;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.discovery.DiscoveryService;
 import org.elasticsearch.env.Environment;
 import org.elasticsearch.node.Node;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;
@@ -66,14 +66,14 @@ public class TribeUnitTests extends ESTestCase {
                 .put(baseSettings)
                 .put("cluster.name", "tribe1")
                 .put("node.name", "tribe1_node")
-                .put(DiscoveryService.DISCOVERY_SEED_SETTING.getKey(), random().nextLong())
+                .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
                 .build()).start();
         tribe2 = new TribeClientNode(
             Settings.builder()
                 .put(baseSettings)
                 .put("cluster.name", "tribe2")
                 .put("node.name", "tribe2_node")
-                .put(DiscoveryService.DISCOVERY_SEED_SETTING.getKey(), random().nextLong())
+                .put(InternalClusterService.NODE_ID_SEED_SETTING.getKey(), random().nextLong())
                 .build()).start();
     }
 
@@ -88,8 +88,8 @@ public class TribeUnitTests extends ESTestCase {
         System.setProperty("es.cluster.name", "tribe_node_cluster");
         System.setProperty("es.tribe.t1.cluster.name", "tribe1");
         System.setProperty("es.tribe.t2.cluster.name", "tribe2");
-        System.setProperty("es.tribe.t1.discovery.id.seed", Long.toString(random().nextLong()));
-        System.setProperty("es.tribe.t2.discovery.id.seed", Long.toString(random().nextLong()));
+        System.setProperty("es.tribe.t1.node_id.seed", Long.toString(random().nextLong()));
+        System.setProperty("es.tribe.t2.node_id.seed", Long.toString(random().nextLong()));
 
         try {
             assertTribeNodeSuccessfullyCreated(Settings.EMPTY);
@@ -97,8 +97,8 @@ public class TribeUnitTests extends ESTestCase {
             System.clearProperty("es.cluster.name");
             System.clearProperty("es.tribe.t1.cluster.name");
             System.clearProperty("es.tribe.t2.cluster.name");
-            System.clearProperty("es.tribe.t1.discovery.id.seed");
-            System.clearProperty("es.tribe.t2.discovery.id.seed");
+            System.clearProperty("es.tribe.t1.node_id.seed");
+            System.clearProperty("es.tribe.t2.node_id.seed");
         }
     }
 

@@ -1,5 +1,5 @@
 cluster.name: tribe_node_cluster
 tribe.t1.cluster.name: tribe1
 tribe.t2.cluster.name: tribe2
-tribe.t1.discovery.id.seed: 1
-tribe.t2.discovery.id.seed: 2
+tribe.t1.node_id.seed: 1
+tribe.t2.node_id.seed: 2
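
The seed settings above exist so that each test node derives a deterministic id instead of a random one. A minimal sketch of why a fixed seed yields a reproducible id (generateNodeId is a hypothetical helper, not the actual implementation):

import java.util.Random;

public class NodeIdSeedSketch {
    // hypothetical: derive a short hex id from a fixed seed
    static String generateNodeId(long seed) {
        Random random = new Random(seed);
        StringBuilder id = new StringBuilder();
        for (int i = 0; i < 8; i++) {
            id.append(Character.forDigit(random.nextInt(16), 16));
        }
        return id.toString();
    }

    public static void main(String[] args) {
        // the same seed (e.g. tribe.t1.node_id.seed: 1) always produces the same id
        System.out.println(generateNodeId(1).equals(generateNodeId(1))); // true
        System.out.println(generateNodeId(1).equals(generateNodeId(2))); // false, almost surely
    }
}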

@@ -32,7 +32,7 @@ import org.junit.Before;
 
 import java.util.Collections;
 
-public abstract class AbstractMustacheTests extends ESTestCase {
+public abstract class AbstractMustacheTestCase extends ESTestCase {
 
     protected TemplateService templateService;
 
@@ -43,12 +43,12 @@ public abstract class AbstractMustacheTests extends ESTestCase {
             .put(ScriptService.SCRIPT_AUTO_RELOAD_ENABLED_SETTING.getKey(), false)
             .build();
         MustacheScriptEngineService mustache = new MustacheScriptEngineService(settings);
-        ScriptEngineRegistry scriptEngineRegistry =
-                new ScriptEngineRegistry(Collections.singletonList(new ScriptEngineRegistry.ScriptEngineRegistration(MustacheScriptEngineService.class, MustacheScriptEngineService.TYPES)));
+        ScriptEngineRegistry scriptEngineRegistry = new ScriptEngineRegistry(Collections.singletonList(
+                new ScriptEngineRegistry.ScriptEngineRegistration(MustacheScriptEngineService.class, MustacheScriptEngineService.TYPES)));
         ScriptContextRegistry scriptContextRegistry = new ScriptContextRegistry(Collections.emptyList());
         ScriptSettings scriptSettings = new ScriptSettings(scriptEngineRegistry, scriptContextRegistry);
-        ScriptService scriptService =
-                new ScriptService(settings, new Environment(settings), Collections.singleton(mustache), null, scriptEngineRegistry, scriptContextRegistry, scriptSettings);
+        ScriptService scriptService = new ScriptService(settings, new Environment(settings), Collections.singleton(mustache), null,
+                scriptEngineRegistry, scriptContextRegistry, scriptSettings);
         templateService = new InternalTemplateService(scriptService);
     }
 
@@ -31,7 +31,7 @@ import java.util.Map;
 
 import static org.hamcrest.Matchers.equalTo;
 
-public class IngestDocumentMustacheIT extends AbstractMustacheTests {
+public class IngestDocumentMustacheIT extends AbstractMustacheTestCase {
 
     public void testAccessMetaDataViaTemplate() {
         Map<String, Object> document = new HashMap<>();
@@ -26,7 +26,7 @@ import java.util.Collections;
 import java.util.HashMap;
 import java.util.Map;
 
-public class IngestMustacheRemoveProcessorIT extends AbstractMustacheTests {
+public class IngestMustacheRemoveProcessorIT extends AbstractMustacheTestCase {
 
     public void testRemoveProcessorMustacheExpression() throws Exception {
         RemoveProcessor.Factory factory = new RemoveProcessor.Factory(templateService);
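
The renames above follow the test naming convention: abstract helpers end in TestCase while concrete classes end in Tests or IT. An illustrative sketch of how such a reflection-based check could look (this is not the project's actual NamingConventionsCheck):

import java.lang.reflect.Modifier;

public class NamingConventionSketch {
    static String check(Class<?> clazz) {
        boolean isAbstract = Modifier.isAbstract(clazz.getModifiers());
        String name = clazz.getSimpleName();
        if (isAbstract && name.endsWith("Tests")) {
            return name + ": abstract helpers should end in TestCase";
        }
        if (!isAbstract && name.endsWith("TestCase")) {
            return name + ": concrete tests should end in Tests or IT";
        }
        return name + ": ok";
    }

    abstract static class AbstractMustacheTests {}    // would be flagged
    abstract static class AbstractMustacheTestCase {} // passes

    public static void main(String[] args) {
        System.out.println(check(AbstractMustacheTests.class));
        System.out.println(check(AbstractMustacheTestCase.class));
    }
}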
Some files were not shown because too many files have changed in this diff.