Merge remote-tracking branch 'upstream/master' into rolling_upgrades
commit ce86ed1fdd
@@ -364,10 +364,12 @@ These are the linux flavors the Vagrantfile currently supports:
* ubuntu-1204 aka precise
* ubuntu-1404 aka trusty
* ubuntu-1504 aka vivid
* ubuntu-1604 aka xenial
* debian-8 aka jessie, the current debian stable distribution
* centos-6
* centos-7
* fedora-22
* fedora-24
* oel-6 aka Oracle Enterprise Linux 6
* oel-7 aka Oracle Enterprise Linux 7
* sles-12
* opensuse-13
@@ -376,7 +378,6 @@ We're missing the following from the support matrix because there aren't high
quality boxes available in vagrant atlas:

* sles-11
* oel-6

We're missing the following because our tests are very linux/bash centric:
@@ -37,6 +37,13 @@ Vagrant.configure(2) do |config|
      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
    SHELL
  end
  config.vm.define "ubuntu-1604" do |config|
    config.vm.box = "elastic/ubuntu-16.04-x86_64"
    ubuntu_common config, extra: <<-SHELL
      # Install Jayatana so we can work around it being present.
      [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
    SHELL
  end
  # Wheezy's backports don't contain Openjdk 8 and the backflips required to
  # get the sun jdk on there just aren't worth it. We have jessie for testing
  # debian and it works fine.

@@ -157,7 +157,7 @@ class BuildPlugin implements Plugin<Project> {
    private static String findJavaHome() {
        String javaHome = System.getenv('JAVA_HOME')
        if (javaHome == null) {
            if (System.getProperty("idea.active") != null) {
            if (System.getProperty("idea.active") != null || System.getProperty("eclipse.launcher") != null) {
                // intellij doesn't set JAVA_HOME, so we use the jdk gradle was run with
                javaHome = Jvm.current().javaHome
            } else {

@@ -405,9 +405,9 @@ class BuildPlugin implements Plugin<Project> {
            //options.incremental = true

            if (project.javaVersion == JavaVersion.VERSION_1_9) {
                // hack until gradle supports java 9's new "-release" arg
                // hack until gradle supports java 9's new "--release" arg
                assert minimumJava == JavaVersion.VERSION_1_8
                options.compilerArgs << '-release' << '8'
                options.compilerArgs << '--release' << '8'
                project.sourceCompatibility = null
                project.targetCompatibility = null
            }
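For context on the flag fixed above: unlike -source/-target, javac's --release also compiles against the JDK 8 platform API signatures, so uses of newer library classes fail at compile time. A minimal sketch of passing the same flag through the standard javax.tools API (the source path is illustrative, and a JDK 9+ compiler is assumed):

import javax.tools.JavaCompiler;
import javax.tools.ToolProvider;

public class ReleaseFlagDemo {
    public static void main(String[] args) {
        // Requires a JDK, not a bare JRE; returns null otherwise.
        JavaCompiler compiler = ToolProvider.getSystemJavaCompiler();
        // "--release 8" pins both the language level and the platform API,
        // which -source/-target alone cannot do.
        int result = compiler.run(null, null, null,
                "--release", "8", "src/main/java/Example.java");
        System.out.println(result == 0 ? "compiled" : "failed");
    }
}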
@@ -148,6 +148,9 @@ class PrecommitTasks {
                checkstyleTask.dependsOn(task)
                task.dependsOn(copyCheckstyleConf)
                task.inputs.file(checkstyleSuppressions)
                task.reports {
                    html.enabled false
                }
            }
        }
        return checkstyleTask

@@ -238,7 +238,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]IncompatibleClusterStateVersionException.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]InternalClusterInfoService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]LocalNodeMasterListener.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeIndexDeletedAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]index[/\\]NodeMappingRefreshAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]action[/\\]shard[/\\]ShardStateAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]block[/\\]ClusterBlock.java" checks="LineLength" />

@@ -415,7 +414,6 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]mapper[/\\]RootObjectMapper.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]merge[/\\]MergeStats.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]AbstractQueryBuilder.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]MatchQueryParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryBuilders.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]QueryValidationException.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]query[/\\]support[/\\]InnerHitsQueryParserHelper.java" checks="LineLength" />

@@ -488,15 +486,12 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestPendingClusterTasksAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestShardsAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]rest[/\\]action[/\\]cat[/\\]RestThreadPoolAction.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]AbstractScriptParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptContextRegistry.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptModes.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptParameterParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]ScriptSettings.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]script[/\\]Template.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]MultiValueMode.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]SearchService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]AggregatorFactories.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]InternalMultiBucketAggregation.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]ValuesSourceAggregationBuilder.java" checks="LineLength" />

@@ -555,29 +550,19 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]ValuesSourceParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueFormat.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]aggregations[/\\]support[/\\]format[/\\]ValueParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]builder[/\\]SearchSourceBuilder.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]controller[/\\]SearchPhaseController.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]AggregatedDfs.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]dfs[/\\]DfsSearchResult.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchPhase.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSearchResult.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhase.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhaseParseElement.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]DefaultSearchContext.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]FilteredSearchContext.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]InternalSearchHit.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]SearchContext.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]internal[/\\]ShardSearchTransportRequest.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]FieldLookup.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafDocLookup.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]lookup[/\\]LeafFieldsLookup.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]query[/\\]QueryPhase.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]QueryRescorer.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]rescore[/\\]RescoreParseElement.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]GeoDistanceSortParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]ScriptSortParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]sort[/\\]SortParseElement.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestContextParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]SuggestUtils.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]CompletionSuggestParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]CategoryContextMapping.java" checks="LineLength" />

@@ -586,9 +571,7 @@
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]completion[/\\]context[/\\]GeoQueryContext.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]CandidateScorer.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]NoisyChannelSpellChecker.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]PhraseSuggestParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]phrase[/\\]WordScorer.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]suggest[/\\]term[/\\]TermSuggestParser.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]RestoreService.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardFailure.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]snapshots[/\\]SnapshotShardsService.java" checks="LineLength" />

@@ -952,7 +935,6 @@
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]basic[/\\]TransportTwoNodesSearchIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ChildQuerySearchIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]child[/\\]ParentFieldLoadingIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]fetch[/\\]FetchSubPhasePluginIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoBoundingBoxIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoFilterIT.java" checks="LineLength" />
  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]search[/\\]geo[/\\]GeoShapeQueryTests.java" checks="LineLength" />

@@ -1007,7 +989,6 @@
  <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolateRequestBuilder.java" checks="LineLength" />
  <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateShardResponse.java" checks="LineLength" />
  <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportMultiPercolateAction.java" checks="LineLength" />
  <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportPercolateAction.java" checks="LineLength" />
  <suppress files="modules[/\\]percolator[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TransportShardMultiPercolateAction.java" checks="LineLength" />
  <suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
  <suppress files="modules[/\\]percolator[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />

@@ -1084,7 +1065,6 @@
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]IndexSettingsModule.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]InternalTestCluster.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]MockIndexEventListener.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]TestSearchContext.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]cluster[/\\]NoopClusterService.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]cluster[/\\]TestClusterService.java" checks="LineLength" />
  <suppress files="test[/\\]framework[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]test[/\\]discovery[/\\]ClusterDiscoveryConfiguration.java" checks="LineLength" />

@@ -1,4 +1,4 @@
elasticsearch = 5.0.0-alpha6
elasticsearch = 6.0.0-alpha1
lucene = 6.2.0

# optional dependencies

@@ -72,7 +72,7 @@ public class RestNoopBulkAction extends BaseRestHandler {
        }
        bulkRequest.timeout(request.paramAsTime("timeout", BulkShardRequest.DEFAULT_TIMEOUT));
        bulkRequest.setRefreshPolicy(request.param("refresh"));
        bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, defaultPipeline, null, true);
        bulkRequest.add(request.content(), defaultIndex, defaultType, defaultRouting, defaultFields, null, defaultPipeline, null, true);

        // short circuit the call to the transport layer
        BulkRestBuilderListener listener = new BulkRestBuilderListener(channel, request);
@@ -0,0 +1,392 @@
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache license, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the license for the specific language governing permissions and
 * limitations under the license.
 */
package org.apache.logging.log4j.core.jmx;

import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Executor;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanRegistrationException;
import javax.management.MBeanServer;
import javax.management.NotCompliantMBeanException;
import javax.management.ObjectName;

import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.appender.AsyncAppender;
import org.apache.logging.log4j.core.async.AsyncLoggerConfig;
import org.apache.logging.log4j.core.async.AsyncLoggerContext;
import org.apache.logging.log4j.core.async.DaemonThreadFactory;
import org.apache.logging.log4j.core.config.LoggerConfig;
import org.apache.logging.log4j.core.impl.Log4jContextFactory;
import org.apache.logging.log4j.core.selector.ContextSelector;
import org.apache.logging.log4j.core.util.Constants;
import org.apache.logging.log4j.spi.LoggerContextFactory;
import org.apache.logging.log4j.status.StatusLogger;
import org.apache.logging.log4j.util.PropertiesUtil;
import org.elasticsearch.common.SuppressForbidden;

/**
 * Creates MBeans to instrument various classes in the log4j class hierarchy.
 * <p>
 * All instrumentation for Log4j 2 classes can be disabled by setting system property {@code -Dlog4j2.disable.jmx=true}.
 * </p>
 */
@SuppressForbidden(reason = "copied class to hack around Log4j bug")
public final class Server {

    /**
     * The domain part, or prefix ({@value}) of the {@code ObjectName} of all MBeans that instrument Log4J2 components.
     */
    public static final String DOMAIN = "org.apache.logging.log4j2";
    private static final String PROPERTY_DISABLE_JMX = "log4j2.disable.jmx";
    private static final String PROPERTY_ASYNC_NOTIF = "log4j2.jmx.notify.async";
    private static final String THREAD_NAME_PREFIX = "log4j2.jmx.notif";
    private static final StatusLogger LOGGER = StatusLogger.getLogger();
    static final Executor executor = isJmxDisabled() ? null : createExecutor();

    private Server() {
    }

    /**
     * Returns either a {@code null} Executor (causing JMX notifications to be sent from the caller thread) or a daemon
     * background thread Executor, depending on the value of system property "log4j2.jmx.notify.async". If this
     * property is not set, use a {@code null} Executor for web apps to avoid memory leaks and other issues when the
     * web app is restarted.
     * @see <a href="https://issues.apache.org/jira/browse/LOG4J2-938">LOG4J2-938</a>
     */
    private static ExecutorService createExecutor() {
        final boolean defaultAsync = !Constants.IS_WEB_APP;
        final boolean async = PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_ASYNC_NOTIF, defaultAsync);
        return async ? Executors.newFixedThreadPool(1, new DaemonThreadFactory(THREAD_NAME_PREFIX)) : null;
    }

    /**
     * Either returns the specified name as is, or returns a quoted value containing the specified name with the special
     * characters (comma, equals, colon, quote, asterisk, or question mark) preceded with a backslash.
     *
     * @param name the name to escape so it can be used as a value in an {@link ObjectName}.
     * @return the escaped name
     */
    public static String escape(final String name) {
        final StringBuilder sb = new StringBuilder(name.length() * 2);
        boolean needsQuotes = false;
        for (int i = 0; i < name.length(); i++) {
            final char c = name.charAt(i);
            switch (c) {
            case '\\':
            case '*':
            case '?':
            case '\"':
                // quote, star, question & backslash must be escaped
                sb.append('\\');
                needsQuotes = true; // ... and can only appear in quoted value
                break;
            case ',':
            case '=':
            case ':':
                // no need to escape these, but value must be quoted
                needsQuotes = true;
                break;
            case '\r':
                // drop \r characters: \\r gives "invalid escape sequence"
                continue;
            case '\n':
                // replace \n characters with \\n sequence
                sb.append("\\n");
                needsQuotes = true;
                continue;
            }
            sb.append(c);
        }
        if (needsQuotes) {
            sb.insert(0, '\"');
            sb.append('\"');
        }
        return sb.toString();
    }

    private static boolean isJmxDisabled() {
        return PropertiesUtil.getProperties().getBooleanProperty(PROPERTY_DISABLE_JMX);
    }

    public static void reregisterMBeansAfterReconfigure() {
        // avoid creating Platform MBean Server if JMX disabled
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for log4j2. Not registering MBeans.");
            return;
        }
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        reregisterMBeansAfterReconfigure(mbs);
    }

    public static void reregisterMBeansAfterReconfigure(final MBeanServer mbs) {
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for log4j2. Not registering MBeans.");
            return;
        }

        // now provide instrumentation for the newly configured
        // LoggerConfigs and Appenders
        try {
            final ContextSelector selector = getContextSelector();
            if (selector == null) {
                LOGGER.debug("Could not register MBeans: no ContextSelector found.");
                return;
            }
            LOGGER.trace("Reregistering MBeans after reconfigure. Selector={}", selector);
            final List<LoggerContext> contexts = selector.getLoggerContexts();
            int i = 0;
            for (final LoggerContext ctx : contexts) {
                LOGGER.trace("Reregistering context ({}/{}): '{}' {}", ++i, contexts.size(), ctx.getName(), ctx);
                // first unregister the context and all nested loggers,
                // appenders, statusLogger, contextSelector, ringbuffers...
                unregisterLoggerContext(ctx.getName(), mbs);

                final LoggerContextAdmin mbean = new LoggerContextAdmin(ctx, executor);
                register(mbs, mbean, mbean.getObjectName());

                if (ctx instanceof AsyncLoggerContext) {
                    final RingBufferAdmin rbmbean = ((AsyncLoggerContext) ctx).createRingBufferAdmin();
                    if (rbmbean.getBufferSize() > 0) {
                        // don't register if Disruptor not started (DefaultConfiguration: config not found)
                        register(mbs, rbmbean, rbmbean.getObjectName());
                    }
                }

                // register the status logger and the context selector
                // repeatedly
                // for each known context: if one context is unregistered,
                // these MBeans should still be available for the other
                // contexts.
                registerStatusLogger(ctx.getName(), mbs, executor);
                registerContextSelector(ctx.getName(), selector, mbs, executor);

                registerLoggerConfigs(ctx, mbs, executor);
                registerAppenders(ctx, mbs, executor);
            }
        } catch (final Exception ex) {
            LOGGER.error("Could not register mbeans", ex);
        }
    }

    /**
     * Unregister all log4j MBeans from the platform MBean server.
     */
    public static void unregisterMBeans() {
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans.");
            return;
        }
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        unregisterMBeans(mbs);
    }

    /**
     * Unregister all log4j MBeans from the specified MBean server.
     *
     * @param mbs the MBean server to unregister from.
     */
    public static void unregisterMBeans(final MBeanServer mbs) {
        unregisterStatusLogger("*", mbs);
        unregisterContextSelector("*", mbs);
        unregisterContexts(mbs);
        unregisterLoggerConfigs("*", mbs);
        unregisterAsyncLoggerRingBufferAdmins("*", mbs);
        unregisterAsyncLoggerConfigRingBufferAdmins("*", mbs);
        unregisterAppenders("*", mbs);
        unregisterAsyncAppenders("*", mbs);
    }

    /**
     * Returns the {@code ContextSelector} of the current {@code Log4jContextFactory}.
     *
     * @return the {@code ContextSelector} of the current {@code Log4jContextFactory}
     */
    private static ContextSelector getContextSelector() {
        final LoggerContextFactory factory = LogManager.getFactory();
        if (factory instanceof Log4jContextFactory) {
            final ContextSelector selector = ((Log4jContextFactory) factory).getSelector();
            return selector;
        }
        return null;
    }

    /**
     * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s
     * and {@code Appender}s from the platform MBean server.
     *
     * @param loggerContextName name of the logger context to unregister
     */
    public static void unregisterLoggerContext(final String loggerContextName) {
        if (isJmxDisabled()) {
            LOGGER.debug("JMX disabled for Log4j2. Not unregistering MBeans.");
            return;
        }
        final MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        unregisterLoggerContext(loggerContextName, mbs);
    }

    /**
     * Unregisters all MBeans associated with the specified logger context (including MBeans for {@code LoggerConfig}s
     * and {@code Appender}s from the platform MBean server.
     *
     * @param contextName name of the logger context to unregister
     * @param mbs the MBean Server to unregister the instrumented objects from
     */
    public static void unregisterLoggerContext(final String contextName, final MBeanServer mbs) {
        final String pattern = LoggerContextAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs); // unregister context mbean

        // now unregister all MBeans associated with this logger context
        unregisterStatusLogger(contextName, mbs);
        unregisterContextSelector(contextName, mbs);
        unregisterLoggerConfigs(contextName, mbs);
        unregisterAppenders(contextName, mbs);
        unregisterAsyncAppenders(contextName, mbs);
        unregisterAsyncLoggerRingBufferAdmins(contextName, mbs);
        unregisterAsyncLoggerConfigRingBufferAdmins(contextName, mbs);
    }

    private static void registerStatusLogger(final String contextName, final MBeanServer mbs, final Executor executor)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {

        final StatusLoggerAdmin mbean = new StatusLoggerAdmin(contextName, executor);
        register(mbs, mbean, mbean.getObjectName());
    }

    private static void registerContextSelector(final String contextName, final ContextSelector selector,
            final MBeanServer mbs, final Executor executor) throws InstanceAlreadyExistsException,
            MBeanRegistrationException, NotCompliantMBeanException {

        final ContextSelectorAdmin mbean = new ContextSelectorAdmin(contextName, selector);
        register(mbs, mbean, mbean.getObjectName());
    }

    private static void unregisterStatusLogger(final String contextName, final MBeanServer mbs) {
        final String pattern = StatusLoggerAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterContextSelector(final String contextName, final MBeanServer mbs) {
        final String pattern = ContextSelectorAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterLoggerConfigs(final String contextName, final MBeanServer mbs) {
        final String pattern = LoggerConfigAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterContexts(final MBeanServer mbs) {
        final String pattern = LoggerContextAdminMBean.PATTERN;
        final String search = String.format(pattern, "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterAppenders(final String contextName, final MBeanServer mbs) {
        final String pattern = AppenderAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterAsyncAppenders(final String contextName, final MBeanServer mbs) {
        final String pattern = AsyncAppenderAdminMBean.PATTERN;
        final String search = String.format(pattern, escape(contextName), "*");
        unregisterAllMatching(search, mbs);
    }

    private static void unregisterAsyncLoggerRingBufferAdmins(final String contextName, final MBeanServer mbs) {
        final String pattern1 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER;
        final String search1 = String.format(pattern1, escape(contextName));
        unregisterAllMatching(search1, mbs);
    }

    private static void unregisterAsyncLoggerConfigRingBufferAdmins(final String contextName, final MBeanServer mbs) {
        final String pattern2 = RingBufferAdminMBean.PATTERN_ASYNC_LOGGER_CONFIG;
        final String search2 = String.format(pattern2, escape(contextName), "*");
        unregisterAllMatching(search2, mbs);
    }

    private static void unregisterAllMatching(final String search, final MBeanServer mbs) {
        try {
            final ObjectName pattern = new ObjectName(search);
            final Set<ObjectName> found = mbs.queryNames(pattern, null);
            if (found.isEmpty()) {
                LOGGER.trace("Unregistering but no MBeans found matching '{}'", search);
            } else {
                LOGGER.trace("Unregistering {} MBeans: {}", found.size(), found);
            }
            for (final ObjectName objectName : found) {
                mbs.unregisterMBean(objectName);
            }
        } catch (final Exception ex) {
            LOGGER.error("Could not unregister MBeans for " + search, ex);
        }
    }

    private static void registerLoggerConfigs(final LoggerContext ctx, final MBeanServer mbs, final Executor executor)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {

        final Map<String, LoggerConfig> map = ctx.getConfiguration().getLoggers();
        for (final String name : map.keySet()) {
            final LoggerConfig cfg = map.get(name);
            final LoggerConfigAdmin mbean = new LoggerConfigAdmin(ctx, cfg);
            register(mbs, mbean, mbean.getObjectName());

            if (cfg instanceof AsyncLoggerConfig) {
                final AsyncLoggerConfig async = (AsyncLoggerConfig) cfg;
                final RingBufferAdmin rbmbean = async.createRingBufferAdmin(ctx.getName());
                register(mbs, rbmbean, rbmbean.getObjectName());
            }
        }
    }

    private static void registerAppenders(final LoggerContext ctx, final MBeanServer mbs, final Executor executor)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {

        final Map<String, Appender> map = ctx.getConfiguration().getAppenders();
        for (final String name : map.keySet()) {
            final Appender appender = map.get(name);

            if (appender instanceof AsyncAppender) {
                final AsyncAppender async = ((AsyncAppender) appender);
                final AsyncAppenderAdmin mbean = new AsyncAppenderAdmin(ctx.getName(), async);
                register(mbs, mbean, mbean.getObjectName());
            } else {
                final AppenderAdmin mbean = new AppenderAdmin(ctx.getName(), appender);
                register(mbs, mbean, mbean.getObjectName());
            }
        }
    }

    private static void register(final MBeanServer mbs, final Object mbean, final ObjectName objectName)
            throws InstanceAlreadyExistsException, MBeanRegistrationException, NotCompliantMBeanException {
        LOGGER.debug("Registering MBean {}", objectName);
        mbs.registerMBean(mbean, objectName);
    }
}
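The unregisterAllMatching helper in the copied class above leans on JMX ObjectName wildcards. A minimal standalone sketch of the same query-then-unregister pattern, using only standard JMX APIs (the "com.example" domain is illustrative):

import java.lang.management.ManagementFactory;
import java.util.Set;
import javax.management.MBeanServer;
import javax.management.ObjectName;

public class UnregisterDemo {
    public static void main(String[] args) throws Exception {
        MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
        // '*' and '?' are ObjectName wildcards; this pattern matches every
        // MBean registered under the com.example domain.
        ObjectName pattern = new ObjectName("com.example:type=*,name=*");
        Set<ObjectName> found = mbs.queryNames(pattern, null);
        for (ObjectName name : found) {
            mbs.unregisterMBean(name); // ignores nothing: throws if already gone
        }
    }
}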
@@ -87,7 +87,9 @@ public class Version {
    public static final Version V_5_0_0_alpha5 = new Version(V_5_0_0_alpha5_ID, org.apache.lucene.util.Version.LUCENE_6_1_0);
    public static final int V_5_0_0_alpha6_ID = 5000006;
    public static final Version V_5_0_0_alpha6 = new Version(V_5_0_0_alpha6_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
    public static final Version CURRENT = V_5_0_0_alpha6;
    public static final int V_6_0_0_alpha1_ID = 6000001;
    public static final Version V_6_0_0_alpha1 = new Version(V_6_0_0_alpha1_ID, org.apache.lucene.util.Version.LUCENE_6_2_0);
    public static final Version CURRENT = V_6_0_0_alpha1;

    static {
        assert CURRENT.luceneVersion.equals(org.apache.lucene.util.Version.LATEST) : "Version must be upgraded to ["

@@ -100,6 +102,8 @@ public class Version {

    public static Version fromId(int id) {
        switch (id) {
            case V_6_0_0_alpha1_ID:
                return V_6_0_0_alpha1;
            case V_5_0_0_alpha6_ID:
                return V_5_0_0_alpha6;
            case V_5_0_0_alpha5_ID:
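The numeric IDs above pack major/minor/revision/build into fixed decimal positions, which is why 6.0.0-alpha1 gets 6000001. A small decoding sketch consistent with those constants (the real Version#fromId additionally maps the build byte to alpha/beta/RC labels):

public class VersionIdDemo {
    public static void main(String[] args) {
        int id = 6000001;                 // V_6_0_0_alpha1_ID
        int major = (id / 1000000) % 100; // 6
        int minor = (id / 10000) % 100;   // 0
        int revision = (id / 100) % 100;  // 0
        int build = id % 100;             // 1, i.e. alpha1
        System.out.println(major + "." + minor + "." + revision + " build " + build);
    }
}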
@@ -211,30 +211,16 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        if (in.readBoolean()) {
            indices = NodeIndicesStats.readIndicesStats(in);
        }
        if (in.readBoolean()) {
            os = new OsStats(in);
        }
        if (in.readBoolean()) {
            process = ProcessStats.readProcessStats(in);
        }
        if (in.readBoolean()) {
            jvm = JvmStats.readJvmStats(in);
        }
        if (in.readBoolean()) {
            threadPool = ThreadPoolStats.readThreadPoolStats(in);
        }
        if (in.readBoolean()) {
            fs = new FsInfo(in);
        }
        if (in.readBoolean()) {
            transport = TransportStats.readTransportStats(in);
        }
        if (in.readBoolean()) {
            http = HttpStats.readHttpStats(in);
        }
        breaker = AllCircuitBreakerStats.readOptionalAllCircuitBreakerStats(in);
        scriptStats = in.readOptionalStreamable(ScriptStats::new);
        discoveryStats = in.readOptionalStreamable(() -> new DiscoveryStats(null));
        os = in.readOptionalWriteable(OsStats::new);
        process = in.readOptionalWriteable(ProcessStats::new);
        jvm = in.readOptionalWriteable(JvmStats::new);
        threadPool = in.readOptionalWriteable(ThreadPoolStats::new);
        fs = in.readOptionalWriteable(FsInfo::new);
        transport = in.readOptionalWriteable(TransportStats::new);
        http = in.readOptionalWriteable(HttpStats::new);
        breaker = in.readOptionalWriteable(AllCircuitBreakerStats::new);
        scriptStats = in.readOptionalWriteable(ScriptStats::new);
        discoveryStats = in.readOptionalWriteable(DiscoveryStats::new);
        ingestStats = in.readOptionalWriteable(IngestStats::new);
    }
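readOptionalWriteable collapses each hand-rolled presence-flag block above into one call: a boolean on the wire, then the value's own serialization when present. A self-contained sketch of that convention using plain java.io streams (the Elasticsearch StreamInput/StreamOutput types are stand-ins here):

import java.io.*;

public class OptionalWriteableDemo {
    interface IOConsumer<T> { void accept(T t) throws IOException; }
    interface Reader<T> { T read(DataInputStream in) throws IOException; }

    static <T> void writeOptional(DataOutputStream out, T value, IOConsumer<T> writer) throws IOException {
        out.writeBoolean(value != null);   // presence flag
        if (value != null) {
            writer.accept(value);          // payload only when present
        }
    }

    static <T> T readOptional(DataInputStream in, Reader<T> reader) throws IOException {
        return in.readBoolean() ? reader.read(in) : null;
    }

    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        writeOptional(out, "hello", s -> out.writeUTF(s));
        writeOptional(out, (String) null, s -> out.writeUTF(s));

        DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(readOptional(in, DataInputStream::readUTF)); // hello
        System.out.println(readOptional(in, DataInputStream::readUTF)); // null
    }
}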
@@ -248,51 +234,16 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
            out.writeBoolean(true);
            indices.writeTo(out);
        }
        if (os == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            os.writeTo(out);
        }
        if (process == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            process.writeTo(out);
        }
        if (jvm == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            jvm.writeTo(out);
        }
        if (threadPool == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            threadPool.writeTo(out);
        }
        if (fs == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            fs.writeTo(out);
        }
        if (transport == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            transport.writeTo(out);
        }
        if (http == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            http.writeTo(out);
        }
        out.writeOptionalStreamable(breaker);
        out.writeOptionalStreamable(scriptStats);
        out.writeOptionalStreamable(discoveryStats);
        out.writeOptionalWriteable(os);
        out.writeOptionalWriteable(process);
        out.writeOptionalWriteable(jvm);
        out.writeOptionalWriteable(threadPool);
        out.writeOptionalWriteable(fs);
        out.writeOptionalWriteable(transport);
        out.writeOptionalWriteable(http);
        out.writeOptionalWriteable(breaker);
        out.writeOptionalWriteable(scriptStats);
        out.writeOptionalWriteable(discoveryStats);
        out.writeOptionalWriteable(ingestStats);
    }

@@ -318,11 +269,9 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
                builder.endObject();
            }
        }

        if (getIndices() != null) {
            getIndices().toXContent(builder, params);
        }

        if (getOs() != null) {
            getOs().toXContent(builder, params);
        }

@@ -350,15 +299,12 @@ public class NodeStats extends BaseNodeResponse implements ToXContent {
        if (getScriptStats() != null) {
            getScriptStats().toXContent(builder, params);
        }

        if (getDiscoveryStats() != null) {
            getDiscoveryStats().toXContent(builder, params);
        }

        if (getIngestStats() != null) {
            getIngestStats().toXContent(builder, params);
        }

        return builder;
    }
}
@@ -268,7 +268,7 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        indices = CommonStatsFlags.readCommonStatsFlags(in);
        indices = new CommonStatsFlags(in);
        os = in.readBoolean();
        process = in.readBoolean();
        jvm = in.readBoolean();

@@ -298,5 +298,4 @@ public class NodesStatsRequest extends BaseNodesRequest<NodesStatsRequest> {
        out.writeBoolean(discovery);
        out.writeBoolean(ingest);
    }

}

@@ -216,7 +216,7 @@ public class TransportGetTaskAction extends HandledTransportAction<GetTaskReques
            public void onFailure(Exception e) {
                if (ExceptionsHelper.unwrap(e, IndexNotFoundException.class) != null) {
                    // We haven't yet created the index for the task results so it can't be found.
                    listener.onFailure(new ResourceNotFoundException("task [{}] isn't running or stored its results", e,
                    listener.onFailure(new ResourceNotFoundException("task [{}] isn't running and hasn't stored its results", e,
                        request.getTaskId()));
                } else {
                    listener.onFailure(e);

@@ -83,7 +83,7 @@ public class ClusterStatsNodes implements ToXContent {
                continue;
            }
            if (nodeResponse.nodeStats().getFs() != null) {
                this.fs.add(nodeResponse.nodeStats().getFs().total());
                this.fs.add(nodeResponse.nodeStats().getFs().getTotal());
            }
        }
        this.counts = new Counts(nodeInfos);

@@ -292,7 +292,7 @@ public class DetailAnalyzeResponse implements Streamable, ToXContent {
        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
            builder.startObject();
            builder.field(Fields.NAME, name);
            builder.field(Fields.FILTERED_TEXT, texts);
            builder.array(Fields.FILTERED_TEXT, texts);
            builder.endObject();
            return builder;
        }
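The field-to-array change above makes the JSON shape explicit: array(...) always emits a JSON array for the varargs, even with a single element. A usage sketch against the 5.x-era XContentBuilder API (field names and values are illustrative):

import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class ArrayFieldDemo {
    public static void main(String[] args) throws Exception {
        String[] texts = {"foo", "bar"};
        XContentBuilder builder = XContentFactory.jsonBuilder()
                .startObject()
                .field("name", "standard")
                .array("filtered_text", texts) // always a JSON array
                .endObject();
        System.out.println(builder.string()); // {"name":"standard","filtered_text":["foo","bar"]}
    }
}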
@@ -22,7 +22,7 @@ package org.elasticsearch.action.admin.indices.stats;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

@@ -32,13 +32,13 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.index.translog.TranslogStats;
import org.elasticsearch.index.warmer.WarmerStats;

@@ -47,9 +47,55 @@ import org.elasticsearch.search.suggest.completion.CompletionStats;

import java.io.IOException;

/**
 */
public class CommonStats implements Streamable, ToXContent {
public class CommonStats implements Writeable, ToXContent {

    @Nullable
    public DocsStats docs;

    @Nullable
    public StoreStats store;

    @Nullable
    public IndexingStats indexing;

    @Nullable
    public GetStats get;

    @Nullable
    public SearchStats search;

    @Nullable
    public MergeStats merge;

    @Nullable
    public RefreshStats refresh;

    @Nullable
    public FlushStats flush;

    @Nullable
    public WarmerStats warmer;

    @Nullable
    public QueryCacheStats queryCache;

    @Nullable
    public FieldDataStats fieldData;

    @Nullable
    public CompletionStats completion;

    @Nullable
    public SegmentsStats segments;

    @Nullable
    public TranslogStats translog;

    @Nullable
    public RequestCacheStats requestCache;

    @Nullable
    public RecoveryStats recoveryStats;

    public CommonStats() {
        this(CommonStatsFlags.NONE);

@@ -117,11 +163,8 @@ public class CommonStats implements Streamable, ToXContent {
        }
    }


    public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {

        CommonStatsFlags.Flag[] setFlags = flags.getFlags();

        for (CommonStatsFlags.Flag flag : setFlags) {
            switch (flag) {
                case Docs:

@@ -181,53 +224,135 @@ public class CommonStats implements Streamable, ToXContent {
        }
    }

    @Nullable
    public DocsStats docs;
    public CommonStats(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            docs = DocsStats.readDocStats(in);
        }
        if (in.readBoolean()) {
            store = StoreStats.readStoreStats(in);
        }
        if (in.readBoolean()) {
            indexing = IndexingStats.readIndexingStats(in);
        }
        if (in.readBoolean()) {
            get = GetStats.readGetStats(in);
        }
        if (in.readBoolean()) {
            search = SearchStats.readSearchStats(in);
        }
        if (in.readBoolean()) {
            merge = MergeStats.readMergeStats(in);
        }
        if (in.readBoolean()) {
            refresh = RefreshStats.readRefreshStats(in);
        }
        if (in.readBoolean()) {
            flush = FlushStats.readFlushStats(in);
        }
        if (in.readBoolean()) {
            warmer = WarmerStats.readWarmerStats(in);
        }
        if (in.readBoolean()) {
            queryCache = QueryCacheStats.readQueryCacheStats(in);
        }
        if (in.readBoolean()) {
            fieldData = FieldDataStats.readFieldDataStats(in);
        }
        if (in.readBoolean()) {
            completion = CompletionStats.readCompletionStats(in);
        }
        if (in.readBoolean()) {
            segments = SegmentsStats.readSegmentsStats(in);
        }
        translog = in.readOptionalStreamable(TranslogStats::new);
        requestCache = in.readOptionalStreamable(RequestCacheStats::new);
        recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
    }

    @Nullable
    public StoreStats store;

    @Nullable
    public IndexingStats indexing;

    @Nullable
    public GetStats get;

    @Nullable
    public SearchStats search;

    @Nullable
    public MergeStats merge;

    @Nullable
    public RefreshStats refresh;

    @Nullable
    public FlushStats flush;

    @Nullable
    public WarmerStats warmer;

    @Nullable
    public QueryCacheStats queryCache;

    @Nullable
    public FieldDataStats fieldData;

    @Nullable
    public CompletionStats completion;

    @Nullable
    public SegmentsStats segments;

    @Nullable
    public TranslogStats translog;

    @Nullable
    public RequestCacheStats requestCache;

    @Nullable
    public RecoveryStats recoveryStats;
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (docs == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            docs.writeTo(out);
        }
        if (store == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            store.writeTo(out);
        }
        if (indexing == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            indexing.writeTo(out);
        }
        if (get == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            get.writeTo(out);
        }
        if (search == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            search.writeTo(out);
        }
        if (merge == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            merge.writeTo(out);
        }
        if (refresh == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            refresh.writeTo(out);
        }
        if (flush == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            flush.writeTo(out);
        }
        if (warmer == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            warmer.writeTo(out);
        }
        if (queryCache == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            queryCache.writeTo(out);
        }
        if (fieldData == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            fieldData.writeTo(out);
        }
        if (completion == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            completion.writeTo(out);
        }
        if (segments == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            segments.writeTo(out);
        }
        out.writeOptionalStreamable(translog);
        out.writeOptionalStreamable(requestCache);
        out.writeOptionalStreamable(recoveryStats);
    }

    public void add(CommonStats stats) {
        if (docs == null) {

@@ -441,12 +566,6 @@ public class CommonStats implements Streamable, ToXContent {
        return recoveryStats;
    }

    public static CommonStats readCommonStats(StreamInput in) throws IOException {
        CommonStats stats = new CommonStats();
        stats.readFrom(in);
        return stats;
    }

    /**
     * Utility method which computes total memory by adding
     * FieldData, PercolatorCache, Segments (memory, index writer, version map)

@@ -468,137 +587,6 @@ public class CommonStats implements Streamable, ToXContent {
        return new ByteSizeValue(size);
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            docs = DocsStats.readDocStats(in);
        }
        if (in.readBoolean()) {
            store = StoreStats.readStoreStats(in);
        }
        if (in.readBoolean()) {
            indexing = IndexingStats.readIndexingStats(in);
        }
        if (in.readBoolean()) {
            get = GetStats.readGetStats(in);
        }
        if (in.readBoolean()) {
            search = SearchStats.readSearchStats(in);
        }
        if (in.readBoolean()) {
            merge = MergeStats.readMergeStats(in);
        }
        if (in.readBoolean()) {
            refresh = RefreshStats.readRefreshStats(in);
        }
        if (in.readBoolean()) {
            flush = FlushStats.readFlushStats(in);
        }
        if (in.readBoolean()) {
            warmer = WarmerStats.readWarmerStats(in);
        }
        if (in.readBoolean()) {
            queryCache = QueryCacheStats.readQueryCacheStats(in);
        }
        if (in.readBoolean()) {
            fieldData = FieldDataStats.readFieldDataStats(in);
        }
        if (in.readBoolean()) {
            completion = CompletionStats.readCompletionStats(in);
        }
        if (in.readBoolean()) {
            segments = SegmentsStats.readSegmentsStats(in);
        }
        translog = in.readOptionalStreamable(TranslogStats::new);
        requestCache = in.readOptionalStreamable(RequestCacheStats::new);
        recoveryStats = in.readOptionalStreamable(RecoveryStats::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (docs == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            docs.writeTo(out);
        }
        if (store == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            store.writeTo(out);
        }
        if (indexing == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            indexing.writeTo(out);
        }
        if (get == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            get.writeTo(out);
        }
        if (search == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            search.writeTo(out);
        }
        if (merge == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            merge.writeTo(out);
        }
        if (refresh == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            refresh.writeTo(out);
        }
        if (flush == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            flush.writeTo(out);
        }
        if (warmer == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            warmer.writeTo(out);
        }
        if (queryCache == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            queryCache.writeTo(out);
        }
        if (fieldData == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            fieldData.writeTo(out);
        }
        if (completion == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            completion.writeTo(out);
        }
        if (segments == null) {
            out.writeBoolean(false);
        } else {
            out.writeBoolean(true);
            segments.writeTo(out);
        }
        out.writeOptionalStreamable(translog);
        out.writeOptionalStreamable(requestCache);
        out.writeOptionalStreamable(recoveryStats);
    }

    // note, requires a wrapping object
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
@@ -19,17 +19,15 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;

import java.io.IOException;
import java.util.Collections;
import java.util.EnumSet;

/**
 */
public class CommonStatsFlags implements Streamable, Cloneable {
public class CommonStatsFlags implements Writeable, Cloneable {

    public static final CommonStatsFlags ALL = new CommonStatsFlags().all();
    public static final CommonStatsFlags NONE = new CommonStatsFlags().clear();

@@ -41,19 +39,45 @@ public class CommonStatsFlags implements Streamable, Cloneable {
    private String[] completionDataFields = null;
    private boolean includeSegmentFileSizes = false;


    /**
     * @param flags flags to set. If no flags are supplied, default flags will be set.
     */
    public CommonStatsFlags(Flag... flags) {
        if (flags.length > 0) {
            clear();
            for (Flag f : flags) {
                this.flags.add(f);
            }
            Collections.addAll(this.flags, flags);
        }
    }

    public CommonStatsFlags(StreamInput in) throws IOException {
        final long longFlags = in.readLong();
        flags.clear();
        for (Flag flag : Flag.values()) {
            if ((longFlags & (1 << flag.ordinal())) != 0) {
                flags.add(flag);
            }
        }
        types = in.readStringArray();
        groups = in.readStringArray();
        fieldDataFields = in.readStringArray();
        completionDataFields = in.readStringArray();
        includeSegmentFileSizes = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        long longFlags = 0;
        for (Flag flag : flags) {
            longFlags |= (1 << flag.ordinal());
        }
        out.writeLong(longFlags);

        out.writeStringArrayNullable(types);
        out.writeStringArrayNullable(groups);
        out.writeStringArrayNullable(fieldDataFields);
        out.writeStringArrayNullable(completionDataFields);
        out.writeBoolean(includeSegmentFileSizes);
    }

    /**
     * Sets all flags to return all stats.
@ -162,7 +186,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
|
|||
flags.add(flag);
|
||||
}
|
||||
|
||||
|
||||
public CommonStatsFlags set(Flag flag, boolean add) {
|
||||
if (add) {
|
||||
set(flag);
|
||||
|
@ -172,49 +195,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
|
|||
return this;
|
||||
}
|
||||
|
||||
public static CommonStatsFlags readCommonStatsFlags(StreamInput in) throws IOException {
|
||||
CommonStatsFlags flags = new CommonStatsFlags();
|
||||
flags.readFrom(in);
|
||||
return flags;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void writeTo(StreamOutput out) throws IOException {
|
||||
long longFlags = 0;
|
||||
for (Flag flag : flags) {
|
||||
longFlags |= (1 << flag.ordinal());
|
||||
}
|
||||
out.writeLong(longFlags);
|
||||
|
||||
out.writeStringArrayNullable(types);
|
||||
out.writeStringArrayNullable(groups);
|
||||
out.writeStringArrayNullable(fieldDataFields);
|
||||
out.writeStringArrayNullable(completionDataFields);
|
||||
if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
|
||||
out.writeBoolean(includeSegmentFileSizes);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public void readFrom(StreamInput in) throws IOException {
|
||||
final long longFlags = in.readLong();
|
||||
flags.clear();
|
||||
for (Flag flag : Flag.values()) {
|
||||
if ((longFlags & (1 << flag.ordinal())) != 0) {
|
||||
flags.add(flag);
|
||||
}
|
||||
}
|
||||
types = in.readStringArray();
|
||||
groups = in.readStringArray();
|
||||
fieldDataFields = in.readStringArray();
|
||||
completionDataFields = in.readStringArray();
|
||||
if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha1)) {
|
||||
includeSegmentFileSizes = in.readBoolean();
|
||||
} else {
|
||||
includeSegmentFileSizes = false;
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public CommonStatsFlags clone() {
|
||||
try {
|
||||
|
@ -226,7 +206,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
|
|||
}
|
||||
}
|
||||
|
||||
public static enum Flag {
|
||||
public enum Flag {
|
||||
// Do not change the order of these flags we use
|
||||
// the ordinal for encoding! Only append to the end!
|
||||
Store("store"),
|
||||
|
@ -247,7 +227,6 @@ public class CommonStatsFlags implements Streamable, Cloneable {
|
|||
RequestCache("request_cache"),
|
||||
Recovery("recovery");
|
||||
|
||||
|
||||
private final String restName;
|
||||
|
||||
Flag(String restName) {
|
||||
|
@ -257,6 +236,5 @@ public class CommonStatsFlags implements Streamable, Cloneable {
|
|||
public String getRestName() {
|
||||
return restName;
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
|
|
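The flag set travels as a single long with one bit per enum ordinal, which is why the Flag comment insists on append-only ordering. A self-contained sketch of the same encoding (hypothetical Flag constants, plain Java):

    import java.util.EnumSet;

    enum Flag { STORE, INDEXING, GET }  // hypothetical stand-in for CommonStatsFlags.Flag

    final class FlagCodec {
        // Pack the set into a long: bit i is set iff the flag with ordinal i is present.
        static long encode(EnumSet<Flag> flags) {
            long bits = 0;
            for (Flag f : flags) {
                bits |= 1L << f.ordinal();
            }
            return bits;
        }

        // Rebuild the set by probing each ordinal's bit.
        static EnumSet<Flag> decode(long bits) {
            EnumSet<Flag> flags = EnumSet.noneOf(Flag.class);
            for (Flag f : Flag.values()) {
                if ((bits & (1L << f.ordinal())) != 0) {
                    flags.add(f);
                }
            }
            return flags;
        }
    }

Reordering or inserting constants would silently change which bit each flag occupies, which is exactly the mixed-version-cluster breakage the comment warns about.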
@@ -274,6 +274,6 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        flags = CommonStatsFlags.readCommonStatsFlags(in);
        flags = new CommonStatsFlags(in);
    }
}
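The readCommonStatsFlags helper disappears because Writeable replaces Streamable's two-step allocate-then-readFrom dance with a constructor that consumes the stream, letting deserialized state be final. The two shapes side by side, sketched with java.io.DataInput standing in for StreamInput:

    import java.io.DataInput;
    import java.io.IOException;

    // Old style (Streamable-like): mutate a blank instance after construction.
    class OldFlags {
        long bits;
        void readFrom(DataInput in) throws IOException {
            bits = in.readLong();
        }
        static OldFlags read(DataInput in) throws IOException {
            OldFlags flags = new OldFlags();   // exists before it is valid
            flags.readFrom(in);
            return flags;
        }
    }

    // New style (Writeable-like): the reading constructor builds a complete object.
    class NewFlags {
        final long bits;                       // can now be final
        NewFlags(DataInput in) throws IOException {
            bits = in.readLong();
        }
    }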
@@ -90,7 +90,7 @@ public class ShardStats implements Streamable, ToXContent {
    @Override
    public void readFrom(StreamInput in) throws IOException {
        shardRouting = new ShardRouting(in);
        commonStats = CommonStats.readCommonStats(in);
        commonStats = new CommonStats(in);
        commitStats = CommitStats.readOptionalCommitStatsFrom(in);
        statePath = in.readString();
        dataPath = in.readString();
@@ -27,9 +27,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static java.util.Collections.singletonMap;

@@ -74,6 +74,8 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR

    private Map<String, IndexMetaData.Custom> customs = new HashMap<>();

    private Integer version;

    public PutIndexTemplateRequest() {
    }

@@ -129,6 +131,15 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        return this.order;
    }

    public PutIndexTemplateRequest version(Integer version) {
        this.version = version;
        return this;
    }

    public Integer version() {
        return this.version;
    }

    /**
     * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.

@@ -278,16 +289,23 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
                template(entry.getValue().toString());
            } else if (name.equals("order")) {
                order(XContentMapValues.nodeIntegerValue(entry.getValue(), order()));
            } else if ("version".equals(name)) {
                if ((entry.getValue() instanceof Integer) == false) {
                    throw new IllegalArgumentException("Malformed [version] value, should be an integer");
                }
                version((Integer)entry.getValue());
            } else if (name.equals("settings")) {
                if (!(entry.getValue() instanceof Map)) {
                    throw new IllegalArgumentException("Malformed settings section, should include an inner object");
                    throw new IllegalArgumentException("Malformed [settings] section, should include an inner object");
                }
                settings((Map<String, Object>) entry.getValue());
            } else if (name.equals("mappings")) {
                Map<String, Object> mappings = (Map<String, Object>) entry.getValue();
                for (Map.Entry<String, Object> entry1 : mappings.entrySet()) {
                    if (!(entry1.getValue() instanceof Map)) {
                        throw new IllegalArgumentException("Malformed mappings section for type [" + entry1.getKey() + "], should include an inner object describing the mapping");
                        throw new IllegalArgumentException(
                            "Malformed [mappings] section for type [" + entry1.getKey() +
                                "], should include an inner object describing the mapping");
                    }
                    mapping(entry1.getKey(), (Map<String, Object>) entry1.getValue());
                }

@@ -449,6 +467,7 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        for (int i = 0; i < aliasesSize; i++) {
            aliases.add(Alias.read(in));
        }
        version = in.readOptionalVInt();
    }

    @Override

@@ -474,5 +493,6 @@ public class PutIndexTemplateRequest extends MasterNodeRequest<PutIndexTemplateR
        for (Alias alias : aliases) {
            alias.writeTo(out);
        }
        out.writeOptionalVInt(version);
    }
}
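The new [version] key is parsed strictly: any non-Integer value is rejected rather than coerced. A minimal standalone version of that check (hypothetical helper, plain Java):

    import java.util.Map;

    final class TemplateSourceChecks {
        // Mirrors the strict check above: accept only a literal integer for [version].
        static Integer versionOf(Map<String, Object> source) {
            Object value = source.get("version");
            if (value == null) {
                return null;                   // version stays optional
            }
            if ((value instanceof Integer) == false) {
                throw new IllegalArgumentException("Malformed [version] value, should be an integer");
            }
            return (Integer) value;
        }
    }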
@@ -30,7 +30,8 @@ import java.util.Map;
/**
 *
 */
public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBuilder<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {
public class PutIndexTemplateRequestBuilder
    extends MasterNodeOperationRequestBuilder<PutIndexTemplateRequest, PutIndexTemplateResponse, PutIndexTemplateRequestBuilder> {

    public PutIndexTemplateRequestBuilder(ElasticsearchClient client, PutIndexTemplateAction action) {
        super(client, action, new PutIndexTemplateRequest());

@@ -56,6 +57,14 @@ public class PutIndexTemplateRequestBuilder extends MasterNodeOperationRequestBu
        return this;
    }

    /**
     * Sets the optional version of this template.
     */
    public PutIndexTemplateRequestBuilder setVersion(Integer version) {
        request.version(version);
        return this;
    }

    /**
     * Set to <tt>true</tt> to force only creation, not an update of an index template. If it already
     * exists, it will fail with an {@link org.elasticsearch.indices.IndexTemplateAlreadyExistsException}.

@@ -86,7 +86,8 @@ public class TransportPutIndexTemplateAction extends TransportMasterNodeAction<P
                .aliases(request.aliases())
                .customs(request.customs())
                .create(request.create())
                .masterTimeout(request.masterNodeTimeout()),
                .masterTimeout(request.masterNodeTimeout())
                .version(request.version()),

            new MetaDataIndexTemplateService.PutListener() {
                @Override

@@ -38,17 +38,11 @@ import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.query.ParsedQuery;
import org.elasticsearch.index.query.QueryShardException;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.tasks.Task;

@@ -67,25 +61,15 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
 */
public class TransportValidateQueryAction extends TransportBroadcastAction<ValidateQueryRequest, ValidateQueryResponse, ShardValidateQueryRequest, ShardValidateQueryResponse> {

    private final IndicesService indicesService;

    private final ScriptService scriptService;

    private final BigArrays bigArrays;

    private final FetchPhase fetchPhase;
    private final SearchService searchService;

    @Inject
    public TransportValidateQueryAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                        TransportService transportService, IndicesService indicesService, ScriptService scriptService,
                                        BigArrays bigArrays, ActionFilters actionFilters,
                                        IndexNameExpressionResolver indexNameExpressionResolver, FetchPhase fetchPhase) {
                                        TransportService transportService, SearchService searchService, ActionFilters actionFilters,
                                        IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ValidateQueryAction.NAME, threadPool, clusterService, transportService, actionFilters,
            indexNameExpressionResolver, ValidateQueryRequest::new, ShardValidateQueryRequest::new, ThreadPool.Names.SEARCH);
        this.indicesService = indicesService;
        this.scriptService = scriptService;
        this.bigArrays = bigArrays;
        this.fetchPhase = fetchPhase;
        this.searchService = searchService;
    }

    @Override

@@ -161,29 +145,20 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid

    @Override
    protected ShardValidateQueryResponse shardOperation(ShardValidateQueryRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.getShard(request.shardId().id());

        boolean valid;
        String explanation = null;
        String error = null;
        Engine.Searcher searcher = indexShard.acquireSearcher("validate_query");

        DefaultSearchContext searchContext = new DefaultSearchContext(0,
            new ShardSearchLocalRequest(request.types(), request.nowInMillis(), request.filteringAliases()), null, searcher,
            indexService, indexShard, scriptService, bigArrays, threadPool.estimatedTimeInMillisCounter(),
            parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(request.shardId(), request.types(),
            request.nowInMillis(), request.filteringAliases());
        SearchContext searchContext = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
        SearchContext.setCurrent(searchContext);
        try {
            searchContext.parsedQuery(searchContext.getQueryShardContext().toQuery(request.query()));
            searchContext.preProcess();

            ParsedQuery parsedQuery = searchContext.getQueryShardContext().toQuery(request.query());
            searchContext.parsedQuery(parsedQuery);
            searchContext.preProcess(request.rewrite());
            valid = true;
            if (request.rewrite()) {
                explanation = getRewrittenQuery(searcher.searcher(), searchContext.query());
            } else if (request.explain()) {
                explanation = searchContext.filteredQuery().query().toString();
            }
            explanation = explain(searchContext, request.rewrite());
        } catch (QueryShardException|ParsingException e) {
            valid = false;
            error = e.getDetailedMessage();

@@ -191,19 +166,18 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
            valid = false;
            error = e.getMessage();
        } finally {
            searchContext.close();
            SearchContext.removeCurrent();
            Releasables.close(searchContext, () -> SearchContext.removeCurrent());
        }

        return new ShardValidateQueryResponse(request.shardId(), valid, explanation, error);
    }

    private String getRewrittenQuery(IndexSearcher searcher, Query query) throws IOException {
        Query queryRewrite = searcher.rewrite(query);
        if (queryRewrite instanceof MatchNoDocsQuery) {
            return query.toString();
    private String explain(SearchContext context, boolean rewritten) throws IOException {
        Query query = context.query();
        if (rewritten && query instanceof MatchNoDocsQuery) {
            return context.parsedQuery().query().toString();
        } else {
            return queryRewrite.toString();
            return query.toString();
        }
    }
}
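Collapsing the two manual finally-block calls into Releasables.close means every cleanup step still runs even if an earlier one throws. Plain try-with-resources gives a comparable guarantee, sketched here (note resources close in reverse declaration order, a detail the Releasables variant handles for you):

    // Sketch: close several resources without losing the later ones when an
    // earlier close() throws, the shape of
    // Releasables.close(searchContext, () -> SearchContext.removeCurrent()).
    final class CleanupSketch {
        static void run(AutoCloseable searchContext, Runnable removeCurrent) throws Exception {
            try (AutoCloseable ctx = searchContext;
                 AutoCloseable current = removeCurrent::run) {
                // work with ctx here
            } // both close() calls run, even if one of them throws
        }
    }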
@@ -293,7 +293,7 @@ public class BulkProcessor implements Closeable {
    }

    public synchronized BulkProcessor add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultPipeline, @Nullable Object payload) throws Exception {
        bulkRequest.add(data, defaultIndex, defaultType, null, null, defaultPipeline, payload, true);
        bulkRequest.add(data, defaultIndex, defaultType, null, null, null, defaultPipeline, payload, true);
        executeIfNeeded();
        return this;
    }

@@ -35,12 +35,15 @@ import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.logging.DeprecationLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.lucene.uid.Versions;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.XContent;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.index.VersionType;
import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

import java.io.IOException;
import java.util.ArrayList;

@@ -57,6 +60,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
 * @see org.elasticsearch.client.Client#bulk(BulkRequest)
 */
public class BulkRequest extends ActionRequest<BulkRequest> implements CompositeIndicesRequest, WriteRequest<BulkRequest> {
    private static final DeprecationLogger DEPRECATION_LOGGER =
        new DeprecationLogger(Loggers.getLogger(BulkRequest.class));

    private static final int REQUEST_OVERHEAD = 50;

@@ -257,17 +262,17 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
     * Adds a framed data in binary format
     */
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType) throws Exception {
        return add(data, defaultIndex, defaultType, null, null, null, null, true);
        return add(data, defaultIndex, defaultType, null, null, null, null, null, true);
    }

    /**
     * Adds a framed data in binary format
     */
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, boolean allowExplicitIndex) throws Exception {
        return add(data, defaultIndex, defaultType, null, null, null, null, allowExplicitIndex);
        return add(data, defaultIndex, defaultType, null, null, null, null, null, allowExplicitIndex);
    }

    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
    public BulkRequest add(BytesReference data, @Nullable String defaultIndex, @Nullable String defaultType, @Nullable String defaultRouting, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSourceContext, @Nullable String defaultPipeline, @Nullable Object payload, boolean allowExplicitIndex) throws Exception {
        XContent xContent = XContentFactory.xContent(data);
        int line = 0;
        int from = 0;

@@ -301,6 +306,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                String id = null;
                String routing = defaultRouting;
                String parent = null;
                FetchSourceContext fetchSourceContext = defaultFetchSourceContext;
                String[] fields = defaultFields;
                String timestamp = null;
                TimeValue ttl = null;

@@ -353,16 +359,21 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                                pipeline = parser.text();
                            } else if ("fields".equals(currentFieldName)) {
                                throw new IllegalArgumentException("Action/metadata line [" + line + "] contains a simple value for parameter [fields] while a list is expected");
                            } else if ("_source".equals(currentFieldName)) {
                                fetchSourceContext = FetchSourceContext.parse(parser);
                            } else {
                                throw new IllegalArgumentException("Action/metadata line [" + line + "] contains an unknown parameter [" + currentFieldName + "]");
                            }
                        } else if (token == XContentParser.Token.START_ARRAY) {
                            if ("fields".equals(currentFieldName)) {
                                DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
                                List<Object> values = parser.list();
                                fields = values.toArray(new String[values.size()]);
                            } else {
                                throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                            }
                        } else if (token == XContentParser.Token.START_OBJECT && "_source".equals(currentFieldName)) {
                            fetchSourceContext = FetchSourceContext.parse(parser);
                        } else if (token != XContentParser.Token.VALUE_NULL) {
                            throw new IllegalArgumentException("Malformed action/metadata line [" + line + "], expected a simple value for field [" + currentFieldName + "] but found [" + token + "]");
                        }

@@ -402,7 +413,10 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
                            .version(version).versionType(versionType)
                            .routing(routing)
                            .parent(parent)
                            .source(data.slice(from, nextMarker - from));
                            .fromXContent(data.slice(from, nextMarker - from));
                    if (fetchSourceContext != null) {
                        updateRequest.fetchSource(fetchSourceContext);
                    }
                    if (fields != null) {
                        updateRequest.fields(fields);
                    }
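For bulk update actions the per-item [fields] array is still accepted but now logs a deprecation warning and steers callers to [_source], so old clients keep working while the log nags. A toy version of that warn-once-and-continue pattern (names hypothetical; the real DeprecationLogger does more than this):

    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;

    // Toy deprecation logger: warn once per distinct message, never throw.
    final class DeprecationWarnings {
        private static final Set<String> SEEN = ConcurrentHashMap.newKeySet();

        static void deprecated(String message) {
            if (SEEN.add(message)) {               // first sighting only
                System.err.println("[deprecation] " + message);
            }
        }
    }

Parsing a [fields] array would call DeprecationWarnings.deprecated(...) and then fall through to the legacy handling, the same shape as the DEPRECATION_LOGGER call above.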
@@ -251,7 +251,8 @@ public class TransportShardBulkAction extends TransportWriteAction<BulkShardRequ
                    // add the response
                    IndexResponse indexResponse = result.getResponse();
                    UpdateResponse updateResponse = new UpdateResponse(indexResponse.getShardInfo(), indexResponse.getShardId(), indexResponse.getType(), indexResponse.getId(), indexResponse.getVersion(), indexResponse.getResult());
                    if (updateRequest.fields() != null && updateRequest.fields().length > 0) {
                    if ((updateRequest.fetchSource() != null && updateRequest.fetchSource().fetchSource()) ||
                        (updateRequest.fields() != null && updateRequest.fields().length > 0)) {
                        Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(indexSourceAsBytes, true);
                        updateResponse.setGetResult(updateHelper.extractGetResult(updateRequest, request.index(), indexResponse.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), indexSourceAsBytes));
                    }

@@ -40,7 +40,7 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
    private String routing;
    private String preference;
    private QueryBuilder query;
    private String[] fields;
    private String[] storedFields;
    private FetchSourceContext fetchSourceContext;

    private String[] filteringAlias = Strings.EMPTY_ARRAY;

@@ -122,12 +122,12 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
    }

    public String[] fields() {
        return fields;
    public String[] storedFields() {
        return storedFields;
    }

    public ExplainRequest fields(String[] fields) {
        this.fields = fields;
    public ExplainRequest storedFields(String[] fields) {
        this.storedFields = fields;
        return this;
    }

@@ -167,8 +167,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
        preference = in.readOptionalString();
        query = in.readNamedWriteable(QueryBuilder.class);
        filteringAlias = in.readStringArray();
        fields = in.readOptionalStringArray();
        fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
        storedFields = in.readOptionalStringArray();
        fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
        nowInMillis = in.readVLong();
    }

@@ -181,8 +181,8 @@ public class ExplainRequest extends SingleShardRequest<ExplainRequest> {
        out.writeOptionalString(preference);
        out.writeNamedWriteable(query);
        out.writeStringArray(filteringAlias);
        out.writeOptionalStringArray(fields);
        out.writeOptionalStreamable(fetchSourceContext);
        out.writeOptionalStringArray(storedFields);
        out.writeOptionalWriteable(fetchSourceContext);
        out.writeVLong(nowInMillis);
    }
}
@@ -88,10 +88,10 @@ public class ExplainRequestBuilder extends SingleShardOperationRequestBuilder<Ex
    }

    /**
     * Explicitly specify the fields that will be returned for the explained document. By default, nothing is returned.
     * Explicitly specify the stored fields that will be returned for the explained document. By default, nothing is returned.
     */
    public ExplainRequestBuilder setFields(String... fields) {
        request.fields(fields);
    public ExplainRequestBuilder setStoredFields(String... fields) {
        request.storedFields(fields);
        return this;
    }

@@ -31,20 +31,14 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.lease.Releasables;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Engine;
import org.elasticsearch.index.get.GetResult;
import org.elasticsearch.index.mapper.Uid;
import org.elasticsearch.index.mapper.UidFieldMapper;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.script.ScriptService;
import org.elasticsearch.search.SearchService;
import org.elasticsearch.search.fetch.FetchPhase;
import org.elasticsearch.search.internal.DefaultSearchContext;
import org.elasticsearch.search.internal.SearchContext;
import org.elasticsearch.search.internal.ShardSearchLocalRequest;
import org.elasticsearch.search.rescore.RescoreSearchContext;

@@ -60,26 +54,15 @@ import java.io.IOException;
// TODO: AggregatedDfs. Currently the idf can be different then when executing a normal search with explain.
public class TransportExplainAction extends TransportSingleShardAction<ExplainRequest, ExplainResponse> {

    private final IndicesService indicesService;

    private final ScriptService scriptService;

    private final BigArrays bigArrays;

    private final FetchPhase fetchPhase;
    private final SearchService searchService;

    @Inject
    public TransportExplainAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                  TransportService transportService, IndicesService indicesService, ScriptService scriptService,
                                  BigArrays bigArrays, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
                                  FetchPhase fetchPhase) {
                                  TransportService transportService, SearchService searchService, ActionFilters actionFilters,
                                  IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, ExplainAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
            ExplainRequest::new, ThreadPool.Names.GET);
        this.indicesService = indicesService;
        this.scriptService = scriptService;
        this.bigArrays = bigArrays;
        this.fetchPhase = fetchPhase;
        this.searchService = searchService;
    }

    @Override

@@ -104,34 +87,30 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe

    @Override
    protected ExplainResponse shardOperation(ExplainRequest request, ShardId shardId) {
        IndexService indexService = indicesService.indexServiceSafe(shardId.getIndex());
        IndexShard indexShard = indexService.getShard(shardId.id());
        ShardSearchLocalRequest shardSearchLocalRequest = new ShardSearchLocalRequest(shardId,
            new String[]{request.type()}, request.nowInMillis, request.filteringAlias());
        SearchContext context = searchService.createSearchContext(shardSearchLocalRequest, SearchService.NO_TIMEOUT, null);
        Term uidTerm = new Term(UidFieldMapper.NAME, Uid.createUidAsBytes(request.type(), request.id()));
        Engine.GetResult result = indexShard.get(new Engine.Get(false, uidTerm));
        if (!result.exists()) {
            return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
        }

        SearchContext context = new DefaultSearchContext(0,
            new ShardSearchLocalRequest(new String[] { request.type() }, request.nowInMillis, request.filteringAlias()), null,
            result.searcher(), indexService, indexShard, scriptService, bigArrays,
            threadPool.estimatedTimeInMillisCounter(), parseFieldMatcher, SearchService.NO_TIMEOUT, fetchPhase);
        SearchContext.setCurrent(context);

        Engine.GetResult result = null;
        try {
            result = context.indexShard().get(new Engine.Get(false, uidTerm));
            if (!result.exists()) {
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), false);
            }
            context.parsedQuery(context.getQueryShardContext().toQuery(request.query()));
            context.preProcess();
            context.preProcess(true);
            int topLevelDocId = result.docIdAndVersion().docId + result.docIdAndVersion().context.docBase;
            Explanation explanation = context.searcher().explain(context.query(), topLevelDocId);
            for (RescoreSearchContext ctx : context.rescore()) {
                Rescorer rescorer = ctx.rescorer();
                explanation = rescorer.explain(topLevelDocId, context, ctx, explanation);
            }
            if (request.fields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
            if (request.storedFields() != null || (request.fetchSourceContext() != null && request.fetchSourceContext().fetchSource())) {
                // Advantage is that we're not opening a second searcher to retrieve the _source. Also
                // because we are working in the same searcher in engineGetResult we can be sure that a
                // doc isn't deleted between the initial get and this call.
                GetResult getResult = indexShard.getService().get(result, request.id(), request.type(), request.fields(), request.fetchSourceContext());
                GetResult getResult = context.indexShard().getService().get(result, request.id(), request.type(), request.storedFields(), request.fetchSourceContext());
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation, getResult);
            } else {
                return new ExplainResponse(shardId.getIndexName(), request.type(), request.id(), true, explanation);

@@ -139,8 +118,7 @@ public class TransportExplainAction extends TransportSingleShardAction<ExplainRe
        } catch (IOException e) {
            throw new ElasticsearchException("Could not explain", e);
        } finally {
            context.close();
            SearchContext.removeCurrent();
            Releasables.close(result, context, () -> SearchContext.removeCurrent());
        }
    }

@@ -34,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.net.InetAddress;
import java.util.Objects;

public abstract class FieldStats<T> implements Writeable, ToXContent {
    private final byte type;

@@ -46,13 +47,11 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
    protected T minValue;
    protected T maxValue;

    FieldStats(byte type, long maxDoc, boolean isSearchable, boolean isAggregatable) {
        this(type, maxDoc, 0, 0, 0, isSearchable, isAggregatable, null, null);
    }

    FieldStats(byte type,
               long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
               boolean isSearchable, boolean isAggregatable, T minValue, T maxValue) {
        Objects.requireNonNull(minValue, "minValue must not be null");
        Objects.requireNonNull(maxValue, "maxValue must not be null");
        this.type = type;
        this.maxDoc = maxDoc;
        this.docCount = docCount;

@@ -220,14 +219,10 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
    }

    private void updateMinMax(T min, T max) {
        if (minValue == null) {
            minValue = min;
        } else if (min != null && compare(minValue, min) > 0) {
        if (compare(minValue, min) > 0) {
            minValue = min;
        }
        if (maxValue == null) {
            maxValue = max;
        } else if (max != null && compare(maxValue, max) < 0) {
        if (compare(maxValue, max) < 0) {
            maxValue = max;
        }
    }

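updateMinMax can drop its null branches only because the surviving constructor now enforces non-null bounds with Objects.requireNonNull, turning min/max into an invariant instead of a case to handle. Compressed illustration (generic stand-in class, not the real FieldStats):

    import java.util.Objects;

    // Sketch: once the constructor rejects null bounds, merging is two compares.
    final class MinMax<T extends Comparable<T>> {
        private T min;
        private T max;

        MinMax(T min, T max) {
            this.min = Objects.requireNonNull(min, "minValue must not be null");
            this.max = Objects.requireNonNull(max, "maxValue must not be null");
        }

        void update(T otherMin, T otherMax) {
            if (min.compareTo(otherMin) > 0) {
                min = otherMin;    // other stats had a smaller minimum
            }
            if (max.compareTo(otherMax) < 0) {
                max = otherMax;    // other stats had a larger maximum
            }
        }
    }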
@@ -266,11 +261,7 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
        out.writeLong(sumTotalTermFreq);
        out.writeBoolean(isSearchable);
        out.writeBoolean(isAggregatable);
        boolean hasMinMax = minValue != null;
        out.writeBoolean(hasMinMax);
        if (hasMinMax) {
            writeMinMax(out);
        }
        writeMinMax(out);
    }

    protected abstract void writeMinMax(StreamOutput out) throws IOException;

@@ -280,9 +271,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
     * otherwise <code>false</code> is returned
     */
    public boolean match(IndexConstraint constraint) {
        if (minValue == null) {
            return false;
        }
        int cmp;
        T value = valueOf(constraint.getValue(), constraint.getOptionalFormat());
        if (constraint.getProperty() == IndexConstraint.Property.MIN) {

@@ -307,6 +295,31 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
        }
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (o == null || getClass() != o.getClass()) return false;

        FieldStats<?> that = (FieldStats<?>) o;

        if (type != that.type) return false;
        if (maxDoc != that.maxDoc) return false;
        if (docCount != that.docCount) return false;
        if (sumDocFreq != that.sumDocFreq) return false;
        if (sumTotalTermFreq != that.sumTotalTermFreq) return false;
        if (isSearchable != that.isSearchable) return false;
        if (isAggregatable != that.isAggregatable) return false;
        if (!minValue.equals(that.minValue)) return false;
        return maxValue.equals(that.maxValue);

    }

    @Override
    public int hashCode() {
        return Objects.hash(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
            minValue, maxValue);
    }

    public static class Long extends FieldStats<java.lang.Long> {
        public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                    boolean isSearchable, boolean isAggregatable,

@@ -315,17 +328,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
                isSearchable, isAggregatable, minValue, maxValue);
        }

        public Long(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                    boolean isSearchable, boolean isAggregatable) {
            super((byte) 0, maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                isSearchable, isAggregatable, null, null);
        }

        public Long(long maxDoc,
                    boolean isSearchable, boolean isAggregatable) {
            super((byte) 0, maxDoc, isSearchable, isAggregatable);
        }

        @Override
        public int compare(java.lang.Long o1, java.lang.Long o2) {
            return o1.compareTo(o2);

@@ -344,12 +346,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {

        @Override
        public String getMinValueAsString() {
            return minValue != null ? java.lang.Long.toString(minValue) : null;
            return java.lang.Long.toString(minValue);
        }

        @Override
        public String getMaxValueAsString() {
            return maxValue != null ? java.lang.Long.toString(maxValue) : null;
            return java.lang.Long.toString(maxValue);
        }
    }

@@ -361,15 +363,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
                minValue, maxValue);
        }

        public Double(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                      boolean isSearchable, boolean isAggregatable) {
            super((byte) 1, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable, null, null);
        }

        public Double(long maxDoc, boolean isSearchable, boolean isAggregatable) {
            super((byte) 1, maxDoc, isSearchable, isAggregatable);
        }

        @Override
        public int compare(java.lang.Double o1, java.lang.Double o2) {
            return o1.compareTo(o2);

@@ -391,12 +384,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {

        @Override
        public String getMinValueAsString() {
            return minValue != null ? java.lang.Double.toString(minValue) : null;
            return java.lang.Double.toString(minValue);
        }

        @Override
        public String getMaxValueAsString() {
            return maxValue != null ? java.lang.Double.toString(maxValue) : null;
            return java.lang.Double.toString(maxValue);
        }
    }

@@ -412,20 +405,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
            this.formatter = formatter;
        }

        public Date(long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq,
                    boolean isSearchable, boolean isAggregatable,
                    FormatDateTimeFormatter formatter) {
            super((byte) 2, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, isSearchable, isAggregatable,
                null, null);
            this.formatter = formatter;
        }

        public Date(long maxDoc, boolean isSearchable, boolean isAggregatable,
                    FormatDateTimeFormatter formatter) {
            super((byte) 2, maxDoc, isSearchable, isAggregatable);
            this.formatter = formatter;
        }

        @Override
        public int compare(java.lang.Long o1, java.lang.Long o2) {
            return o1.compareTo(o2);

@@ -449,12 +428,29 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {

        @Override
        public String getMinValueAsString() {
            return minValue != null ? formatter.printer().print(minValue) : null;
            return formatter.printer().print(minValue);
        }

        @Override
        public String getMaxValueAsString() {
            return maxValue != null ? formatter.printer().print(maxValue) : null;
            return formatter.printer().print(maxValue);
        }

        @Override
        public boolean equals(Object o) {
            if (this == o) return true;
            if (o == null || getClass() != o.getClass()) return false;
            if (!super.equals(o)) return false;

            Date that = (Date) o;
            return Objects.equals(formatter.format(), that.formatter.format());
        }

        @Override
        public int hashCode() {
            int result = super.hashCode();
            result = 31 * result + formatter.format().hashCode();
            return result;
        }
    }

@@ -467,10 +463,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
                minValue, maxValue);
        }

        public Text(long maxDoc, boolean isSearchable, boolean isAggregatable) {
            super((byte) 3, maxDoc, isSearchable, isAggregatable);
        }

        @Override
        public int compare(BytesRef o1, BytesRef o2) {
            return o1.compareTo(o2);

@@ -492,12 +484,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {

        @Override
        public String getMinValueAsString() {
            return minValue != null ? minValue.utf8ToString() : null;
            return minValue.utf8ToString();
        }

        @Override
        public String getMaxValueAsString() {
            return maxValue != null ? maxValue.utf8ToString() : null;
            return maxValue.utf8ToString();
        }

        @Override

@@ -516,10 +508,6 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
                minValue, maxValue);
        }

        public Ip(long maxDoc, boolean isSearchable, boolean isAggregatable) {
            super((byte) 4, maxDoc, isSearchable, isAggregatable);
        }

        @Override
        public int compare(InetAddress o1, InetAddress o2) {
            byte[] b1 = InetAddressPoint.encode(o1);

@@ -544,12 +532,12 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {

        @Override
        public String getMinValueAsString() {
            return minValue != null ? NetworkAddress.format(minValue) : null;
            return NetworkAddress.format(minValue);
        }

        @Override
        public String getMaxValueAsString() {
            return maxValue != null ? NetworkAddress.format(maxValue) : null;
            return NetworkAddress.format(maxValue);
        }
    }

@@ -561,53 +549,35 @@ public abstract class FieldStats<T> implements Writeable, ToXContent {
        long sumTotalTermFreq = in.readLong();
        boolean isSearchable = in.readBoolean();
        boolean isAggregatable = in.readBoolean();
        boolean hasMinMax = in.readBoolean();

        switch (type) {
            case 0:
                if (hasMinMax) {
                    return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                        isSearchable, isAggregatable, in.readLong(), in.readLong());
                }
                return new Long(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                    isSearchable, isAggregatable);
                    isSearchable, isAggregatable, in.readLong(), in.readLong());

            case 1:
                if (hasMinMax) {
                    return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                        isSearchable, isAggregatable, in.readDouble(), in.readDouble());
                }
                return new Double(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                    isSearchable, isAggregatable);
                    isSearchable, isAggregatable, in.readDouble(), in.readDouble());

            case 2:
                FormatDateTimeFormatter formatter = Joda.forPattern(in.readString());
                if (hasMinMax) {
                    return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                        isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());
                }
                return new Date(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                    isSearchable, isAggregatable, formatter);
                    isSearchable, isAggregatable, formatter, in.readLong(), in.readLong());

            case 3:
                if (hasMinMax) {
                    return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                        isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());
                }
                return new Text(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                    isSearchable, isAggregatable, null, null);
                    isSearchable, isAggregatable, in.readBytesRef(), in.readBytesRef());

            case 4:
                InetAddress min = null;
                InetAddress max = null;
                if (hasMinMax) {
                    int l1 = in.readByte();
                    byte[] b1 = new byte[l1];
                    int l2 = in.readByte();
                    byte[] b2 = new byte[l2];
                    min = InetAddressPoint.decode(b1);
                    max = InetAddressPoint.decode(b2);
                }
                int l1 = in.readByte();
                byte[] b1 = new byte[l1];
                in.readBytes(b1, 0, l1);
                int l2 = in.readByte();
                byte[] b2 = new byte[l2];
                in.readBytes(b2, 0, l2);
                InetAddress min = InetAddressPoint.decode(b1);
                InetAddress max = InetAddressPoint.decode(b2);
                return new Ip(maxDoc, docCount, sumDocFreq, sumTotalTermFreq,
                    isSearchable, isAggregatable, min, max);

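Note what the Ip branch fixes: the removed variant allocated both buffers but never filled them before decoding. The corrected framing, one length byte followed by the raw address bytes, sketched with JDK types standing in for StreamInput and Lucene's InetAddressPoint:

    import java.io.*;
    import java.net.InetAddress;

    final class IpWireSketch {
        static void write(DataOutputStream out, InetAddress addr) throws IOException {
            byte[] bytes = addr.getAddress();      // 4 bytes for IPv4, 16 for IPv6
            out.writeByte(bytes.length);           // one-byte length prefix
            out.write(bytes);
        }

        static InetAddress read(DataInputStream in) throws IOException {
            int len = in.readByte();
            byte[] bytes = new byte[len];
            in.readFully(bytes);                   // the step the old code skipped
            return InetAddress.getByAddress(bytes);
        }
    }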
@@ -51,7 +51,7 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
    private String parent;
    private String preference;

    private String[] fields;
    private String[] storedFields;

    private FetchSourceContext fetchSourceContext;

@@ -61,7 +61,6 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti

    private VersionType versionType = VersionType.INTERNAL;
    private long version = Versions.MATCH_ANY;
    private boolean ignoreErrorsOnGeneratedFields;

    public GetRequest() {
        type = "_all";

@@ -187,20 +186,20 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
    }

    /**
     * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
     * Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
     * field will be returned.
     */
    public GetRequest fields(String... fields) {
        this.fields = fields;
    public GetRequest storedFields(String... fields) {
        this.storedFields = fields;
        return this;
    }

    /**
     * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
     * Explicitly specify the stored fields that will be returned. By default, the <tt>_source</tt>
     * field will be returned.
     */
    public String[] fields() {
        return this.fields;
    public String[] storedFields() {
        return this.storedFields;
    }

    /**

@@ -248,19 +247,10 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        return this;
    }

    public GetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
        this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
        return this;
    }

    public VersionType versionType() {
        return this.versionType;
    }

    public boolean ignoreErrorsOnGeneratedFields() {
        return ignoreErrorsOnGeneratedFields;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);

@@ -270,19 +260,12 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        parent = in.readOptionalString();
        preference = in.readOptionalString();
        refresh = in.readBoolean();
        int size = in.readInt();
        if (size >= 0) {
            fields = new String[size];
            for (int i = 0; i < size; i++) {
                fields[i] = in.readString();
            }
        }
        storedFields = in.readOptionalStringArray();
        realtime = in.readBoolean();
        this.ignoreErrorsOnGeneratedFields = in.readBoolean();

        this.versionType = VersionType.fromValue(in.readByte());
        this.version = in.readLong();
        fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
        fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
    }

    @Override

@@ -295,19 +278,11 @@ public class GetRequest extends SingleShardRequest<GetRequest> implements Realti
        out.writeOptionalString(preference);

        out.writeBoolean(refresh);
        if (fields == null) {
            out.writeInt(-1);
        } else {
            out.writeInt(fields.length);
            for (String field : fields) {
                out.writeString(field);
            }
        }
        out.writeOptionalStringArray(storedFields);
        out.writeBoolean(realtime);
        out.writeBoolean(ignoreErrorsOnGeneratedFields);
        out.writeByte(versionType.getValue());
        out.writeLong(version);
        out.writeOptionalStreamable(fetchSourceContext);
        out.writeOptionalWriteable(fetchSourceContext);
    }

    @Override
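GetRequest's hand-written framing used a signed length with -1 as the null sentinel; writeOptionalStringArray spends a boolean instead. The two layouts are not byte-compatible, which is part of why these cleanups can only land together in a major version. Both framings, sketched with JDK streams:

    import java.io.*;

    final class StringArrayFraming {
        // Old framing: signed length, -1 means null.
        static void writeWithSentinel(DataOutputStream out, String[] values) throws IOException {
            if (values == null) {
                out.writeInt(-1);
            } else {
                out.writeInt(values.length);
                for (String v : values) {
                    out.writeUTF(v);
                }
            }
        }

        // New-style framing: presence flag, then the count and elements.
        static void writeOptional(DataOutputStream out, String[] values) throws IOException {
            out.writeBoolean(values != null);
            if (values != null) {
                out.writeInt(values.length);
                for (String v : values) {
                    out.writeUTF(v);
                }
            }
        }
    }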
@@ -88,8 +88,8 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
     * Explicitly specify the fields that will be returned. By default, the <tt>_source</tt>
     * field will be returned.
     */
    public GetRequestBuilder setFields(String... fields) {
        request.fields(fields);
    public GetRequestBuilder setStoredFields(String... fields) {
        request.storedFields(fields);
        return this;
    }

@@ -155,11 +155,6 @@ public class GetRequestBuilder extends SingleShardOperationRequestBuilder<GetReq
        return this;
    }

    public GetRequestBuilder setIgnoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
        request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
        return this;
    }

    /**
     * Sets the version, which will cause the get operation to only be performed if a matching
     * version exists and no changes happened on the doc since then.

@@ -134,14 +134,26 @@ public class GetResponse extends ActionResponse implements Iterable<GetField>, T
        return getResult.getSource();
    }

    /**
     * @deprecated Use {@link GetResponse#getSource()} instead
     */
    @Deprecated
    public Map<String, GetField> getFields() {
        return getResult.getFields();
    }

    /**
     * @deprecated Use {@link GetResponse#getSource()} instead
     */
    @Deprecated
    public GetField getField(String name) {
        return getResult.field(name);
    }

    /**
     * @deprecated Use {@link GetResponse#getSource()} instead
     */
    @Deprecated
    @Override
    public Iterator<GetField> iterator() {
        return getResult.iterator();
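With the GetField accessors on GetResponse deprecated, callers are pointed at the _source map. A small sketch of the usual replacement, resolving a dotted path inside the source (assumes the document was fetched with _source enabled):

    import java.util.Map;

    final class SourceValues {
        // Sketch: resolve a dotted path ("user.name") inside the _source map,
        // the usual replacement for the deprecated per-field accessors.
        @SuppressWarnings("unchecked")
        static Object path(Map<String, Object> source, String dottedPath) {
            Object current = source;
            for (String key : dottedPath.split("\\.")) {
                if ((current instanceof Map) == false) {
                    return null;               // path walks off the object tree
                }
                current = ((Map<String, Object>) current).get(key);
            }
            return current;
        }
    }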
@ -28,6 +28,7 @@ import org.elasticsearch.action.RealtimeRequest;
|
|||
import org.elasticsearch.action.ValidateActions;
|
||||
import org.elasticsearch.action.support.IndicesOptions;
|
||||
import org.elasticsearch.common.Nullable;
|
||||
import org.elasticsearch.common.ParsingException;
|
||||
import org.elasticsearch.common.Strings;
|
||||
import org.elasticsearch.common.bytes.BytesArray;
|
||||
import org.elasticsearch.common.bytes.BytesReference;
|
||||
|
@ -58,7 +59,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
private String id;
|
||||
private String routing;
|
||||
private String parent;
|
||||
private String[] fields;
|
||||
private String[] storedFields;
|
||||
private long version = Versions.MATCH_ANY;
|
||||
private VersionType versionType = VersionType.INTERNAL;
|
||||
private FetchSourceContext fetchSourceContext;
|
||||
|
@ -136,13 +137,13 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
return parent;
|
||||
}
|
||||
|
||||
public Item fields(String... fields) {
|
||||
this.fields = fields;
|
||||
public Item storedFields(String... fields) {
|
||||
this.storedFields = fields;
|
||||
return this;
|
||||
}
|
||||
|
||||
public String[] fields() {
|
||||
return this.fields;
|
||||
public String[] storedFields() {
|
||||
return this.storedFields;
|
||||
}
|
||||
|
||||
public long version() {
|
||||
|
@ -188,17 +189,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
id = in.readString();
|
||||
routing = in.readOptionalString();
|
||||
parent = in.readOptionalString();
|
||||
int size = in.readVInt();
|
||||
if (size > 0) {
|
||||
fields = new String[size];
|
||||
for (int i = 0; i < size; i++) {
|
||||
fields[i] = in.readString();
|
||||
}
|
||||
}
|
||||
storedFields = in.readOptionalStringArray();
|
||||
version = in.readLong();
|
||||
versionType = VersionType.fromValue(in.readByte());
|
||||
|
||||
fetchSourceContext = in.readOptionalStreamable(FetchSourceContext::new);
|
||||
fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -208,19 +203,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
out.writeString(id);
|
||||
out.writeOptionalString(routing);
|
||||
out.writeOptionalString(parent);
|
||||
if (fields == null) {
|
||||
out.writeVInt(0);
|
||||
} else {
|
||||
out.writeVInt(fields.length);
|
||||
for (String field : fields) {
|
||||
out.writeString(field);
|
||||
}
|
||||
}
|
||||
|
||||
out.writeOptionalStringArray(storedFields);
|
||||
out.writeLong(version);
|
||||
out.writeByte(versionType.getValue());
|
||||
|
||||
out.writeOptionalStreamable(fetchSourceContext);
|
||||
out.writeOptionalWriteable(fetchSourceContext);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -233,7 +220,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
|
|||
if (version != item.version) return false;
|
||||
if (fetchSourceContext != null ? !fetchSourceContext.equals(item.fetchSourceContext) : item.fetchSourceContext != null)
|
||||
return false;
|
||||
if (!Arrays.equals(fields, item.fields)) return false;
|
||||
if (!Arrays.equals(storedFields, item.storedFields)) return false;
|
||||
if (!id.equals(item.id)) return false;
|
||||
if (!index.equals(item.index)) return false;
|
||||
         if (routing != null ? !routing.equals(item.routing) : item.routing != null) return false;

@@ -251,7 +238,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
         result = 31 * result + id.hashCode();
         result = 31 * result + (routing != null ? routing.hashCode() : 0);
         result = 31 * result + (parent != null ? parent.hashCode() : 0);
-        result = 31 * result + (fields != null ? Arrays.hashCode(fields) : 0);
+        result = 31 * result + (storedFields != null ? Arrays.hashCode(storedFields) : 0);
         result = 31 * result + Long.hashCode(version);
         result = 31 * result + versionType.hashCode();
         result = 31 * result + (fetchSourceContext != null ? fetchSourceContext.hashCode() : 0);

@@ -262,8 +249,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
     String preference;
     boolean realtime = true;
     boolean refresh;
-    public boolean ignoreErrorsOnGeneratedFields = false;
-
     List<Item> items = new ArrayList<>();

     public List<Item> getItems() {

@@ -338,11 +323,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
         }
     }

-    public MultiGetRequest ignoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
-        this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
-        return this;
-    }
-
     public MultiGetRequest add(@Nullable String defaultIndex, @Nullable String defaultType, @Nullable String[] defaultFields, @Nullable FetchSourceContext defaultFetchSource, byte[] data, int from, int length) throws Exception {
         return add(defaultIndex, defaultType, defaultFields, defaultFetchSource, new BytesArray(data, from, length), true);
     }

@@ -386,7 +366,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
                 String id = null;
                 String routing = defaultRouting;
                 String parent = null;
-                List<String> fields = null;
+                List<String> storedFields = null;
                 long version = Versions.MATCH_ANY;
                 VersionType versionType = VersionType.INTERNAL;

@@ -410,8 +390,11 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
                         } else if ("_parent".equals(currentFieldName) || "parent".equals(currentFieldName)) {
                             parent = parser.text();
                         } else if ("fields".equals(currentFieldName)) {
-                            fields = new ArrayList<>();
-                            fields.add(parser.text());
+                            throw new ParsingException(parser.getTokenLocation(),
+                                "Unsupported field [fields] used, expected [stored_fields] instead");
+                        } else if ("stored_fields".equals(currentFieldName)) {
+                            storedFields = new ArrayList<>();
+                            storedFields.add(parser.text());
                         } else if ("_version".equals(currentFieldName) || "version".equals(currentFieldName)) {
                             version = parser.longValue();
                         } else if ("_version_type".equals(currentFieldName) || "_versionType".equals(currentFieldName) || "version_type".equals(currentFieldName) || "versionType".equals(currentFieldName)) {

@@ -427,9 +410,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
                         }
                     } else if (token == XContentParser.Token.START_ARRAY) {
                         if ("fields".equals(currentFieldName)) {
-                            fields = new ArrayList<>();
+                            throw new ParsingException(parser.getTokenLocation(),
+                                "Unsupported field [fields] used, expected [stored_fields] instead");
+                        } else if ("stored_fields".equals(currentFieldName)) {
+                            storedFields = new ArrayList<>();
                             while ((token = parser.nextToken()) != XContentParser.Token.END_ARRAY) {
-                                fields.add(parser.text());
+                                storedFields.add(parser.text());
                             }
                         } else if ("_source".equals(currentFieldName)) {
                             ArrayList<String> includes = new ArrayList<>();

@@ -471,12 +457,12 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
                         }
                     }
                     String[] aFields;
-                    if (fields != null) {
-                        aFields = fields.toArray(new String[fields.size()]);
+                    if (storedFields != null) {
+                        aFields = storedFields.toArray(new String[storedFields.size()]);
                     } else {
                         aFields = defaultFields;
                     }
-                    items.add(new Item(index, type, id).routing(routing).fields(aFields).parent(parent).version(version).versionType(versionType)
+                    items.add(new Item(index, type, id).routing(routing).storedFields(aFields).parent(parent).version(version).versionType(versionType)
                             .fetchSourceContext(fetchSourceContext == null ? defaultFetchSource : fetchSourceContext));
                 }
             }

@@ -491,7 +477,7 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
                 if (!token.isValue()) {
                     throw new IllegalArgumentException("ids array element should only contain ids");
                 }
-                items.add(new Item(defaultIndex, defaultType, parser.text()).fields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
+                items.add(new Item(defaultIndex, defaultType, parser.text()).storedFields(defaultFields).fetchSourceContext(defaultFetchSource).routing(defaultRouting));
             }
         }

@@ -510,7 +496,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
         preference = in.readOptionalString();
         refresh = in.readBoolean();
         realtime = in.readBoolean();
-        ignoreErrorsOnGeneratedFields = in.readBoolean();

         int size = in.readVInt();
         items = new ArrayList<>(size);

@@ -525,7 +510,6 @@ public class MultiGetRequest extends ActionRequest<MultiGetRequest> implements I
         out.writeOptionalString(preference);
         out.writeBoolean(refresh);
         out.writeBoolean(realtime);
-        out.writeBoolean(ignoreErrorsOnGeneratedFields);

         out.writeVInt(items.size());
         for (Item item : items) {
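For illustration, a hedged sketch of the renamed multi-get API from the caller's side, using only the Item.storedFields setter visible in the hunks above; the index, type, ids, and field names are invented:

    // Sketch only: document coordinates are hypothetical.
    MultiGetRequest request = new MultiGetRequest();
    request.add(new MultiGetRequest.Item("my-index", "my-type", "1")
            .storedFields("field1", "field2")); // previously .fields("field1", "field2")
    request.add(new MultiGetRequest.Item("my-index", "my-type", "2")
            .storedFields("field1"));

A request body that still says "fields" now fails to parse with "Unsupported field [fields] used, expected [stored_fields] instead", per the ParsingException added above.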
@@ -80,9 +80,4 @@ public class MultiGetRequestBuilder extends ActionRequestBuilder<MultiGetRequest
         request.realtime(realtime);
         return this;
     }
-
-    public MultiGetRequestBuilder setIgnoreErrorsOnGeneratedFields(boolean ignoreErrorsOnGeneratedFields) {
-        request.ignoreErrorsOnGeneratedFields(ignoreErrorsOnGeneratedFields);
-        return this;
-    }
 }
@@ -35,7 +35,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
     private String preference;
     boolean realtime = true;
     boolean refresh;
-    boolean ignoreErrorsOnGeneratedFields = false;

     IntArrayList locations;
     List<MultiGetRequest.Item> items;

@@ -52,7 +51,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         preference = multiGetRequest.preference;
         realtime = multiGetRequest.realtime;
         refresh = multiGetRequest.refresh;
-        ignoreErrorsOnGeneratedFields = multiGetRequest.ignoreErrorsOnGeneratedFields;
     }

     @Override

@@ -87,11 +85,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         return this;
     }

-    public MultiGetShardRequest ignoreErrorsOnGeneratedFields(Boolean ignoreErrorsOnGeneratedFields) {
-        this.ignoreErrorsOnGeneratedFields = ignoreErrorsOnGeneratedFields;
-        return this;
-    }
-
     public boolean refresh() {
         return this.refresh;
     }

@@ -130,7 +123,6 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         preference = in.readOptionalString();
         refresh = in.readBoolean();
         realtime = in.readBoolean();
-        ignoreErrorsOnGeneratedFields = in.readBoolean();
     }

     @Override

@@ -146,11 +138,5 @@ public class MultiGetShardRequest extends SingleShardRequest<MultiGetShardReques
         out.writeOptionalString(preference);
         out.writeBoolean(refresh);
         out.writeBoolean(realtime);
-        out.writeBoolean(ignoreErrorsOnGeneratedFields);
-
     }
-
-    public boolean ignoreErrorsOnGeneratedFields() {
-        return ignoreErrorsOnGeneratedFields;
-    }
 }
@@ -92,8 +92,8 @@ public class TransportGetAction extends TransportSingleShardAction<GetRequest, G
             indexShard.refresh("refresh_flag_get");
         }

-        GetResult result = indexShard.getService().get(request.type(), request.id(), request.fields(),
-                request.realtime(), request.version(), request.versionType(), request.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
+        GetResult result = indexShard.getService().get(request.type(), request.id(), request.storedFields(),
+                request.realtime(), request.version(), request.versionType(), request.fetchSourceContext());
         return new GetResponse(result);
     }

@@ -88,13 +88,15 @@ public class TransportShardMultiGetAction extends TransportSingleShardAction<Mul
         for (int i = 0; i < request.locations.size(); i++) {
             MultiGetRequest.Item item = request.items.get(i);
             try {
-                GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.fields(), request.realtime(), item.version(), item.versionType(), item.fetchSourceContext(), request.ignoreErrorsOnGeneratedFields());
+                GetResult getResult = indexShard.getService().get(item.type(), item.id(), item.storedFields(), request.realtime(), item.version(),
+                        item.versionType(), item.fetchSourceContext());
                 response.add(request.locations.get(i), new GetResponse(getResult));
             } catch (Exception e) {
                 if (TransportActions.isShardNotAvailableException(e)) {
                     throw (ElasticsearchException) e;
                 } else {
-                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId, item.type(), item.id()), e);
+                    logger.debug((Supplier<?>) () -> new ParameterizedMessage("{} failed to execute multi_get for [{}]/[{}]", shardId,
+                            item.type(), item.id()), e);
                     response.add(request.locations.get(i), new MultiGetResponse.Failure(request.index(), item.type(), item.id(), e));
                 }
             }
@@ -35,16 +35,18 @@ public class MainResponse extends ActionResponse implements ToXContent {
     private String nodeName;
     private Version version;
     private ClusterName clusterName;
+    private String clusterUuid;
     private Build build;
     private boolean available;

     MainResponse() {
     }

-    public MainResponse(String nodeName, Version version, ClusterName clusterName, Build build, boolean available) {
+    public MainResponse(String nodeName, Version version, ClusterName clusterName, String clusterUuid, Build build, boolean available) {
         this.nodeName = nodeName;
         this.version = version;
         this.clusterName = clusterName;
+        this.clusterUuid = clusterUuid;
         this.build = build;
         this.available = available;
     }

@@ -61,6 +63,10 @@ public class MainResponse extends ActionResponse implements ToXContent {
         return clusterName;
     }

+    public String getClusterUuid() {
+        return clusterUuid;
+    }
+
     public Build getBuild() {
         return build;
     }

@@ -75,6 +81,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
         out.writeString(nodeName);
         Version.writeVersion(version, out);
         clusterName.writeTo(out);
+        out.writeString(clusterUuid);
         Build.writeBuild(build, out);
         out.writeBoolean(available);
     }

@@ -85,6 +92,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
         nodeName = in.readString();
         version = Version.readVersion(in);
         clusterName = new ClusterName(in);
+        clusterUuid = in.readString();
         build = Build.readBuild(in);
         available = in.readBoolean();
     }

@@ -94,6 +102,7 @@ public class MainResponse extends ActionResponse implements ToXContent {
         builder.startObject();
         builder.field("name", nodeName);
         builder.field("cluster_name", clusterName.value());
+        builder.field("cluster_uuid", clusterUuid);
         builder.startObject("version")
             .field("number", version.toString())
             .field("build_hash", build.shortHash())
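A hedged illustration of the effect of the MainResponse change: the constructor and getter shapes come from the hunks above, while the concrete values are invented.

    // Sketch only: values are hypothetical.
    MainResponse response = new MainResponse("node-1", Version.CURRENT,
            new ClusterName("my-cluster"), "Xyz-cluster-uuid", Build.CURRENT, true);
    assert "Xyz-cluster-uuid".equals(response.getClusterUuid());
    // toXContent now emits the UUID next to the cluster name, roughly:
    //   {"name":"node-1","cluster_name":"my-cluster","cluster_uuid":"Xyz-cluster-uuid","version":{...}}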
@@ -52,7 +52,7 @@ public class TransportMainAction extends HandledTransportAction<MainRequest, Mai
         assert Node.NODE_NAME_SETTING.exists(settings);
         final boolean available = clusterState.getBlocks().hasGlobalBlock(RestStatus.SERVICE_UNAVAILABLE) == false;
         listener.onResponse(
-                new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(), Build.CURRENT,
-                        available));
+                new MainResponse(Node.NODE_NAME_SETTING.get(settings), Version.CURRENT, clusterState.getClusterName(),
+                        clusterState.metaData().clusterUUID(), Build.CURRENT, available));
     }
 }
@@ -40,8 +40,6 @@ import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchPhaseResult;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
 import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;

@@ -28,8 +28,6 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;

@@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.dfs.AggregatedDfs;
 import org.elasticsearch.search.dfs.DfsSearchResult;
 import org.elasticsearch.search.fetch.FetchSearchResult;

@@ -17,7 +17,7 @@
  * under the License.
  */

-package org.elasticsearch.search.controller;
+package org.elasticsearch.action.search;

 import com.carrotsearch.hppc.IntArrayList;
 import com.carrotsearch.hppc.ObjectObjectHashMap;

@@ -89,8 +89,7 @@ public class SearchPhaseController extends AbstractComponent {
     private final ScriptService scriptService;
     private final ClusterService clusterService;

-    @Inject
-    public SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
+    SearchPhaseController(Settings settings, BigArrays bigArrays, ScriptService scriptService, ClusterService clusterService) {
         super(settings);
         this.bigArrays = bigArrays;
         this.scriptService = scriptService;

@@ -25,8 +25,6 @@ import org.elasticsearch.action.ActionRunnable;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;
 import org.elasticsearch.search.internal.InternalSearchResponse;
 import org.elasticsearch.search.internal.ShardSearchTransportRequest;

@@ -31,8 +31,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
 import org.elasticsearch.search.SearchShardTarget;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.fetch.ShardFetchSearchRequest;
 import org.elasticsearch.search.internal.InternalSearchResponse;

@@ -28,8 +28,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.QueryFetchSearchResult;
 import org.elasticsearch.search.fetch.ScrollQueryFetchSearchResult;
 import org.elasticsearch.search.internal.InternalScrollSearchRequest;

@@ -29,8 +29,6 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.node.DiscoveryNodes;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.util.concurrent.AtomicArray;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
 import org.elasticsearch.search.fetch.FetchSearchResult;
 import org.elasticsearch.search.fetch.ShardFetchRequest;
 import org.elasticsearch.search.internal.InternalScrollSearchRequest;
@@ -17,17 +17,15 @@
  * under the License.
  */

-package org.elasticsearch.search.action;
+package org.elasticsearch.action.search;

 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.ActionListenerResponseHandler;
 import org.elasticsearch.action.IndicesRequest;
 import org.elasticsearch.action.OriginalIndices;
-import org.elasticsearch.action.search.SearchRequest;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.common.component.AbstractComponent;
-import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.settings.Settings;

@@ -45,9 +43,7 @@ import org.elasticsearch.search.query.QuerySearchResult;
 import org.elasticsearch.search.query.QuerySearchResultProvider;
 import org.elasticsearch.search.query.ScrollQuerySearchResult;
 import org.elasticsearch.threadpool.ThreadPool;
-import org.elasticsearch.transport.TransportChannel;
 import org.elasticsearch.transport.TransportRequest;
-import org.elasticsearch.transport.TransportRequestHandler;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;

@@ -73,37 +69,10 @@ public class SearchTransportService extends AbstractComponent {
     public static final String FETCH_ID_ACTION_NAME = "indices:data/read/search[phase/fetch/id]";

     private final TransportService transportService;
-    private final SearchService searchService;

-    @Inject
-    public SearchTransportService(Settings settings, TransportService transportService, SearchService searchService) {
+    SearchTransportService(Settings settings, TransportService transportService) {
         super(settings);
         this.transportService = transportService;
-        this.searchService = searchService;
-        transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
-            new FreeContextTransportHandler<>());
-        transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
-            new FreeContextTransportHandler<>());
-        transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, ClearScrollContextsRequest::new, ThreadPool.Names.SAME,
-            new ClearScrollContextsTransportHandler());
-        transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
-            new SearchDfsTransportHandler());
-        transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
-            new SearchQueryTransportHandler());
-        transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
-            new SearchQueryByIdTransportHandler());
-        transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
-            new SearchQueryScrollTransportHandler());
-        transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
-            new SearchQueryFetchTransportHandler());
-        transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
-            new SearchQueryQueryFetchTransportHandler());
-        transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
-            new SearchQueryFetchScrollTransportHandler());
-        transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
-            new FetchByIdTransportHandler<>());
-        transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
-            new FetchByIdTransportHandler<>());
     }

     public void sendFreeContext(DiscoveryNode node, final long contextId, SearchRequest request) {

@@ -127,8 +96,8 @@ public class SearchTransportService extends AbstractComponent {
     }

     public void sendClearAllScrollContexts(DiscoveryNode node, final ActionListener<TransportResponse> listener) {
-        transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, new ClearScrollContextsRequest(),
-            new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
+        transportService.sendRequest(node, CLEAR_SCROLL_CONTEXTS_ACTION_NAME, TransportRequest.Empty.INSTANCE,
+            new ActionListenerResponseHandler<>(listener, () -> TransportResponse.Empty.INSTANCE));
     }

     public void sendExecuteDfs(DiscoveryNode node, final ShardSearchTransportRequest request,
@@ -281,87 +250,66 @@ public class SearchTransportService extends AbstractComponent {
         }
     }

-    class FreeContextTransportHandler<FreeContextRequest extends ScrollFreeContextRequest>
-            implements TransportRequestHandler<FreeContextRequest> {
-        @Override
-        public void messageReceived(FreeContextRequest request, TransportChannel channel) throws Exception {
-            boolean freed = searchService.freeContext(request.id());
-            channel.sendResponse(new SearchFreeContextResponse(freed));
-        }
-    }
-
-    static class ClearScrollContextsRequest extends TransportRequest {
-    }
-
-    class ClearScrollContextsTransportHandler implements TransportRequestHandler<ClearScrollContextsRequest> {
-        @Override
-        public void messageReceived(ClearScrollContextsRequest request, TransportChannel channel) throws Exception {
-            searchService.freeAllScrollContexts();
-            channel.sendResponse(TransportResponse.Empty.INSTANCE);
-        }
-    }
-
-    class SearchDfsTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
-        @Override
-        public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
-            DfsSearchResult result = searchService.executeDfsPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class SearchQueryTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
-        @Override
-        public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
-            QuerySearchResultProvider result = searchService.executeQueryPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class SearchQueryByIdTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
-        @Override
-        public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
-            QuerySearchResult result = searchService.executeQueryPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class SearchQueryScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
-        @Override
-        public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
-            ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class SearchQueryFetchTransportHandler implements TransportRequestHandler<ShardSearchTransportRequest> {
-        @Override
-        public void messageReceived(ShardSearchTransportRequest request, TransportChannel channel) throws Exception {
-            QueryFetchSearchResult result = searchService.executeFetchPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class SearchQueryQueryFetchTransportHandler implements TransportRequestHandler<QuerySearchRequest> {
-        @Override
-        public void messageReceived(QuerySearchRequest request, TransportChannel channel) throws Exception {
-            QueryFetchSearchResult result = searchService.executeFetchPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class FetchByIdTransportHandler<Request extends ShardFetchRequest> implements TransportRequestHandler<Request> {
-        @Override
-        public void messageReceived(Request request, TransportChannel channel) throws Exception {
-            FetchSearchResult result = searchService.executeFetchPhase(request);
-            channel.sendResponse(result);
-        }
-    }
-
-    class SearchQueryFetchScrollTransportHandler implements TransportRequestHandler<InternalScrollSearchRequest> {
-        @Override
-        public void messageReceived(InternalScrollSearchRequest request, TransportChannel channel) throws Exception {
-            ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
-            channel.sendResponse(result);
-        }
-    }
+    public static void registerRequestHandler(TransportService transportService, SearchService searchService) {
+        transportService.registerRequestHandler(FREE_CONTEXT_SCROLL_ACTION_NAME, ScrollFreeContextRequest::new, ThreadPool.Names.SAME,
+            ((request, channel) -> {
+                boolean freed = searchService.freeContext(request.id());
+                channel.sendResponse(new SearchFreeContextResponse(freed));
+            }));
+        transportService.registerRequestHandler(FREE_CONTEXT_ACTION_NAME, SearchFreeContextRequest::new, ThreadPool.Names.SAME,
+            (request, channel) -> {
+                boolean freed = searchService.freeContext(request.id());
+                channel.sendResponse(new SearchFreeContextResponse(freed));
+            });
+        transportService.registerRequestHandler(CLEAR_SCROLL_CONTEXTS_ACTION_NAME, () -> TransportRequest.Empty.INSTANCE,
+            ThreadPool.Names.SAME, (request, channel) -> {
+                searchService.freeAllScrollContexts();
+                channel.sendResponse(TransportResponse.Empty.INSTANCE);
+            });
+        transportService.registerRequestHandler(DFS_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                DfsSearchResult result = searchService.executeDfsPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(QUERY_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                QuerySearchResultProvider result = searchService.executeQueryPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(QUERY_ID_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                QuerySearchResult result = searchService.executeQueryPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(QUERY_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                ScrollQuerySearchResult result = searchService.executeQueryPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(QUERY_FETCH_ACTION_NAME, ShardSearchTransportRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(QUERY_QUERY_FETCH_ACTION_NAME, QuerySearchRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                QueryFetchSearchResult result = searchService.executeFetchPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(QUERY_FETCH_SCROLL_ACTION_NAME, InternalScrollSearchRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                ScrollQueryFetchSearchResult result = searchService.executeFetchPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(FETCH_ID_SCROLL_ACTION_NAME, ShardFetchRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                FetchSearchResult result = searchService.executeFetchPhase(request);
+                channel.sendResponse(result);
+            });
+        transportService.registerRequestHandler(FETCH_ID_ACTION_NAME, ShardFetchSearchRequest::new, ThreadPool.Names.SEARCH,
+            (request, channel) -> {
+                FetchSearchResult result = searchService.executeFetchPhase(request);
+                channel.sendResponse(result);
+            });
+    }
 }
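The refactor above treats TransportRequestHandler as a single-method target, so each anonymous handler class collapses into a lambda passed straight to registerRequestHandler. A minimal sketch of the pattern, with a hypothetical action name and FooRequest/FooResponse types invented for illustration:

    // Hypothetical names, for illustration only; the call shape mirrors the
    // registrations in the hunk above.
    transportService.registerRequestHandler("indices:data/read/foo[f]", FooRequest::new,
            ThreadPool.Names.SEARCH, (request, channel) -> {
                FooResponse result = searchService.executeFoo(request); // hypothetical service call
                channel.sendResponse(result);
            });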
@@ -32,7 +32,6 @@ import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.util.concurrent.CountDown;
-import org.elasticsearch.search.action.SearchTransportService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportResponse;
 import org.elasticsearch.transport.TransportService;

@@ -44,8 +43,6 @@ import java.util.concurrent.atomic.AtomicReference;

 import static org.elasticsearch.action.search.TransportSearchHelper.parseScrollId;

-/**
- */
 public class TransportClearScrollAction extends HandledTransportAction<ClearScrollRequest, ClearScrollResponse> {

     private final ClusterService clusterService;

@@ -53,11 +50,11 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro

     @Inject
     public TransportClearScrollAction(Settings settings, TransportService transportService, ThreadPool threadPool,
-                                      ClusterService clusterService, SearchTransportService searchTransportService,
-                                      ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+                                      ClusterService clusterService, ActionFilters actionFilters,
+                                      IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ClearScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, ClearScrollRequest::new);
         this.clusterService = clusterService;
-        this.searchTransportService = searchTransportService;
+        this.searchTransportService = new SearchTransportService(settings, transportService);
     }

     @Override
@@ -29,10 +29,11 @@ import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Setting;
 import org.elasticsearch.common.settings.Setting.Property;
 import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.common.util.BigArrays;
 import org.elasticsearch.index.IndexNotFoundException;
 import org.elasticsearch.indices.IndexClosedException;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.script.ScriptService;
+import org.elasticsearch.search.SearchService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -53,13 +54,14 @@ public class TransportSearchAction extends HandledTransportAction<SearchRequest,
     private final SearchPhaseController searchPhaseController;

     @Inject
-    public TransportSearchAction(Settings settings, ThreadPool threadPool, SearchPhaseController searchPhaseController,
-                                 TransportService transportService, SearchTransportService searchTransportService,
+    public TransportSearchAction(Settings settings, ThreadPool threadPool, BigArrays bigArrays, ScriptService scriptService,
+                                 TransportService transportService, SearchService searchService,
                                  ClusterService clusterService, ActionFilters actionFilters, IndexNameExpressionResolver
                                          indexNameExpressionResolver) {
         super(settings, SearchAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, SearchRequest::new);
-        this.searchPhaseController = searchPhaseController;
-        this.searchTransportService = searchTransportService;
+        this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
+        this.searchTransportService = new SearchTransportService(settings, transportService);
+        SearchTransportService.registerRequestHandler(transportService, searchService);
         this.clusterService = clusterService;
     }

@@ -26,8 +26,8 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
-import org.elasticsearch.search.action.SearchTransportService;
-import org.elasticsearch.search.controller.SearchPhaseController;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;

@@ -45,15 +45,14 @@ public class TransportSearchScrollAction extends HandledTransportAction<SearchSc
     private final SearchPhaseController searchPhaseController;

     @Inject
-    public TransportSearchScrollAction(Settings settings, ThreadPool threadPool, TransportService transportService,
-                                       ClusterService clusterService, SearchTransportService searchTransportService,
-                                       SearchPhaseController searchPhaseController,
-                                       ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
+    public TransportSearchScrollAction(Settings settings, BigArrays bigArrays, ThreadPool threadPool, ScriptService scriptService,
+                                       TransportService transportService, ClusterService clusterService, ActionFilters actionFilters,
+                                       IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, SearchScrollAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver,
             SearchScrollRequest::new);
         this.clusterService = clusterService;
-        this.searchTransportService = searchTransportService;
-        this.searchPhaseController = searchPhaseController;
+        this.searchTransportService = new SearchTransportService(settings, transportService);
+        this.searchPhaseController = new SearchPhaseController(settings, bigArrays, scriptService, clusterService);
     }

     @Override
@@ -180,7 +180,7 @@ public class TermVectorsRequest extends SingleShardRequest<TermVectorsRequest> i
         super(item.index());
         this.id = item.id();
         this.type = item.type();
-        this.selectedFields(item.fields());
+        this.selectedFields(item.storedFields());
         this.routing(item.routing());
         this.parent(item.parent());
     }
@@ -186,7 +186,8 @@ public class TransportUpdateAction extends TransportInstanceSingleOperationActio
             @Override
             public void onResponse(IndexResponse response) {
                 UpdateResponse update = new UpdateResponse(response.getShardInfo(), response.getShardId(), response.getType(), response.getId(), response.getVersion(), response.getResult());
-                if (request.fields() != null && request.fields().length > 0) {
+                if ((request.fetchSource() != null && request.fetchSource().fetchSource()) ||
+                    (request.fields() != null && request.fields().length > 0)) {
                     Tuple<XContentType, Map<String, Object>> sourceAndContent = XContentHelper.convertToMap(upsertSourceBytes, true);
                     update.setGetResult(updateHelper.extractGetResult(request, request.concreteIndex(), response.getVersion(), sourceAndContent.v2(), sourceAndContent.v1(), upsertSourceBytes));
                 } else {
@@ -19,6 +19,7 @@

 package org.elasticsearch.action.update;

+import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.DocWriteResponse;
 import org.elasticsearch.action.delete.DeleteRequest;
 import org.elasticsearch.action.index.IndexRequest;

@@ -28,9 +29,11 @@ import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.component.AbstractComponent;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.BytesStreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.unit.TimeValue;
+import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentHelper;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;

@@ -51,6 +54,7 @@ import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.search.fetch.subphase.FetchSourceContext;
 import org.elasticsearch.search.lookup.SourceLookup;

+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.HashMap;

@@ -76,7 +80,7 @@ public class UpdateHelper extends AbstractComponent {
     public Result prepare(UpdateRequest request, IndexShard indexShard) {
         final GetResult getResult = indexShard.getService().get(request.type(), request.id(),
                 new String[]{RoutingFieldMapper.NAME, ParentFieldMapper.NAME, TTLFieldMapper.NAME, TimestampFieldMapper.NAME},
-                true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE, false);
+                true, request.version(), request.versionType(), FetchSourceContext.FETCH_SOURCE);
         return prepare(indexShard.shardId(), request, getResult);
     }

@@ -139,12 +143,7 @@ public class UpdateHelper extends AbstractComponent {
             return new Result(indexRequest, DocWriteResponse.Result.CREATED, null, null);
         }

-        long updateVersion = getResult.getVersion();
-
-        if (request.versionType() != VersionType.INTERNAL) {
-            assert request.versionType() == VersionType.FORCE;
-            updateVersion = request.version(); // remember, match_any is excluded by the conflict test
-        }
+        final long updateVersion = getResult.getVersion();

         if (getResult.internalSourceRef() == null) {
             // no source, we can't do nothing, through a failure...
@@ -272,17 +271,19 @@ public class UpdateHelper extends AbstractComponent {
     }

     /**
-     * Extracts the fields from the updated document to be returned in a update response
+     * Applies {@link UpdateRequest#fetchSource()} to the _source of the updated document to be returned in a update response.
+     * For BWC this function also extracts the {@link UpdateRequest#fields()} from the updated document to be returned in a update response
      */
     public GetResult extractGetResult(final UpdateRequest request, String concreteIndex, long version, final Map<String, Object> source, XContentType sourceContentType, @Nullable final BytesReference sourceAsBytes) {
-        if (request.fields() == null || request.fields().length == 0) {
+        if ((request.fields() == null || request.fields().length == 0) &&
+            (request.fetchSource() == null || request.fetchSource().fetchSource() == false)) {
             return null;
         }
+        SourceLookup sourceLookup = new SourceLookup();
+        sourceLookup.setSource(source);
         boolean sourceRequested = false;
         Map<String, GetField> fields = null;
         if (request.fields() != null && request.fields().length > 0) {
-            SourceLookup sourceLookup = new SourceLookup();
-            sourceLookup.setSource(source);
             for (String field : request.fields()) {
                 if (field.equals("_source")) {
                     sourceRequested = true;

@@ -303,8 +304,26 @@ public class UpdateHelper extends AbstractComponent {
             }
         }

+        BytesReference sourceFilteredAsBytes = sourceAsBytes;
+        if (request.fetchSource() != null && request.fetchSource().fetchSource()) {
+            sourceRequested = true;
+            if (request.fetchSource().includes().length > 0 || request.fetchSource().excludes().length > 0) {
+                Object value = sourceLookup.filter(request.fetchSource().includes(), request.fetchSource().excludes());
+                try {
+                    final int initialCapacity = Math.min(1024, sourceAsBytes.length());
+                    BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity);
+                    try (XContentBuilder builder = new XContentBuilder(sourceContentType.xContent(), streamOutput)) {
+                        builder.value(value);
+                        sourceFilteredAsBytes = builder.bytes();
+                    }
+                } catch (IOException e) {
+                    throw new ElasticsearchException("Error filtering source", e);
+                }
+            }
+        }
+
         // TODO when using delete/none, we can still return the source as bytes by generating it (using the sourceContentType)
-        return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceAsBytes : null, fields);
+        return new GetResult(concreteIndex, request.type(), request.id(), version, true, sourceRequested ? sourceFilteredAsBytes : null, fields);
     }

     public static class Result {
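The filtering step added above goes parsed map -> filtered object -> re-serialized bytes. A sketch of just the filter call, with invented include/exclude patterns (SourceLookup.filter is the same method used in the hunk):

    SourceLookup sourceLookup = new SourceLookup();
    sourceLookup.setSource(source); // the parsed _source map
    // keep fields matching "title*", drop "body" -- hypothetical patterns
    Object filtered = sourceLookup.filter(new String[] { "title*" }, new String[] { "body" });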
@@ -32,6 +32,8 @@ import org.elasticsearch.common.bytes.BytesArray;
 import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.lucene.uid.Versions;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentFactory;

@@ -42,6 +44,7 @@ import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.script.Script;
 import org.elasticsearch.script.ScriptService;
 import org.elasticsearch.script.ScriptService.ScriptType;
+import org.elasticsearch.search.fetch.subphase.FetchSourceContext;

 import java.io.IOException;
 import java.util.Collections;

@@ -55,6 +58,8 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
  */
 public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
         implements DocumentRequest<UpdateRequest>, WriteRequest<UpdateRequest> {
+    private static final DeprecationLogger DEPRECATION_LOGGER =
+        new DeprecationLogger(Loggers.getLogger(UpdateRequest.class));

     private String type;
     private String id;

@@ -68,6 +73,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
     Script script;

     private String[] fields;
+    private FetchSourceContext fetchSourceContext;

     private long version = Versions.MATCH_ANY;
     private VersionType versionType = VersionType.INTERNAL;

@@ -106,8 +112,9 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
             validationException = addValidationError("id is missing", validationException);
         }

-        if (!(versionType == VersionType.INTERNAL || versionType == VersionType.FORCE)) {
-            validationException = addValidationError("version type [" + versionType + "] is not supported by the update API", validationException);
+        if (versionType != VersionType.INTERNAL) {
+            validationException = addValidationError("version type [" + versionType + "] is not supported by the update API",
+                    validationException);
         } else {

             if (version != Versions.MATCH_ANY && retryOnConflict > 0) {
@@ -372,17 +379,80 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>

     /**
      * Explicitly specify the fields that will be returned. By default, nothing is returned.
+     * @deprecated Use {@link UpdateRequest#fetchSource(String[], String[])} instead
      */
+    @Deprecated
     public UpdateRequest fields(String... fields) {
         this.fields = fields;
         return this;
     }

     /**
-     * Get the fields to be returned.
+     * Indicate that _source should be returned with every hit, with an
+     * "include" and/or "exclude" set which can include simple wildcard
+     * elements.
+     *
+     * @param include
+     *            An optional include (optionally wildcarded) pattern to filter
+     *            the returned _source
+     * @param exclude
+     *            An optional exclude (optionally wildcarded) pattern to filter
+     *            the returned _source
      */
+    public UpdateRequest fetchSource(@Nullable String include, @Nullable String exclude) {
+        this.fetchSourceContext = new FetchSourceContext(include, exclude);
+        return this;
+    }
+
+    /**
+     * Indicate that _source should be returned, with an
+     * "include" and/or "exclude" set which can include simple wildcard
+     * elements.
+     *
+     * @param includes
+     *            An optional list of include (optionally wildcarded) pattern to
+     *            filter the returned _source
+     * @param excludes
+     *            An optional list of exclude (optionally wildcarded) pattern to
+     *            filter the returned _source
+     */
+    public UpdateRequest fetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+        this.fetchSourceContext = new FetchSourceContext(includes, excludes);
+        return this;
+    }
+
+    /**
+     * Indicates whether the response should contain the updated _source.
+     */
+    public UpdateRequest fetchSource(boolean fetchSource) {
+        this.fetchSourceContext = new FetchSourceContext(fetchSource);
+        return this;
+    }
+
+    /**
+     * Explicitly set the fetch source context for this request
+     */
+    public UpdateRequest fetchSource(FetchSourceContext context) {
+        this.fetchSourceContext = context;
+        return this;
+    }
+
+
+    /**
+     * Get the fields to be returned.
+     * @deprecated Use {@link UpdateRequest#fetchSource()} instead
+     */
+    @Deprecated
     public String[] fields() {
-        return this.fields;
+        return fields;
     }

+    /**
+     * Gets the {@link FetchSourceContext} which defines how the _source should
+     * be fetched.
+     */
+    public FetchSourceContext fetchSource() {
+        return fetchSourceContext;
+    }
+
     /**
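A hedged usage sketch of the request-level API defined above, replacing the deprecated fields("_source") idiom; the document coordinates and field names are invented, and doc(...) is assumed to be the usual field/value overload:

    UpdateRequest request = new UpdateRequest("posts", "post", "1")
            .doc("views", 42)
            .fetchSource(new String[] { "views", "title" }, new String[0]); // includes, excludes
    // or request.fetchSource(true) to return the whole updated _source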
@@ -617,16 +687,16 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
         return upsertRequest;
     }

-    public UpdateRequest source(XContentBuilder source) throws Exception {
-        return source(source.bytes());
+    public UpdateRequest fromXContent(XContentBuilder source) throws Exception {
+        return fromXContent(source.bytes());
     }

-    public UpdateRequest source(byte[] source) throws Exception {
-        return source(source, 0, source.length);
+    public UpdateRequest fromXContent(byte[] source) throws Exception {
+        return fromXContent(source, 0, source.length);
     }

-    public UpdateRequest source(byte[] source, int offset, int length) throws Exception {
-        return source(new BytesArray(source, offset, length));
+    public UpdateRequest fromXContent(byte[] source, int offset, int length) throws Exception {
+        return fromXContent(new BytesArray(source, offset, length));
     }

     /**

@@ -645,7 +715,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
         return detectNoop;
     }

-    public UpdateRequest source(BytesReference source) throws Exception {
+    public UpdateRequest fromXContent(BytesReference source) throws Exception {
         Script script = null;
         try (XContentParser parser = XContentFactory.xContent(source).createParser(source)) {
             XContentParser.Token token = parser.nextToken();

@@ -684,6 +754,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
                     if (fields != null) {
                         fields(fields.toArray(new String[fields.size()]));
                     }
+                } else if ("_source".equals(currentFieldName)) {
+                    fetchSourceContext = FetchSourceContext.parse(parser);
                 }
             }
             if (script != null) {

@@ -728,13 +800,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
             doc = new IndexRequest();
             doc.readFrom(in);
         }
-        int size = in.readInt();
-        if (size >= 0) {
-            fields = new String[size];
-            for (int i = 0; i < size; i++) {
-                fields[i] = in.readString();
-            }
-        }
+        fields = in.readOptionalStringArray();
+        fetchSourceContext = in.readOptionalWriteable(FetchSourceContext::new);
         if (in.readBoolean()) {
             upsertRequest = new IndexRequest();
             upsertRequest.readFrom(in);

@@ -771,14 +838,8 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
             doc.id(id);
             doc.writeTo(out);
         }
-        if (fields == null) {
-            out.writeInt(-1);
-        } else {
-            out.writeInt(fields.length);
-            for (String field : fields) {
-                out.writeString(field);
-            }
-        }
+        out.writeOptionalStringArray(fields);
+        out.writeOptionalWriteable(fetchSourceContext);
         if (upsertRequest == null) {
             out.writeBoolean(false);
         } else {
@@ -25,17 +25,22 @@ import org.elasticsearch.action.support.WriteRequestBuilder;
 import org.elasticsearch.action.support.replication.ReplicationRequest;
 import org.elasticsearch.action.support.single.instance.InstanceShardOperationRequestBuilder;
 import org.elasticsearch.client.ElasticsearchClient;
-import org.elasticsearch.common.bytes.BytesReference;
+import org.elasticsearch.common.Nullable;
+import org.elasticsearch.common.logging.DeprecationLogger;
+import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.common.xcontent.XContentBuilder;
 import org.elasticsearch.common.xcontent.XContentType;
 import org.elasticsearch.index.VersionType;
+import org.elasticsearch.rest.action.document.RestUpdateAction;
 import org.elasticsearch.script.Script;

 import java.util.Map;

 public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<UpdateRequest, UpdateResponse, UpdateRequestBuilder>
         implements WriteRequestBuilder<UpdateRequestBuilder> {
+    private static final DeprecationLogger DEPRECATION_LOGGER =
+        new DeprecationLogger(Loggers.getLogger(RestUpdateAction.class));

     public UpdateRequestBuilder(ElasticsearchClient client, UpdateAction action) {
         super(client, action, new UpdateRequest());
@@ -90,12 +95,57 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U

     /**
      * Explicitly specify the fields that will be returned. By default, nothing is returned.
+     * @deprecated Use {@link UpdateRequestBuilder#setFetchSource(String[], String[])} instead
      */
+    @Deprecated
     public UpdateRequestBuilder setFields(String... fields) {
+        DEPRECATION_LOGGER.deprecated("Deprecated field [fields] used, expected [_source] instead");
         request.fields(fields);
         return this;
     }

+    /**
+     * Indicate that _source should be returned with every hit, with an
+     * "include" and/or "exclude" set which can include simple wildcard
+     * elements.
+     *
+     * @param include
+     *            An optional include (optionally wildcarded) pattern to filter
+     *            the returned _source
+     * @param exclude
+     *            An optional exclude (optionally wildcarded) pattern to filter
+     *            the returned _source
+     */
+    public UpdateRequestBuilder setFetchSource(@Nullable String include, @Nullable String exclude) {
+        request.fetchSource(include, exclude);
+        return this;
+    }
+
+    /**
+     * Indicate that _source should be returned, with an
+     * "include" and/or "exclude" set which can include simple wildcard
+     * elements.
+     *
+     * @param includes
+     *            An optional list of include (optionally wildcarded) pattern to
+     *            filter the returned _source
+     * @param excludes
+     *            An optional list of exclude (optionally wildcarded) pattern to
+     *            filter the returned _source
+     */
+    public UpdateRequestBuilder setFetchSource(@Nullable String[] includes, @Nullable String[] excludes) {
+        request.fetchSource(includes, excludes);
+        return this;
+    }
+
+    /**
+     * Indicates whether the response should contain the updated _source.
+     */
+    public UpdateRequestBuilder setFetchSource(boolean fetchSource) {
+        request.fetchSource(fetchSource);
+        return this;
+    }
+
     /**
      * Sets the number of retries of a version conflict occurs because the document was updated between
      * getting it and updating it. Defaults to 0.
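The builder-side equivalent, sketched with invented coordinates (the deprecation message string is the one logged by setFields above):

    client.prepareUpdate("posts", "post", "1")
            .setDoc("views", 42)
            .setFetchSource("views", null) // include pattern, no exclude
            .get();
    // calling setFields(...) instead now logs:
    //   "Deprecated field [fields] used, expected [_source] instead"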
@@ -279,26 +329,6 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
         return this;
     }

-    public UpdateRequestBuilder setSource(XContentBuilder source) throws Exception {
-        request.source(source);
-        return this;
-    }
-
-    public UpdateRequestBuilder setSource(byte[] source) throws Exception {
-        request.source(source);
-        return this;
-    }
-
-    public UpdateRequestBuilder setSource(byte[] source, int offset, int length) throws Exception {
-        request.source(source, offset, length);
-        return this;
-    }
-
-    public UpdateRequestBuilder setSource(BytesReference source) throws Exception {
-        request.source(source);
-        return this;
-    }
-
     /**
      * Sets whether the specified doc parameter should be used as upsert document.
      */
@@ -20,15 +20,19 @@
 package org.elasticsearch.bootstrap;

 import org.apache.logging.log4j.Logger;
+import org.apache.logging.log4j.core.Appender;
+import org.apache.logging.log4j.core.appender.ConsoleAppender;
 import org.apache.lucene.util.Constants;
 import org.apache.lucene.util.IOUtils;
 import org.apache.lucene.util.StringHelper;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.Version;
 import org.elasticsearch.cli.Terminal;
+import org.elasticsearch.cli.UserException;
 import org.elasticsearch.common.PidFile;
 import org.elasticsearch.common.SuppressForbidden;
 import org.elasticsearch.common.inject.CreationException;
+import org.elasticsearch.common.logging.ESLoggerFactory;
 import org.elasticsearch.common.logging.LogConfigurator;
 import org.elasticsearch.common.logging.Loggers;
 import org.elasticsearch.common.settings.Settings;

@@ -38,12 +42,16 @@ import org.elasticsearch.monitor.jvm.JvmInfo;
 import org.elasticsearch.monitor.os.OsProbe;
 import org.elasticsearch.monitor.process.ProcessProbe;
 import org.elasticsearch.node.Node;
+import org.elasticsearch.node.NodeValidationException;
 import org.elasticsearch.node.internal.InternalSettingsPreparer;

 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.io.UnsupportedEncodingException;
+import java.net.URISyntaxException;
 import java.nio.file.Path;
+import java.security.NoSuchAlgorithmException;
 import java.util.Map;
 import java.util.concurrent.CountDownLatch;

@@ -142,7 +150,7 @@
         JvmInfo.jvmInfo();
     }

-    private void setup(boolean addShutdownHook, Environment environment) throws Exception {
+    private void setup(boolean addShutdownHook, Environment environment) throws BootstrapException {
         Settings settings = environment.settings();
         initializeNatives(
                 environment.tmpFile(),
@@ -166,15 +174,25 @@
             });
         }

-        // look for jar hell
-        JarHell.checkJarHell();
+        try {
+            // look for jar hell
+            JarHell.checkJarHell();
+        } catch (IOException | URISyntaxException e) {
+            throw new BootstrapException(e);
+        }

         // install SM after natives, shutdown hooks, etc.
-        Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
+        try {
+            Security.configure(environment, BootstrapSettings.SECURITY_FILTER_BAD_DEFAULTS_SETTING.get(settings));
+        } catch (IOException | NoSuchAlgorithmException e) {
+            throw new BootstrapException(e);
+        }

         node = new Node(environment) {
             @Override
-            protected void validateNodeBeforeAcceptingRequests(Settings settings, BoundTransportAddress boundTransportAddress) {
+            protected void validateNodeBeforeAcceptingRequests(
+                final Settings settings,
+                final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
                 BootstrapCheck.check(settings, boundTransportAddress);
             }
         };

@@ -189,7 +207,7 @@
         return InternalSettingsPreparer.prepareEnvironment(builder.build(), terminal, esSettings);
     }

-    private void start() {
+    private void start() throws NodeValidationException {
         node.start();
         keepAliveThread.start();
     }

@@ -210,13 +228,13 @@
     }

     /**
-     * This method is invoked by {@link Elasticsearch#main(String[])}
-     * to startup elasticsearch.
+     * This method is invoked by {@link Elasticsearch#main(String[])} to startup elasticsearch.
      */
     static void init(
             final boolean foreground,
             final Path pidFile,
-            final Map<String, String> esSettings) throws Exception {
+            final boolean quiet,
+            final Map<String, String> esSettings) throws BootstrapException, NodeValidationException, UserException {
         // Set the system property before anything has a chance to trigger its use
         initLoggerPrefix();

@@ -227,16 +245,29 @@
         INSTANCE = new Bootstrap();

         Environment environment = initialEnvironment(foreground, pidFile, esSettings);
-        LogConfigurator.configure(environment, true);
+        try {
+            LogConfigurator.configure(environment, true);
+        } catch (IOException e) {
+            throw new BootstrapException(e);
+        }
         checkForCustomConfFile();

         if (environment.pidFile() != null) {
-            PidFile.create(environment.pidFile(), true);
+            try {
+                PidFile.create(environment.pidFile(), true);
+            } catch (IOException e) {
+                throw new BootstrapException(e);
+            }
         }

+        final boolean closeStandardStreams = (foreground == false) || quiet;
         try {
-            if (!foreground) {
-                Loggers.disableConsoleLogging();
+            if (closeStandardStreams) {
+                final Logger rootLogger = ESLoggerFactory.getRootLogger();
+                final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
+                if (maybeConsoleAppender != null) {
+                    Loggers.removeAppender(rootLogger, maybeConsoleAppender);
+                }
                 closeSystOut();
             }

@ -256,13 +287,15 @@ final class Bootstrap {
|
|||
|
||||
INSTANCE.start();
|
||||
|
||||
if (!foreground) {
|
||||
if (closeStandardStreams) {
|
||||
closeSysError();
|
||||
}
|
||||
} catch (Exception e) {
|
||||
} catch (NodeValidationException | RuntimeException e) {
|
||||
// disable console logging, so user does not see the exception twice (jvm will show it already)
|
||||
if (foreground) {
|
||||
Loggers.disableConsoleLogging();
|
||||
final Logger rootLogger = ESLoggerFactory.getRootLogger();
|
||||
final Appender maybeConsoleAppender = Loggers.findAppender(rootLogger, ConsoleAppender.class);
|
||||
if (foreground && maybeConsoleAppender != null) {
|
||||
Loggers.removeAppender(rootLogger, maybeConsoleAppender);
|
||||
}
|
||||
Logger logger = Loggers.getLogger(Bootstrap.class);
|
||||
if (INSTANCE.node != null) {
|
||||
|
@ -272,17 +305,30 @@ final class Bootstrap {
|
|||
if (e instanceof CreationException) {
|
||||
// guice: log the shortened exc to the log file
|
||||
ByteArrayOutputStream os = new ByteArrayOutputStream();
|
||||
PrintStream ps = new PrintStream(os, false, "UTF-8");
|
||||
PrintStream ps = null;
|
||||
try {
|
||||
ps = new PrintStream(os, false, "UTF-8");
|
||||
} catch (UnsupportedEncodingException uee) {
|
||||
assert false;
|
||||
e.addSuppressed(uee);
|
||||
}
|
||||
new StartupException(e).printStackTrace(ps);
|
||||
ps.flush();
|
||||
logger.error("Guice Exception: {}", os.toString("UTF-8"));
|
||||
try {
|
||||
logger.error("Guice Exception: {}", os.toString("UTF-8"));
|
||||
} catch (UnsupportedEncodingException uee) {
|
||||
assert false;
|
||||
e.addSuppressed(uee);
|
||||
}
|
||||
} else if (e instanceof NodeValidationException) {
|
||||
logger.error("node validation exception\n{}", e.getMessage());
|
||||
} else {
|
||||
// full exception
|
||||
logger.error("Exception", e);
|
||||
}
|
||||
// re-enable it if appropriate, so they can see any logging during the shutdown process
|
||||
if (foreground) {
|
||||
Loggers.enableConsoleLogging();
|
||||
if (foreground && maybeConsoleAppender != null) {
|
||||
Loggers.addAppender(rootLogger, maybeConsoleAppender);
|
||||
}
|
||||
|
||||
throw e;
|
||||
|
|
|
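The recurring shape in this file — catch the narrow checked exceptions at each bootstrap step and rethrow them as one wrapper type — is what keeps init from declaring every checked exception it can transitively hit. A minimal, self-contained sketch of the idiom (the names WrapperException and riskyStep are illustrative, not the actual Elasticsearch API):

import java.io.IOException;
import java.net.URISyntaxException;

class WrapperException extends Exception {
    WrapperException(final Exception cause) {
        super(cause);
    }
}

final class WrapDemo {
    // stand-in for a bootstrap step such as JarHell.checkJarHell()
    static void riskyStep() throws IOException, URISyntaxException {
    }

    static void init() throws WrapperException {
        try {
            riskyStep();
        } catch (IOException | URISyntaxException e) {
            // callers of init only ever see the single wrapper type
            throw new WrapperException(e);
        }
    }
}
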
@@ -23,6 +23,8 @@ import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.util.Supplier;
import org.apache.lucene.util.Constants;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.io.PathUtils;
import org.elasticsearch.common.logging.Loggers;

@@ -32,6 +34,7 @@ import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.process.ProcessProbe;
import org.elasticsearch.node.Node;
import org.elasticsearch.node.NodeValidationException;

import java.io.BufferedReader;
import java.io.IOException;

@@ -62,7 +65,7 @@ final class BootstrapCheck {
     * @param settings the current node settings
     * @param boundTransportAddress the node network bindings
     */
    static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) {
    static void check(final Settings settings, final BoundTransportAddress boundTransportAddress) throws NodeValidationException {
        check(
            enforceLimits(boundTransportAddress),
            BootstrapSettings.IGNORE_SYSTEM_BOOTSTRAP_CHECKS.get(settings),

@@ -82,7 +85,11 @@ final class BootstrapCheck {
     * @param nodeName the node name to be used as a logging prefix
     */
    // visible for testing
    static void check(final boolean enforceLimits, final boolean ignoreSystemChecks, final List<Check> checks, final String nodeName) {
    static void check(
        final boolean enforceLimits,
        final boolean ignoreSystemChecks,
        final List<Check> checks,
        final String nodeName) throws NodeValidationException {
        check(enforceLimits, ignoreSystemChecks, checks, Loggers.getLogger(BootstrapCheck.class, nodeName));
    }

@@ -101,7 +108,7 @@ final class BootstrapCheck {
        final boolean enforceLimits,
        final boolean ignoreSystemChecks,
        final List<Check> checks,
        final Logger logger) {
        final Logger logger) throws NodeValidationException {
        final List<String> errors = new ArrayList<>();
        final List<String> ignoredErrors = new ArrayList<>();

@@ -130,9 +137,9 @@ final class BootstrapCheck {
            final List<String> messages = new ArrayList<>(1 + errors.size());
            messages.add("bootstrap checks failed");
            messages.addAll(errors);
            final RuntimeException re = new RuntimeException(String.join("\n", messages));
            errors.stream().map(IllegalStateException::new).forEach(re::addSuppressed);
            throw re;
            final NodeValidationException ne = new NodeValidationException(String.join("\n", messages));
            errors.stream().map(IllegalStateException::new).forEach(ne::addSuppressed);
            throw ne;
        }

    }

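Note how the failure path throws exactly once but loses nothing: every individual check error also rides along as a suppressed exception, so stack traces list them one by one. A self-contained illustration of that pattern, with a plain RuntimeException standing in for NodeValidationException:

import java.util.Arrays;
import java.util.List;

final class SuppressedDemo {
    public static void main(String[] args) {
        final List<String> errors = Arrays.asList(
                "max file descriptors [4096] likely too low",
                "memory locking requested but not allowed");
        final RuntimeException e = new RuntimeException("bootstrap checks failed\n" + String.join("\n", errors));
        // each error also becomes a suppressed exception on the single thrown exception
        errors.stream().map(IllegalStateException::new).forEach(e::addSuppressed);
        throw e; // printStackTrace lists each error as "Suppressed: ..."
    }
}
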
@@ -0,0 +1,43 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.bootstrap;

import java.nio.file.Path;
import java.util.Map;

/**
 * Wrapper exception for checked exceptions thrown during the bootstrap process. Methods invoked
 * during bootstrap should explicitly declare the checked exceptions that they can throw, rather
 * than declaring the top-level checked exception {@link Exception}. This exception exists to wrap
 * these checked exceptions so that {@link Bootstrap#init(boolean, Path, boolean, Map)} does not have to
 * declare all of these checked exceptions.
 */
class BootstrapException extends Exception {

    /**
     * Wraps an existing exception.
     *
     * @param cause the underlying cause of bootstrap failing
     */
    BootstrapException(final Exception cause) {
        super(cause);
    }

}

@@ -29,6 +29,7 @@ import org.elasticsearch.cli.SettingCommand;
import org.elasticsearch.cli.Terminal;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.node.NodeValidationException;

import java.io.IOException;
import java.nio.file.Path;

@@ -43,6 +44,7 @@ class Elasticsearch extends SettingCommand {
    private final OptionSpecBuilder versionOption;
    private final OptionSpecBuilder daemonizeOption;
    private final OptionSpec<Path> pidfileOption;
    private final OptionSpecBuilder quietOption;

    // visible for testing
    Elasticsearch() {

@@ -57,6 +59,10 @@ class Elasticsearch extends SettingCommand {
            .availableUnless(versionOption)
            .withRequiredArg()
            .withValuesConvertedBy(new PathConverter());
        quietOption = parser.acceptsAll(Arrays.asList("q", "quiet"),
            "Turns off standard output/error streams logging in console")
            .availableUnless(versionOption)
            .availableUnless(daemonizeOption);
    }

    /**

@@ -75,7 +81,7 @@ class Elasticsearch extends SettingCommand {
    }

    @Override
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws Exception {
    protected void execute(Terminal terminal, OptionSet options, Map<String, String> settings) throws UserException {
        if (options.nonOptionArguments().isEmpty() == false) {
            throw new UserException(ExitCodes.USAGE, "Positional arguments not allowed, found " + options.nonOptionArguments());
        }

@@ -91,17 +97,23 @@ class Elasticsearch extends SettingCommand {

        final boolean daemonize = options.has(daemonizeOption);
        final Path pidFile = pidfileOption.value(options);
        final boolean quiet = options.has(quietOption);

        init(daemonize, pidFile, settings);
        try {
            init(daemonize, pidFile, quiet, settings);
        } catch (NodeValidationException e) {
            throw new UserException(ExitCodes.CONFIG, e.getMessage());
        }
    }

    void init(final boolean daemonize, final Path pidFile, final Map<String, String> esSettings) {
    void init(final boolean daemonize, final Path pidFile, final boolean quiet, final Map<String, String> esSettings)
        throws NodeValidationException, UserException {
        try {
            Bootstrap.init(!daemonize, pidFile, esSettings);
        } catch (final Throwable t) {
            Bootstrap.init(!daemonize, pidFile, quiet, esSettings);
        } catch (BootstrapException | RuntimeException e) {
            // format exceptions to the console in a special way
            // to avoid 2MB stacktraces from guice, etc.
            throw new StartupException(t);
            throw new StartupException(e);
        }
    }

@@ -111,7 +123,8 @@ class Elasticsearch extends SettingCommand {
     *
     * http://commons.apache.org/proper/commons-daemon/procrun.html
     *
     * NOTE: If this method is renamed and/or moved, make sure to update service.bat!
     * NOTE: If this method is renamed and/or moved, make sure to
     * update elasticsearch-service.bat!
     */
    static void close(String[] args) throws IOException {
        Bootstrap.stop();

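The option wiring above uses jopt-simple's availableUnless to make --quiet illegal alongside --version or --daemonize (a daemon has already detached from the console, so there is nothing to quiet). A runnable sketch of that constraint, assuming the jopt-simple library the surrounding code imports from; the option names here are illustrative:

import joptsimple.OptionParser;
import joptsimple.OptionSet;
import joptsimple.OptionSpecBuilder;

import java.util.Arrays;

final class CliDemo {
    public static void main(String[] args) {
        final OptionParser parser = new OptionParser();
        final OptionSpecBuilder daemonize =
                parser.acceptsAll(Arrays.asList("d", "daemonize"), "run in the background");
        final OptionSpecBuilder quiet =
                parser.acceptsAll(Arrays.asList("q", "quiet"), "suppress console output")
                        .availableUnless(daemonize); // "-d -q" now fails to parse
        final OptionSet options = parser.parse(args);
        System.out.println("daemonize=" + options.has(daemonize) + ", quiet=" + options.has(quiet));
    }
}
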
@@ -27,6 +27,7 @@ import org.elasticsearch.common.logging.Loggers;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URISyntaxException;
import java.net.URL;
import java.net.URLClassLoader;
import java.nio.file.FileVisitResult;

@@ -74,7 +75,7 @@ public class JarHell {
     * Checks the current classpath for duplicate classes
     * @throws IllegalStateException if jar hell was found
     */
    public static void checkJarHell() throws Exception {
    public static void checkJarHell() throws IOException, URISyntaxException {
        ClassLoader loader = JarHell.class.getClassLoader();
        Logger logger = Loggers.getLogger(JarHell.class);
        if (logger.isDebugEnabled()) {

@@ -149,7 +150,7 @@ public class JarHell {
     * @throws IllegalStateException if jar hell was found
     */
    @SuppressForbidden(reason = "needs JarFile for speed, just reading entries")
    public static void checkJarHell(URL urls[]) throws Exception {
    public static void checkJarHell(URL urls[]) throws URISyntaxException, IOException {
        Logger logger = Loggers.getLogger(JarHell.class);
        // we don't try to be sneaky and use deprecated/internal/not portable stuff
        // like sun.boot.class.path, and with jigsaw we don't yet have a way to get

@@ -278,6 +279,12 @@ public class JarHell {
             * cf. https://issues.apache.org/jira/browse/LOG4J2-1560
             */
            return;
        } else if (clazz.startsWith("org.apache.logging.log4j.core.jmx.Server")) {
            /*
             * deliberate to hack around a bug in Log4j
             * cf. https://issues.apache.org/jira/browse/LOG4J2-1506
             */
            return;
        }
        throw new IllegalStateException("jar hell!" + System.lineSeparator() +
            "class: " + clazz + System.lineSeparator() +

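At its core, a jar-hell check is a first-writer-wins map from class name to the jar that provided it; a second writer is a conflict. A toy, self-contained version of that idea (not the real implementation, which walks the actual classpath and reads jar entries):

import java.util.HashMap;
import java.util.Map;

final class JarHellDemo {
    public static void main(String[] args) {
        final Map<String, String> classToJar = new HashMap<>();
        final String[][] entries = {
                {"a.jar", "org.example.Foo"},
                {"b.jar", "org.example.Foo"}, // same class from a second jar: conflict
        };
        for (final String[] entry : entries) {
            final String previous = classToJar.put(entry[1], entry[0]);
            if (previous != null) {
                throw new IllegalStateException("jar hell!" + System.lineSeparator()
                        + "class: " + entry[1] + System.lineSeparator()
                        + "jar1: " + previous + System.lineSeparator()
                        + "jar2: " + entry[0]);
            }
        }
    }
}
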
@@ -114,13 +114,13 @@ final class Security {
     * @param environment configuration for generating dynamic permissions
     * @param filterBadDefaults true if we should filter out bad java defaults in the system policy.
     */
    static void configure(Environment environment, boolean filterBadDefaults) throws Exception {
    static void configure(Environment environment, boolean filterBadDefaults) throws IOException, NoSuchAlgorithmException {

        // enable security policy: union of template and environment-based paths, and possibly plugin permissions
        Policy.setPolicy(new ESPolicy(createPermissions(environment), getPluginPermissions(environment), filterBadDefaults));

        // enable security manager
        System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap." }));
        System.setSecurityManager(new SecureSM(new String[] { "org.elasticsearch.bootstrap.", "org.elasticsearch.cli" }));

        // do some basic tests
        selfTest();

@@ -257,11 +257,6 @@ final class Security {
        for (Path path : environment.dataFiles()) {
            addPath(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
        }
        // TODO: this should be removed in ES 6.0! We will no longer support data paths with the cluster as a folder
        assert Version.CURRENT.major < 6 : "cluster name is no longer used in data path";
        for (Path path : environment.dataWithClusterFiles()) {
            addPathIfExists(policy, Environment.PATH_DATA_SETTING.getKey(), path, "read,readlink,write,delete");
        }
        for (Path path : environment.repoFiles()) {
            addPath(policy, Environment.PATH_REPO_SETTING.getKey(), path, "read,readlink,write,delete");
        }

@@ -136,7 +136,6 @@ public abstract class TransportClient extends AbstractClient {
        }
        modules.add(networkModule);
        modules.add(b -> b.bind(ThreadPool.class).toInstance(threadPool));
        modules.add(searchModule);
        ActionModule actionModule = new ActionModule(false, true, settings, null, settingsModule.getClusterSettings(),
            pluginsService.filterPlugins(ActionPlugin.class));
        modules.add(actionModule);

@@ -607,23 +607,21 @@ public class IndexNameExpressionResolver extends AbstractComponent {
                add = false;
                expression = expression.substring(1);
            }
            if (result == null) {
                // add all the previous ones...
                result = new HashSet<>(expressions.subList(0, i));
            }
            if (!Regex.isSimpleMatchPattern(expression)) {
                if (!unavailableIgnoredOrExists(options, metaData, expression)) {
                    throw infe(expression);
                }
                if (result != null) {
                    if (add) {
                        result.add(expression);
                    } else {
                        result.remove(expression);
                    }
                if (add) {
                    result.add(expression);
                } else {
                    result.remove(expression);
                }
                continue;
            }
            if (result == null) {
                // add all the previous ones...
                result = new HashSet<>(expressions.subList(0, i));
            }

            final IndexMetaData.State excludeState = excludeState(options);
            final Map<String, AliasOrIndex> matches = matches(metaData, expression);

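The loop above folds a sequence of index expressions into one result set: a leading "-" removes, anything else adds, and wildcard patterns take a slower matching path. A self-contained sketch of just the add/remove folding (illustrative; the real resolver also handles wildcards, aliases, and ignore-unavailable options):

import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

final class ExpressionFoldDemo {
    public static void main(String[] args) {
        final List<String> expressions = Arrays.asList("logs-1", "logs-2", "-logs-2");
        final Set<String> result = new HashSet<>();
        for (String expression : expressions) {
            boolean add = true;
            if (expression.startsWith("-")) {
                add = false;
                expression = expression.substring(1);
            }
            if (add) {
                result.add(expression);
            } else {
                result.remove(expression);
            }
        }
        System.out.println(result); // [logs-1]
    }
}
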
@@ -21,7 +21,9 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;

import org.elasticsearch.Version;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.collect.MapBuilder;
import org.elasticsearch.common.compress.CompressedXContent;

@@ -37,6 +39,7 @@ import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.Map;
import java.util.Objects;
import java.util.Set;

/**

@@ -50,6 +53,26 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat

    private final int order;

    /**
     * The version is an arbitrary number managed by the user so that they can easily and quickly verify the existence of a given template.
     * Expected usage:
     * <pre><code>
     * PUT /_template/my_template
     * {
     *   "template": "my_index-*",
     *   "mappings": { ... },
     *   "version": 1
     * }
     * </code></pre>
     * Then, some process from the user can occasionally verify that the template exists with the appropriate version without having to
     * check the template's content:
     * <pre><code>
     * GET /_template/my_template?filter_path=*.version
     * </code></pre>
     */
    @Nullable
    private final Integer version;

    private final String template;

    private final Settings settings;

@@ -61,10 +84,14 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat

    private final ImmutableOpenMap<String, IndexMetaData.Custom> customs;

    public IndexTemplateMetaData(String name, int order, String template, Settings settings, ImmutableOpenMap<String, CompressedXContent> mappings,
                                 ImmutableOpenMap<String, AliasMetaData> aliases, ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
    public IndexTemplateMetaData(String name, int order, Integer version,
                                 String template, Settings settings,
                                 ImmutableOpenMap<String, CompressedXContent> mappings,
                                 ImmutableOpenMap<String, AliasMetaData> aliases,
                                 ImmutableOpenMap<String, IndexMetaData.Custom> customs) {
        this.name = name;
        this.order = order;
        this.version = version;
        this.template = template;
        this.settings = settings;
        this.mappings = mappings;

@@ -84,6 +111,16 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
        return order();
    }

    @Nullable
    public Integer getVersion() {
        return version();
    }

    @Nullable
    public Integer version() {
        return version;
    }

    public String getName() {
        return this.name;
    }

@@ -150,13 +187,14 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
        if (!settings.equals(that.settings)) return false;
        if (!template.equals(that.template)) return false;

        return true;
        return Objects.equals(version, that.version);
    }

    @Override
    public int hashCode() {
        int result = name.hashCode();
        result = 31 * result + order;
        result = 31 * result + Objects.hashCode(version);
        result = 31 * result + template.hashCode();
        result = 31 * result + settings.hashCode();
        result = 31 * result + mappings.hashCode();

@@ -184,6 +222,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                IndexMetaData.Custom customIndexMetaData = IndexMetaData.lookupPrototypeSafe(type).readFrom(in);
                builder.putCustom(type, customIndexMetaData);
            }
            if (in.getVersion().onOrAfter(Version.V_5_0_0_alpha6)) {
                builder.version(in.readOptionalVInt());
            }
            return builder.build();
        }

@@ -207,6 +248,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                out.writeString(cursor.key);
                cursor.value.writeTo(out);
            }
            if (out.getVersion().onOrAfter(Version.V_5_0_0_alpha6)) {
                out.writeOptionalVInt(version);
            }
        }

        public static class Builder {

@@ -220,6 +264,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat

        private int order;

        private Integer version;

        private String template;

        private Settings settings = Settings.Builder.EMPTY_SETTINGS;

@@ -240,6 +286,7 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
        public Builder(IndexTemplateMetaData indexTemplateMetaData) {
            this.name = indexTemplateMetaData.name();
            order(indexTemplateMetaData.order());
            version(indexTemplateMetaData.version());
            template(indexTemplateMetaData.template());
            settings(indexTemplateMetaData.settings());

@@ -253,6 +300,11 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
            return this;
        }

        public Builder version(Integer version) {
            this.version = version;
            return this;
        }

        public Builder template(String template) {
            this.template = template;
            return this;

@@ -312,14 +364,18 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
        }

        public IndexTemplateMetaData build() {
            return new IndexTemplateMetaData(name, order, template, settings, mappings.build(), aliases.build(), customs.build());
            return new IndexTemplateMetaData(name, order, version, template, settings, mappings.build(), aliases.build(), customs.build());
        }

        @SuppressWarnings("unchecked")
        public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params) throws IOException {
        public static void toXContent(IndexTemplateMetaData indexTemplateMetaData, XContentBuilder builder, ToXContent.Params params)
                throws IOException {
            builder.startObject(indexTemplateMetaData.name());

            builder.field("order", indexTemplateMetaData.order());
            if (indexTemplateMetaData.version() != null) {
                builder.field("version", indexTemplateMetaData.version());
            }
            builder.field("template", indexTemplateMetaData.template());

            builder.startObject("settings");

@@ -380,7 +436,9 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
            } else if (token == XContentParser.Token.START_OBJECT) {
                if ("settings".equals(currentFieldName)) {
                    Settings.Builder templateSettingsBuilder = Settings.builder();
                    templateSettingsBuilder.put(SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered())).normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
                    templateSettingsBuilder.put(
                        SettingsLoader.Helper.loadNestedFromMap(parser.mapOrdered()))
                        .normalizePrefix(IndexMetaData.INDEX_SETTING_PREFIX);
                    builder.settings(templateSettingsBuilder.build());
                } else if ("mappings".equals(currentFieldName)) {
                    while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {

@@ -388,7 +446,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                            currentFieldName = parser.currentName();
                        } else if (token == XContentParser.Token.START_OBJECT) {
                            String mappingType = currentFieldName;
                            Map<String, Object> mappingSource = MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
                            Map<String, Object> mappingSource =
                                MapBuilder.<String, Object>newMapBuilder().put(mappingType, parser.mapOrdered()).map();
                            builder.putMapping(mappingType, XContentFactory.jsonBuilder().map(mappingSource).string());
                        }
                    }

@@ -428,6 +487,8 @@ public class IndexTemplateMetaData extends AbstractDiffable<IndexTemplateMetaDat
                    builder.template(parser.text());
                } else if ("order".equals(currentFieldName)) {
                    builder.order(parser.intValue());
                } else if ("version".equals(currentFieldName)) {
                    builder.version(parser.intValue());
                }
            }
        }

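Two details above are worth flagging: the new field is nullable (templates created before it existed simply have no version), so equals/hashCode go through Objects.equals and Objects.hashCode, and the wire format only carries the field when both peers understand it. A self-contained sketch of the nullable-field half (VersionedTemplate is an illustrative stand-in):

import java.util.Objects;

final class VersionedTemplate {
    private final String name;
    private final Integer version; // nullable: pre-upgrade templates have no version

    VersionedTemplate(String name, Integer version) {
        this.name = name;
        this.version = version;
    }

    @Override
    public boolean equals(Object o) {
        if (this == o) return true;
        if (!(o instanceof VersionedTemplate)) return false;
        VersionedTemplate that = (VersionedTemplate) o;
        // Objects.equals treats two nulls as equal, so no special-casing is needed
        return name.equals(that.name) && Objects.equals(version, that.version);
    }

    @Override
    public int hashCode() {
        int result = name.hashCode();
        result = 31 * result + Objects.hashCode(version); // Objects.hashCode(null) == 0
        return result;
    }
}
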
@@ -500,15 +500,6 @@ public class MetaDataCreateIndexService extends AbstractComponent {
                validationErrors.add("custom path [" + customPath + "] is not a sub-path of path.shared_data [" + env.sharedDataFile() + "]");
            }
        }
        //norelease - this can be removed?
        Integer number_of_primaries = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_SHARDS, null);
        Integer number_of_replicas = settings.getAsInt(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, null);
        if (number_of_primaries != null && number_of_primaries <= 0) {
            validationErrors.add("index must have 1 or more primary shards");
        }
        if (number_of_replicas != null && number_of_replicas < 0) {
            validationErrors.add("index must have 0 or more replica shards");
        }
        return validationErrors;
    }

@@ -32,6 +32,7 @@ import org.elasticsearch.common.ValidationException;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.index.Index;

@@ -63,15 +64,21 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
    private final IndicesService indicesService;
    private final MetaDataCreateIndexService metaDataCreateIndexService;
    private final NodeServicesProvider nodeServicesProvider;
    private final IndexScopedSettings indexScopedSettings;

    @Inject
    public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService, MetaDataCreateIndexService metaDataCreateIndexService, AliasValidator aliasValidator, IndicesService indicesService, NodeServicesProvider nodeServicesProvider) {
    public MetaDataIndexTemplateService(Settings settings, ClusterService clusterService,
                                        MetaDataCreateIndexService metaDataCreateIndexService,
                                        AliasValidator aliasValidator, IndicesService indicesService,
                                        NodeServicesProvider nodeServicesProvider,
                                        IndexScopedSettings indexScopedSettings) {
        super(settings);
        this.clusterService = clusterService;
        this.aliasValidator = aliasValidator;
        this.indicesService = indicesService;
        this.metaDataCreateIndexService = metaDataCreateIndexService;
        this.nodeServicesProvider = nodeServicesProvider;
        this.indexScopedSettings = indexScopedSettings;
    }

    public void removeTemplates(final RemoveRequest request, final RemoveListener listener) {

@@ -204,6 +211,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
            createdIndex = dummyIndexService.index();

            templateBuilder.order(request.order);
            templateBuilder.version(request.version);
            templateBuilder.template(request.template);
            templateBuilder.settings(request.settings);

@@ -259,6 +267,14 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
            validationErrors.add("template must not contain the following characters " + Strings.INVALID_FILENAME_CHARS);
        }

        try {
            indexScopedSettings.validate(request.settings);
        } catch (IllegalArgumentException iae) {
            validationErrors.add(iae.getMessage());
            for (Throwable t : iae.getSuppressed()) {
                validationErrors.add(t.getMessage());
            }
        }
        List<String> indexSettingsValidation = metaDataCreateIndexService.getIndexSettingsValidationErrors(request.settings);
        validationErrors.addAll(indexSettingsValidation);
        if (!validationErrors.isEmpty()) {

@@ -288,6 +304,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
        final String cause;
        boolean create;
        int order;
        Integer version;
        String template;
        Settings settings = Settings.Builder.EMPTY_SETTINGS;
        Map<String, String> mappings = new HashMap<>();

@@ -345,6 +362,11 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
            this.masterTimeout = masterTimeout;
            return this;
        }

        public PutRequest version(Integer version) {
            this.version = version;
            return this;
        }
    }

    public static class PutResponse {

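The validation block above deliberately does not fail fast: the thrown IllegalArgumentException plus each of its suppressed exceptions are flattened into one error list, so the user sees every bad setting at once. A self-contained sketch of that collect-then-report shape:

import java.util.ArrayList;
import java.util.List;

final class CollectErrorsDemo {
    public static void main(String[] args) {
        final List<String> validationErrors = new ArrayList<>();
        try {
            validate();
        } catch (IllegalArgumentException iae) {
            validationErrors.add(iae.getMessage());
            for (Throwable t : iae.getSuppressed()) {
                validationErrors.add(t.getMessage());
            }
        }
        System.out.println(validationErrors); // both problems reported together
    }

    private static void validate() {
        final IllegalArgumentException iae = new IllegalArgumentException("unknown setting [index.foo]");
        iae.addSuppressed(new IllegalArgumentException("unknown setting [index.bar]"));
        throw iae;
    }
}
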
@@ -100,16 +100,7 @@ public class MetaDataIndexUpgradeService extends AbstractComponent {
     * Returns true if this index can be supported by the current version of elasticsearch
     */
    private static boolean isSupportedVersion(IndexMetaData indexMetaData) {
        if (indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1)) {
            // The index was created with elasticsearch that was using Lucene 5.2.1
            return true;
        }
        if (indexMetaData.getMinimumCompatibleVersion() != null &&
            indexMetaData.getMinimumCompatibleVersion().onOrAfter(org.apache.lucene.util.Version.LUCENE_5_0_0)) {
            //The index was upgraded we can work with it
            return true;
        }
        return false;
        return indexMetaData.getCreationVersion().onOrAfter(Version.V_2_0_0_beta1);
    }

    /**

@@ -96,7 +96,7 @@ public class DiscoveryNode implements Writeable, ToXContent {
     * @param version the version of the node
     */
    public DiscoveryNode(final String id, TransportAddress address, Version version) {
        this(id, address, Collections.emptyMap(), Collections.emptySet(), version);
        this(id, address, Collections.emptyMap(), EnumSet.allOf(Role.class), version);
    }

    /**

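The one-line change above flips the default from "no roles" to "all roles", which EnumSet.allOf expresses directly. A tiny runnable sketch of that call (the Role enum here is illustrative, not the Elasticsearch one):

import java.util.EnumSet;

final class RolesDemo {
    enum Role { MASTER, DATA, INGEST }

    public static void main(String[] args) {
        // every constant of the enum, in declaration order
        System.out.println(EnumSet.allOf(Role.class)); // [MASTER, DATA, INGEST]
    }
}
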
@@ -0,0 +1,205 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.cluster.routing.allocation;

import org.elasticsearch.cluster.routing.UnassignedInfo.AllocationStatus;
import org.elasticsearch.cluster.routing.allocation.decider.Decision;
import org.elasticsearch.cluster.routing.allocation.decider.Decision.Type;
import org.elasticsearch.common.Nullable;

import java.util.Collections;
import java.util.Map;
import java.util.Objects;

/**
 * Represents the allocation decision by an allocator for an unassigned shard.
 */
public class UnassignedShardDecision {
    /** a constant representing a shard decision where no decision was taken */
    public static final UnassignedShardDecision DECISION_NOT_TAKEN =
        new UnassignedShardDecision(null, null, null, null, null, null);

    @Nullable
    private final Decision finalDecision;
    @Nullable
    private final AllocationStatus allocationStatus;
    @Nullable
    private final String finalExplanation;
    @Nullable
    private final String assignedNodeId;
    @Nullable
    private final String allocationId;
    @Nullable
    private final Map<String, Decision> nodeDecisions;

    private UnassignedShardDecision(Decision finalDecision,
                                    AllocationStatus allocationStatus,
                                    String finalExplanation,
                                    String assignedNodeId,
                                    String allocationId,
                                    Map<String, Decision> nodeDecisions) {
        assert finalExplanation != null || finalDecision == null :
            "if a decision was taken, there must be an explanation for it";
        assert assignedNodeId != null || finalDecision == null || finalDecision.type() != Type.YES :
            "a yes decision must have a node to assign the shard to";
        assert allocationStatus != null || finalDecision == null || finalDecision.type() == Type.YES :
            "only a yes decision should not have an allocation status";
        assert allocationId == null || assignedNodeId != null :
            "allocation id can only be null if the assigned node is null";
        this.finalDecision = finalDecision;
        this.allocationStatus = allocationStatus;
        this.finalExplanation = finalExplanation;
        this.assignedNodeId = assignedNodeId;
        this.allocationId = allocationId;
        this.nodeDecisions = nodeDecisions != null ? Collections.unmodifiableMap(nodeDecisions) : null;
    }

    /**
     * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision.
     */
    public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus, String explanation) {
        return noDecision(allocationStatus, explanation, null);
    }

    /**
     * Creates a NO decision with the given {@link AllocationStatus} and explanation for the NO decision,
     * as well as the individual node-level decisions that comprised the final NO decision.
     */
    public static UnassignedShardDecision noDecision(AllocationStatus allocationStatus,
                                                     String explanation,
                                                     @Nullable Map<String, Decision> nodeDecisions) {
        Objects.requireNonNull(explanation, "explanation must not be null");
        Objects.requireNonNull(allocationStatus, "allocationStatus must not be null");
        return new UnassignedShardDecision(Decision.NO, allocationStatus, explanation, null, null, nodeDecisions);
    }

    /**
     * Creates a THROTTLE decision with the given explanation and individual node-level decisions that
     * comprised the final THROTTLE decision.
     */
    public static UnassignedShardDecision throttleDecision(String explanation,
                                                           Map<String, Decision> nodeDecisions) {
        Objects.requireNonNull(explanation, "explanation must not be null");
        return new UnassignedShardDecision(Decision.THROTTLE, AllocationStatus.DECIDERS_THROTTLED, explanation, null, null,
                                           nodeDecisions);
    }

    /**
     * Creates a YES decision with the given explanation and individual node-level decisions that
     * comprised the final YES decision, along with the node id to which the shard is assigned and
     * the allocation id for the shard, if available.
     */
    public static UnassignedShardDecision yesDecision(String explanation,
                                                      String assignedNodeId,
                                                      @Nullable String allocationId,
                                                      Map<String, Decision> nodeDecisions) {
        Objects.requireNonNull(explanation, "explanation must not be null");
        Objects.requireNonNull(assignedNodeId, "assignedNodeId must not be null");
        return new UnassignedShardDecision(Decision.YES, null, explanation, assignedNodeId, allocationId, nodeDecisions);
    }

    /**
     * Returns <code>true</code> if a decision was taken by the allocator, {@code false} otherwise.
     * If no decision was taken, then the rest of the fields in this object are meaningless and return {@code null}.
     */
    public boolean isDecisionTaken() {
        return finalDecision != null;
    }

    /**
     * Returns the final decision made by the allocator on whether to assign the unassigned shard.
     * This value can only be {@code null} if {@link #isDecisionTaken()} returns {@code false}.
     */
    @Nullable
    public Decision getFinalDecision() {
        return finalDecision;
    }

    /**
     * Returns the final decision made by the allocator on whether to assign the unassigned shard.
     * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
     * throw an {@code IllegalArgumentException}.
     */
    public Decision getFinalDecisionSafe() {
        if (isDecisionTaken() == false) {
            throw new IllegalArgumentException("decision must have been taken in order to return the final decision");
        }
        return finalDecision;
    }

    /**
     * Returns the status of an unsuccessful allocation attempt. This value will be {@code null} if
     * no decision was taken or if the decision was {@link Decision.Type#YES}.
     */
    @Nullable
    public AllocationStatus getAllocationStatus() {
        return allocationStatus;
    }

    /**
     * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
     */
    @Nullable
    public String getFinalExplanation() {
        return finalExplanation;
    }

    /**
     * Returns the free-text explanation for the reason behind the decision taken in {@link #getFinalDecision()}.
     * Only call this method if {@link #isDecisionTaken()} returns {@code true}, otherwise it will
     * throw an {@code IllegalArgumentException}.
     */
    public String getFinalExplanationSafe() {
        if (isDecisionTaken() == false) {
            throw new IllegalArgumentException("decision must have been taken in order to return the final explanation");
        }
        return finalExplanation;
    }

    /**
     * Get the node id that the allocator will assign the shard to, unless {@link #getFinalDecision()} returns
     * a value other than {@link Decision.Type#YES}, in which case this returns {@code null}.
     */
    @Nullable
    public String getAssignedNodeId() {
        return assignedNodeId;
    }

    /**
     * Gets the allocation id for the existing shard copy that the allocator is assigning the shard to.
     * This method returns a non-null value iff {@link #getAssignedNodeId()} returns a non-null value
     * and the node on which the shard is assigned already has a shard copy with an in-sync allocation id
     * that we can re-use.
     */
    @Nullable
    public String getAllocationId() {
        return allocationId;
    }

    /**
     * Gets the individual node-level decisions that went into making the final decision as represented by
     * {@link #getFinalDecision()}. The map that is returned has the node id as the key and a {@link Decision}
     * as the decision for the given node.
     */
    @Nullable
    public Map<String, Decision> getNodeDecisions() {
        return nodeDecisions;
    }
}

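The class above is a null-object-with-sentinel design: one shared DECISION_NOT_TAKEN instance whose fields are all null, plus "safe" getters that throw if queried before a decision exists. A reduced, self-contained sketch of that shape (MaybeDecision is an illustrative stand-in):

import java.util.Objects;

final class MaybeDecision {
    /** single shared sentinel: no decision has been taken */
    static final MaybeDecision NOT_TAKEN = new MaybeDecision(null);

    private final String explanation; // null means no decision yet

    private MaybeDecision(String explanation) {
        this.explanation = explanation;
    }

    static MaybeDecision taken(String explanation) {
        return new MaybeDecision(Objects.requireNonNull(explanation, "explanation must not be null"));
    }

    boolean isDecisionTaken() {
        return explanation != null;
    }

    String getExplanationSafe() {
        if (isDecisionTaken() == false) {
            throw new IllegalArgumentException("decision must have been taken");
        }
        return explanation;
    }
}
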
@@ -74,7 +74,7 @@ public class AllocationDeciders extends AllocationDecider {
            // short track if a NO is returned.
            if (decision == Decision.NO) {
                if (logger.isTraceEnabled()) {
                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.nodeId(), allocationDecider.getClass().getSimpleName());
                    logger.trace("Can not allocate [{}] on node [{}] due to [{}]", shardRouting, node.node(), allocationDecider.getClass().getSimpleName());
                }
                // short circuit only if debugging is not enabled
                if (!allocation.debugDecision()) {

@@ -19,8 +19,15 @@

package org.elasticsearch.common.geo;

import org.apache.lucene.document.LatLonDocValuesField;
import org.apache.lucene.document.LatLonPoint;
import org.apache.lucene.geo.GeoEncodingUtils;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.spatial.geopoint.document.GeoPointField;
import org.apache.lucene.util.BitUtil;
import org.apache.lucene.util.BytesRef;

import java.util.Arrays;

import static org.elasticsearch.common.geo.GeoHashUtils.mortonEncode;
import static org.elasticsearch.common.geo.GeoHashUtils.stringEncode;

@@ -88,6 +95,24 @@ public final class GeoPoint {
        return this;
    }

    // todo this is a crutch because LatLonPoint doesn't have a helper for returning .stringValue()
    // todo remove with next release of lucene
    public GeoPoint resetFromIndexableField(IndexableField field) {
        if (field instanceof LatLonPoint) {
            BytesRef br = field.binaryValue();
            byte[] bytes = Arrays.copyOfRange(br.bytes, br.offset, br.length);
            return this.reset(
                GeoEncodingUtils.decodeLatitude(bytes, 0),
                GeoEncodingUtils.decodeLongitude(bytes, Integer.BYTES));
        } else if (field instanceof LatLonDocValuesField) {
            long encoded = (long)(field.numericValue());
            return this.reset(
                GeoEncodingUtils.decodeLatitude((int)(encoded >>> 32)),
                GeoEncodingUtils.decodeLongitude((int)encoded));
        }
        return resetFromIndexHash(Long.parseLong(field.stringValue()));
    }

    public GeoPoint resetFromGeoHash(String geohash) {
        final long hash = mortonEncode(geohash);
        return this.reset(GeoPointField.decodeLatitude(hash), GeoPointField.decodeLongitude(hash));

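The doc-values branch above unpacks one long into two 32-bit halves: latitude in the upper 32 bits, longitude in the lower. A self-contained round-trip of that bit layout using raw ints (the real code additionally runs each half through Lucene's GeoEncodingUtils):

final class PackedLatLonDemo {
    public static void main(String[] args) {
        final int latBits = 123456;
        final int lonBits = -654321;
        // pack: latitude high, longitude low (the mask keeps the longitude's sign bits from leaking upward)
        final long encoded = (((long) latBits) << 32) | (lonBits & 0xFFFFFFFFL);
        // unpack, mirroring the decode calls in resetFromIndexableField
        final int decodedLat = (int) (encoded >>> 32);
        final int decodedLon = (int) encoded;
        System.out.println(decodedLat == latBits && decodedLon == lonBits); // true
    }
}
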
@@ -25,10 +25,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.stream.Collectors;
import java.util.stream.Stream;

import org.elasticsearch.plugins.Plugin;

/**
 * A registry for {@link org.elasticsearch.common.io.stream.Writeable.Reader} readers of {@link NamedWriteable}.

@@ -47,7 +43,7 @@ public class NamedWriteableRegistry {
        /** A name for the writeable which is unique to the {@link #categoryClass}. */
        public final String name;

        /** A reader captability of reading*/
        /** A reader capability of reading*/
        public final Writeable.Reader<?> reader;

        /** Creates a new entry which can be stored by the registry. */

@@ -871,6 +871,16 @@ public abstract class StreamOutput extends OutputStream {
        }
    }

    /**
     * Writes a list of strings
     */
    public void writeStringList(List<String> list) throws IOException {
        writeVInt(list.size());
        for (String string: list) {
            this.writeString(string);
        }
    }

    /**
     * Writes a list of {@link NamedWriteable} objects.
     */

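writeStringList is the usual length-prefixed encoding: element count first, then each element, so the reader knows exactly how many to pull back. A self-contained round-trip of the same scheme using plain DataOutputStream/DataInputStream (the real classes use variable-length ints and their own string encoding):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

final class ListWireDemo {
    public static void main(String[] args) throws IOException {
        final ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        final DataOutputStream out = new DataOutputStream(bytes);
        final List<String> strings = Arrays.asList("red", "green", "blue");
        out.writeInt(strings.size());   // length prefix first...
        for (String s : strings) {
            out.writeUTF(s);            // ...then each element
        }

        final DataInputStream in = new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        final int size = in.readInt();
        final List<String> decoded = new ArrayList<>(size);
        for (int i = 0; i < size; i++) {
            decoded.add(in.readUTF());
        }
        System.out.println(decoded); // [red, green, blue]
    }
}
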
@@ -22,17 +22,18 @@ package org.elasticsearch.common.logging;
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.message.MessageFactory;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;

import java.util.Locale;
import java.util.function.Function;

/**
 * Factory to get {@link Logger}s
 */
public abstract class ESLoggerFactory {
public final class ESLoggerFactory {

    private ESLoggerFactory() {

    }

    public static final Setting<Level> LOG_DEFAULT_LEVEL_SETTING =
        new Setting<>("logger.level", Level.INFO.name(), Level::valueOf, Property.NodeScope);

@@ -41,40 +42,27 @@ public abstract class ESLoggerFactory {
        Property.Dynamic, Property.NodeScope);

    public static Logger getLogger(String prefix, String name) {
        name = name.intern();
        final Logger logger = getLogger(new PrefixMessageFactory(), name);
        final MessageFactory factory = logger.getMessageFactory();
        // in some cases, we initialize the logger before we are ready to set the prefix
        // we can not re-initialize the logger, so the above getLogger might return an existing
        // instance without the prefix set; thus, we hack around this by resetting the prefix
        if (prefix != null && factory instanceof PrefixMessageFactory) {
            ((PrefixMessageFactory) factory).setPrefix(prefix.intern());
        }
        return logger;
        return getLogger(prefix, LogManager.getLogger(name));
    }

    public static Logger getLogger(MessageFactory messageFactory, String name) {
        return LogManager.getLogger(name, messageFactory);
    public static Logger getLogger(String prefix, Class<?> clazz) {
        return getLogger(prefix, LogManager.getLogger(clazz));
    }

    public static Logger getLogger(String prefix, Logger logger) {
        return new PrefixLogger((ExtendedLogger)logger, logger.getName(), prefix);
    }

    public static Logger getLogger(Class<?> clazz) {
        return getLogger(null, clazz);
    }

    public static Logger getLogger(String name) {
        return getLogger((String)null, name);
    }

    public static DeprecationLogger getDeprecationLogger(String name) {
        return new DeprecationLogger(getLogger(name));
    }

    public static DeprecationLogger getDeprecationLogger(String prefix, String name) {
        return new DeprecationLogger(getLogger(prefix, name));
        return getLogger(null, name);
    }

    public static Logger getRootLogger() {
        return LogManager.getRootLogger();
    }

    private ESLoggerFactory() {
        // Utility class can't be built.
    }

}

@@ -30,7 +30,8 @@ import org.apache.logging.log4j.core.config.builder.impl.BuiltConfiguration;
import org.apache.logging.log4j.core.config.composite.CompositeConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfiguration;
import org.apache.logging.log4j.core.config.properties.PropertiesConfigurationFactory;
import org.elasticsearch.Version;
import org.elasticsearch.cli.ExitCodes;
import org.elasticsearch.cli.UserException;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.SuppressForbidden;
import org.elasticsearch.common.settings.Settings;

@@ -44,7 +45,6 @@ import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import java.util.Map;

@@ -52,7 +52,7 @@ import java.util.Set;

public class LogConfigurator {

    public static void configure(final Environment environment, final boolean resolveConfig) throws IOException {
    public static void configure(final Environment environment, final boolean resolveConfig) throws IOException, UserException {
        final Settings settings = environment.settings();

        setLogConfigurationSystemProperty(environment, settings);

@@ -77,45 +77,28 @@ public class LogConfigurator {
                    return FileVisitResult.CONTINUE;
                }
            });

            if (configurations.isEmpty()) {
                throw new UserException(
                    ExitCodes.CONFIG,
                    "no log4j2.properties found; tried [" + environment.configFile() + "] and its subdirectories");
            }

            context.start(new CompositeConfiguration(configurations));
            warnIfOldConfigurationFilePresent(environment);
        }

        if (ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.exists(settings)) {
            Loggers.setLevel(ESLoggerFactory.getRootLogger(), ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings));
            final Level level = ESLoggerFactory.LOG_DEFAULT_LEVEL_SETTING.get(settings);
            Loggers.setLevel(ESLoggerFactory.getRootLogger(), level);
        }

        final Map<String, String> levels = settings.filter(ESLoggerFactory.LOG_LEVEL_SETTING::match).getAsMap();
        for (String key : levels.keySet()) {
            final Level level = ESLoggerFactory.LOG_LEVEL_SETTING.getConcreteSetting(key).get(settings);
            Loggers.setLevel(Loggers.getLogger(key.substring("logger.".length())), level);
            Loggers.setLevel(ESLoggerFactory.getLogger(key.substring("logger.".length())), level);
        }
    }

    private static void warnIfOldConfigurationFilePresent(final Environment environment) throws IOException {
        // TODO: the warning for unsupported logging configurations can be removed in 6.0.0
        assert Version.CURRENT.major < 6;
        final List<String> suffixes = Arrays.asList(".yml", ".yaml", ".json", ".properties");
        final Set<FileVisitOption> options = EnumSet.of(FileVisitOption.FOLLOW_LINKS);
        Files.walkFileTree(environment.configFile(), options, Integer.MAX_VALUE, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
                final String fileName = file.getFileName().toString();
                if (fileName.startsWith("logging")) {
                    for (final String suffix : suffixes) {
                        if (fileName.endsWith(suffix)) {
                            Loggers.getLogger(LogConfigurator.class).warn(
                                "ignoring unsupported logging configuration file [{}], logging is configured via [{}]",
                                file.toString(),
                                file.getParent().resolve("log4j2.properties"));
                        }
                    }
                }
                return FileVisitResult.CONTINUE;
            }
        });
    }

    @SuppressForbidden(reason = "sets system property for logging configuration")
    private static void setLogConfigurationSystemProperty(final Environment environment, final Settings settings) {
        System.setProperty("es.logs", environment.logsFile().resolve(ClusterName.CLUSTER_NAME_SETTING.get(settings).value()).toString());

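Both the new configuration discovery and the removed warning helper lean on the same NIO primitive: Files.walkFileTree with a SimpleFileVisitor. A runnable sketch of that walk, collecting every log4j2.properties under a directory and failing if none is found:

import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.List;

final class ConfigWalkDemo {
    public static void main(String[] args) throws IOException {
        final Path configDir = Paths.get(args.length > 0 ? args[0] : ".");
        final List<Path> configurations = new ArrayList<>();
        Files.walkFileTree(configDir, new SimpleFileVisitor<Path>() {
            @Override
            public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) {
                if (file.getFileName().toString().equals("log4j2.properties")) {
                    configurations.add(file);
                }
                return FileVisitResult.CONTINUE;
            }
        });
        if (configurations.isEmpty()) {
            throw new IllegalStateException("no log4j2.properties found; tried [" + configDir + "] and its subdirectories");
        }
        System.out.println(configurations);
    }
}
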
@@ -20,7 +20,9 @@
package org.elasticsearch.common.logging;

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.apache.logging.log4j.core.Appender;
import org.apache.logging.log4j.core.LoggerContext;
import org.apache.logging.log4j.core.config.Configuration;
import org.apache.logging.log4j.core.config.Configurator;

@@ -33,9 +35,12 @@ import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.node.Node;

import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;

import static java.util.Arrays.asList;
import static javax.security.auth.login.Configuration.getConfiguration;
import static org.elasticsearch.common.util.CollectionUtils.asArrayList;

/**

@@ -43,24 +48,8 @@ import static org.elasticsearch.common.util.CollectionUtils.asArrayList;
 */
public class Loggers {

    private static final String commonPrefix = System.getProperty("es.logger.prefix", "org.elasticsearch.");

    public static final String SPACE = " ";

    private static boolean consoleLoggingEnabled = true;

    public static void disableConsoleLogging() {
        consoleLoggingEnabled = false;
    }

    public static void enableConsoleLogging() {
        consoleLoggingEnabled = true;
    }

    public static boolean consoleLoggingEnabled() {
        return consoleLoggingEnabled;
    }

    public static Logger getLogger(Class<?> clazz, Settings settings, ShardId shardId, String... prefixes) {
        return getLogger(clazz, settings, shardId.getIndex(), asArrayList(Integer.toString(shardId.id()), prefixes).toArray(new String[0]));
    }

@@ -79,10 +68,16 @@ public class Loggers {
    }

    public static Logger getLogger(Class<?> clazz, Settings settings, String... prefixes) {
        return getLogger(buildClassLoggerName(clazz), settings, prefixes);
        final List<String> prefixesList = prefixesList(settings, prefixes);
        return getLogger(clazz, prefixesList.toArray(new String[prefixesList.size()]));
    }

    public static Logger getLogger(String loggerName, Settings settings, String... prefixes) {
        final List<String> prefixesList = prefixesList(settings, prefixes);
        return getLogger(loggerName, prefixesList.toArray(new String[prefixesList.size()]));
    }

    private static List<String> prefixesList(Settings settings, String... prefixes) {
        List<String> prefixesList = new ArrayList<>();
        if (Node.NODE_NAME_SETTING.exists(settings)) {
            prefixesList.add(Node.NODE_NAME_SETTING.get(settings));

@@ -90,26 +85,31 @@ public class Loggers {
        if (prefixes != null && prefixes.length > 0) {
            prefixesList.addAll(asList(prefixes));
        }
        return getLogger(getLoggerName(loggerName), prefixesList.toArray(new String[prefixesList.size()]));
        return prefixesList;
    }

    public static Logger getLogger(Logger parentLogger, String s) {
        return ESLoggerFactory.getLogger(parentLogger.<MessageFactory>getMessageFactory(), getLoggerName(parentLogger.getName() + s));
        assert parentLogger instanceof PrefixLogger;
        return ESLoggerFactory.getLogger(((PrefixLogger)parentLogger).prefix(), parentLogger.getName() + s);
    }

    public static Logger getLogger(String s) {
        return ESLoggerFactory.getLogger(getLoggerName(s));
        return ESLoggerFactory.getLogger(s);
    }

    public static Logger getLogger(Class<?> clazz) {
        return ESLoggerFactory.getLogger(getLoggerName(buildClassLoggerName(clazz)));
        return ESLoggerFactory.getLogger(clazz);
    }

    public static Logger getLogger(Class<?> clazz, String... prefixes) {
        return getLogger(buildClassLoggerName(clazz), prefixes);
        return ESLoggerFactory.getLogger(formatPrefix(prefixes), clazz);
    }

    public static Logger getLogger(String name, String... prefixes) {
        return ESLoggerFactory.getLogger(formatPrefix(prefixes), name);
    }

    private static String formatPrefix(String... prefixes) {
        String prefix = null;
        if (prefixes != null && prefixes.length > 0) {
            StringBuilder sb = new StringBuilder();

@@ -127,7 +127,7 @@ public class Loggers {
                prefix = sb.toString();
            }
        }
        return ESLoggerFactory.getLogger(prefix, getLoggerName(name));
        return prefix;
    }

    /**

@@ -145,30 +145,60 @@ public class Loggers {
    }

    public static void setLevel(Logger logger, Level level) {
        if (!"".equals(logger.getName())) {
        if (!LogManager.ROOT_LOGGER_NAME.equals(logger.getName())) {
            Configurator.setLevel(logger.getName(), level);
        } else {
            LoggerContext ctx = LoggerContext.getContext(false);
            Configuration config = ctx.getConfiguration();
            LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
            final LoggerContext ctx = LoggerContext.getContext(false);
            final Configuration config = ctx.getConfiguration();
            final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
            loggerConfig.setLevel(level);
            ctx.updateLoggers();
        }

        // we have to descend the hierarchy
        final LoggerContext ctx = LoggerContext.getContext(false);
        for (final LoggerConfig loggerConfig : ctx.getConfiguration().getLoggers().values()) {
            if (LogManager.ROOT_LOGGER_NAME.equals(logger.getName()) || loggerConfig.getName().startsWith(logger.getName() + ".")) {
                Configurator.setLevel(loggerConfig.getName(), level);
            }
        }
    }

    private static String buildClassLoggerName(Class<?> clazz) {
        String name = clazz.getName();
        if (name.startsWith("org.elasticsearch.")) {
            name = Classes.getPackageName(clazz);
    public static void addAppender(final Logger logger, final Appender appender) {
        final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        final Configuration config = ctx.getConfiguration();
        config.addAppender(appender);
        LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
        if (!logger.getName().equals(loggerConfig.getName())) {
            loggerConfig = new LoggerConfig(logger.getName(), logger.getLevel(), true);
            config.addLogger(logger.getName(), loggerConfig);
        }
        return name;
        loggerConfig.addAppender(appender, null, null);
        ctx.updateLoggers();
    }

    private static String getLoggerName(String name) {
        if (name.startsWith("org.elasticsearch.")) {
            name = name.substring("org.elasticsearch.".length());
    public static void removeAppender(final Logger logger, final Appender appender) {
        final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        final Configuration config = ctx.getConfiguration();
        LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
        if (!logger.getName().equals(loggerConfig.getName())) {
            loggerConfig = new LoggerConfig(logger.getName(), logger.getLevel(), true);
            config.addLogger(logger.getName(), loggerConfig);
        }
        return commonPrefix + name;
        loggerConfig.removeAppender(appender.getName());
        ctx.updateLoggers();
    }

    public static Appender findAppender(final Logger logger, final Class<? extends Appender> clazz) {
        final LoggerContext ctx = (LoggerContext) LogManager.getContext(false);
        final Configuration config = ctx.getConfiguration();
        final LoggerConfig loggerConfig = config.getLoggerConfig(logger.getName());
        for (final Map.Entry<String, Appender> entry : loggerConfig.getAppenders().entrySet()) {
            if (entry.getValue().getClass().equals(clazz)) {
|
||||
return entry.getValue();
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
}
|
||||
|
|
|
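The Loggers hunks above drop the name-mangling helpers (buildClassLoggerName, getLoggerName, the es.logger.prefix property) in favor of prefix-aware factory methods plus test-oriented appender plumbing. A minimal usage sketch, assuming only the signatures visible in the diff; the class and prefix values here are hypothetical:

```java
import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

// Hedged illustration only, not part of this commit.
public class LoggersSketch {
    public static void main(String[] args) {
        // A class logger carrying shard-style prefixes; the prefixes now end
        // up in a log4j Marker (see PrefixLogger below) rather than in a
        // custom MessageFactory.
        Logger shardLogger = Loggers.getLogger(LoggersSketch.class, "my-index", "0");
        shardLogger.info("recovery finished");

        // setLevel now also walks configured child loggers, so adjusting
        // "org.elasticsearch.index" adjusts "org.elasticsearch.index.*" too.
        Loggers.setLevel(Loggers.getLogger("org.elasticsearch.index"), Level.DEBUG);
    }
}
```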
@ -0,0 +1,70 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.apache.logging.log4j.Level;
import org.apache.logging.log4j.Marker;
import org.apache.logging.log4j.MarkerManager;
import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.spi.ExtendedLogger;
import org.apache.logging.log4j.spi.ExtendedLoggerWrapper;

import java.lang.ref.WeakReference;
import java.util.WeakHashMap;

class PrefixLogger extends ExtendedLoggerWrapper {

    // we can not use the built-in Marker tracking (MarkerManager) because the MarkerManager holds
    // a permanent reference to the marker; however, we have transient markers from index-level and
    // shard-level components so this would effectively be a memory leak
    private static final WeakHashMap<String, WeakReference<Marker>> markers = new WeakHashMap<>();

    private final Marker marker;

    public String prefix() {
        return marker.getName();
    }

    PrefixLogger(final ExtendedLogger logger, final String name, final String prefix) {
        super(logger, name, null);

        final String actualPrefix = (prefix == null ? "" : prefix).intern();
        final Marker actualMarker;
        // markers is not thread-safe, so we synchronize access
        synchronized (markers) {
            final WeakReference<Marker> marker = markers.get(actualPrefix);
            final Marker maybeMarker = marker == null ? null : marker.get();
            if (maybeMarker == null) {
                actualMarker = new MarkerManager.Log4jMarker(actualPrefix);
                markers.put(actualPrefix, new WeakReference<>(actualMarker));
            } else {
                actualMarker = maybeMarker;
            }
        }
        this.marker = actualMarker;
    }

    @Override
    public void logMessage(final String fqcn, final Level level, final Marker marker, final Message message, final Throwable t) {
        assert marker == null;
        super.logMessage(fqcn, level, this.marker, message, t);
    }

}
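The interesting detail in PrefixLogger is the double weakness: the map's keys are weak by construction, and the values are held through WeakReference as well, because a marker references its own name string and a strong value reference would keep the key reachable forever (the classic WeakHashMap caveat). A stand-alone sketch of the same caching idiom, using only JDK types:

```java
import java.lang.ref.WeakReference;
import java.util.WeakHashMap;
import java.util.function.Function;

// Hedged sketch of the idiom, not the class above: values go through
// WeakReference so a cached entry cannot pin its own key in memory.
final class WeakInternCache<T> {
    private final WeakHashMap<String, WeakReference<T>> cache = new WeakHashMap<>();

    // WeakHashMap is not thread-safe, so access is synchronized, mirroring
    // the synchronized (markers) block in PrefixLogger's constructor.
    synchronized T computeIfAbsent(String key, Function<String, T> factory) {
        WeakReference<T> ref = cache.get(key);
        T value = ref == null ? null : ref.get();
        if (value == null) {
            value = factory.apply(key);
            cache.put(key, new WeakReference<>(value));
        }
        return value;
    }
}
```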
@ -1,221 +0,0 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.common.logging;

import org.apache.logging.log4j.message.Message;
import org.apache.logging.log4j.message.MessageFactory2;
import org.apache.logging.log4j.message.ObjectMessage;
import org.apache.logging.log4j.message.ParameterizedMessage;
import org.apache.logging.log4j.message.SimpleMessage;

public class PrefixMessageFactory implements MessageFactory2 {

    private String prefix = "";

    public String getPrefix() {
        return prefix;
    }

    public void setPrefix(String prefix) {
        this.prefix = prefix;
    }

    @Override
    public Message newMessage(Object message) {
        return new PrefixObjectMessage(prefix, message);
    }

    private static class PrefixObjectMessage extends ObjectMessage {

        private final String prefix;
        private final Object object;
        private String prefixObjectString;

        private PrefixObjectMessage(String prefix, Object object) {
            super(object);
            this.prefix = prefix;
            this.object = object;
        }

        @Override
        public String getFormattedMessage() {
            if (prefixObjectString == null) {
                prefixObjectString = prefix + super.getFormattedMessage();
            }
            return prefixObjectString;
        }

        @Override
        public void formatTo(StringBuilder buffer) {
            buffer.append(prefix);
            super.formatTo(buffer);
        }

        @Override
        public Object[] getParameters() {
            return new Object[]{prefix, object};
        }

    }

    @Override
    public Message newMessage(String message) {
        return new PrefixSimpleMessage(prefix, message);
    }

    private static class PrefixSimpleMessage extends SimpleMessage {

        private final String prefix;
        private String prefixMessage;

        PrefixSimpleMessage(String prefix, String message) {
            super(message);
            this.prefix = prefix;
        }

        PrefixSimpleMessage(String prefix, CharSequence charSequence) {
            super(charSequence);
            this.prefix = prefix;
        }

        @Override
        public String getFormattedMessage() {
            if (prefixMessage == null) {
                prefixMessage = prefix + super.getFormattedMessage();
            }
            return prefixMessage;
        }

        @Override
        public void formatTo(StringBuilder buffer) {
            buffer.append(prefix);
            super.formatTo(buffer);
        }

        @Override
        public int length() {
            return prefixMessage.length();
        }

        @Override
        public char charAt(int index) {
            return prefixMessage.charAt(index);
        }

        @Override
        public CharSequence subSequence(int start, int end) {
            return prefixMessage.subSequence(start, end);
        }

    }

    @Override
    public Message newMessage(String message, Object... params) {
        return new PrefixParameterizedMessage(prefix, message, params);
    }

    private static class PrefixParameterizedMessage extends ParameterizedMessage {

        private static ThreadLocal<StringBuilder> threadLocalStringBuilder = ThreadLocal.withInitial(StringBuilder::new);

        private final String prefix;
        private String formattedMessage;

        private PrefixParameterizedMessage(String prefix, String messagePattern, Object... arguments) {
            super(messagePattern, arguments);
            this.prefix = prefix;
        }

        @Override
        public String getFormattedMessage() {
            if (formattedMessage == null) {
                final StringBuilder buffer = threadLocalStringBuilder.get();
                buffer.setLength(0);
                formatTo(buffer);
                formattedMessage = buffer.toString();
            }
            return formattedMessage;
        }

        @Override
        public void formatTo(StringBuilder buffer) {
            buffer.append(prefix);
            super.formatTo(buffer);
        }

    }

    @Override
    public Message newMessage(CharSequence charSequence) {
        return new PrefixSimpleMessage(prefix, charSequence);
    }

    @Override
    public Message newMessage(String message, Object p0) {
        return new PrefixParameterizedMessage(prefix, message, p0);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1, Object p2) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6);
    }

    @Override
    public Message newMessage(String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7);
    }

    @Override
    public Message newMessage(
        String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8);
    }

    @Override
    public Message newMessage(
        String message, Object p0, Object p1, Object p2, Object p3, Object p4, Object p5, Object p6, Object p7, Object p8, Object p9) {
        return new PrefixParameterizedMessage(prefix, message, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9);
    }
}
@ -25,19 +25,12 @@ import org.elasticsearch.common.logging.Loggers;

/** An InfoStream (for Lucene's IndexWriter) that redirects
 * messages to "lucene.iw.ifd" and "lucene.iw" Logger.trace. */

public final class LoggerInfoStream extends InfoStream {
    /** Used for component-specific logging: */

    /** Logger for everything */
    private final Logger logger;
    private final Logger parentLogger;

    /** Logger for IndexFileDeleter */
    private final Logger ifdLogger;

    public LoggerInfoStream(Logger parentLogger) {
        logger = Loggers.getLogger(parentLogger, ".lucene.iw");
        ifdLogger = Loggers.getLogger(parentLogger, ".lucene.iw.ifd");
    public LoggerInfoStream(final Logger parentLogger) {
        this.parentLogger = parentLogger;
    }

    @Override

@ -53,14 +46,11 @@ public final class LoggerInfoStream extends InfoStream {
    }

    private Logger getLogger(String component) {
        if (component.equals("IFD")) {
            return ifdLogger;
        } else {
            return logger;
        }
        return Loggers.getLogger(parentLogger, "." + component);
    }

    @Override
    public void close() {

    }
}
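The stream now derives a child logger per Lucene component on demand instead of caching two fixed ones. A hedged wiring sketch; the parent logger name is made up, and the import for LoggerInfoStream is omitted because its package is not shown in the hunk:

```java
import org.apache.logging.log4j.Logger;
import org.elasticsearch.common.logging.Loggers;

// Hypothetical wiring only. Loggers.getLogger(...) hands back a PrefixLogger
// under the hood, which the assert in Loggers.getLogger(parent, suffix)
// relies on, so the derived child logger inherits the parent's prefix.
class InfoStreamWiring {
    static LoggerInfoStream create() {
        Logger parent = Loggers.getLogger("org.elasticsearch.index.engine.Engine");
        // a message for component "IW" is then logged via the logger named
        // "org.elasticsearch.index.engine.Engine.IW"
        return new LoggerInfoStream(parent);
    }
}
```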
@ -42,11 +42,15 @@ import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.tasks.RawTaskStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.Transport;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportService;
import org.elasticsearch.transport.local.LocalTransport;

import java.util.ArrayList;
import java.util.List;
import java.util.Objects;

/**
 * A module to handle registering and binding all network related classes.

@ -54,7 +58,6 @@ import java.util.List;
public class NetworkModule extends AbstractModule {

    public static final String TRANSPORT_TYPE_KEY = "transport.type";
    public static final String TRANSPORT_SERVICE_TYPE_KEY = "transport.service.type";
    public static final String HTTP_TYPE_KEY = "http.type";
    public static final String LOCAL_TRANSPORT = "local";
    public static final String HTTP_TYPE_DEFAULT_KEY = "http.type.default";

@ -65,8 +68,6 @@ public class NetworkModule extends AbstractModule {
    public static final Setting<String> HTTP_DEFAULT_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_DEFAULT_KEY, Property.NodeScope);
    public static final Setting<String> HTTP_TYPE_SETTING = Setting.simpleString(HTTP_TYPE_KEY, Property.NodeScope);
    public static final Setting<Boolean> HTTP_ENABLED = Setting.boolSetting("http.enabled", true, Property.NodeScope);
    public static final Setting<String> TRANSPORT_SERVICE_TYPE_SETTING =
        Setting.simpleString(TRANSPORT_SERVICE_TYPE_KEY, Property.NodeScope);
    public static final Setting<String> TRANSPORT_TYPE_SETTING = Setting.simpleString(TRANSPORT_TYPE_KEY, Property.NodeScope);

    private final NetworkService networkService;

@ -74,10 +75,10 @@ public class NetworkModule extends AbstractModule {
    private final boolean transportClient;

    private final AllocationCommandRegistry allocationCommandRegistry = new AllocationCommandRegistry();
    private final ExtensionPoint.SelectedType<TransportService> transportServiceTypes = new ExtensionPoint.SelectedType<>("transport_service", TransportService.class);
    private final ExtensionPoint.SelectedType<Transport> transportTypes = new ExtensionPoint.SelectedType<>("transport", Transport.class);
    private final ExtensionPoint.SelectedType<HttpServerTransport> httpTransportTypes = new ExtensionPoint.SelectedType<>("http_transport", HttpServerTransport.class);
    private final List<NamedWriteableRegistry.Entry> namedWriteables = new ArrayList<>();
    private final List<TransportInterceptor> transportIntercetors = new ArrayList<>();

    /**
     * Creates a network module that custom networking classes can be plugged into.

@ -89,7 +90,6 @@ public class NetworkModule extends AbstractModule {
        this.networkService = networkService;
        this.settings = settings;
        this.transportClient = transportClient;
        registerTransportService("default", TransportService.class);
        registerTransport(LOCAL_TRANSPORT, LocalTransport.class);
        namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, ReplicationTask.Status.NAME, ReplicationTask.Status::new));
        namedWriteables.add(new NamedWriteableRegistry.Entry(Task.Status.class, RawTaskStatus.NAME, RawTaskStatus::new));

@ -100,11 +100,6 @@ public class NetworkModule extends AbstractModule {
        return transportClient;
    }

    /** Adds a transport service implementation that can be selected by setting {@link #TRANSPORT_SERVICE_TYPE_KEY}. */
    public void registerTransportService(String name, Class<? extends TransportService> clazz) {
        transportServiceTypes.registerExtension(name, clazz);
    }

    /** Adds a transport implementation that can be selected by setting {@link #TRANSPORT_TYPE_KEY}. */
    public void registerTransport(String name, Class<? extends Transport> clazz) {
        transportTypes.registerExtension(name, clazz);

@ -149,9 +144,9 @@ public class NetworkModule extends AbstractModule {
    @Override
    protected void configure() {
        bind(NetworkService.class).toInstance(networkService);
        transportServiceTypes.bindType(binder(), settings, TRANSPORT_SERVICE_TYPE_KEY, "default");
        bindTransportService();
        transportTypes.bindType(binder(), settings, TRANSPORT_TYPE_KEY, TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));

        bind(TransportInterceptor.class).toInstance(new CompositeTransportInterceptor(this.transportIntercetors));
        if (transportClient == false) {
            if (HTTP_ENABLED.get(settings)) {
                bind(HttpServer.class).asEagerSingleton();

@ -181,4 +176,39 @@ public class NetworkModule extends AbstractModule {
    public boolean canRegisterHttpExtensions() {
        return transportClient == false;
    }

    /**
     * Registers a new {@link TransportInterceptor}
     */
    public void addTransportInterceptor(TransportInterceptor interceptor) {
        this.transportIntercetors.add(Objects.requireNonNull(interceptor, "interceptor must not be null"));
    }

    static final class CompositeTransportInterceptor implements TransportInterceptor {
        final List<TransportInterceptor> transportInterceptors;

        private CompositeTransportInterceptor(List<TransportInterceptor> transportInterceptors) {
            this.transportInterceptors = new ArrayList<>(transportInterceptors);
        }

        @Override
        public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(String action, TransportRequestHandler<T> actualHandler) {
            for (TransportInterceptor interceptor : this.transportInterceptors) {
                actualHandler = interceptor.interceptHandler(action, actualHandler);
            }
            return actualHandler;
        }

        @Override
        public AsyncSender interceptSender(AsyncSender sender) {
            for (TransportInterceptor interceptor : this.transportInterceptors) {
                sender = interceptor.interceptSender(sender);
            }
            return sender;
        }
    }

    protected void bindTransportService() {
        bind(TransportService.class).asEagerSingleton();
    }
}
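With registerTransportService and its ExtensionPoint gone, customization moves to composable interceptors: CompositeTransportInterceptor simply folds each registered interceptor over the handler (or sender), in registration order, so the last one registered wraps outermost. A hedged sketch of what a registration might look like; AuditInterceptor is invented, and it assumes the two messageReceived overloads TransportRequestHandler declares in this era of the codebase:

```java
import org.elasticsearch.tasks.Task;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportInterceptor;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;

// Invented example: wraps every inbound handler to log the action name.
public class AuditInterceptor implements TransportInterceptor {
    @Override
    public <T extends TransportRequest> TransportRequestHandler<T> interceptHandler(
            String action, TransportRequestHandler<T> actualHandler) {
        return new TransportRequestHandler<T>() {
            @Override
            public void messageReceived(T request, TransportChannel channel, Task task) throws Exception {
                System.out.println("dispatching " + action);
                actualHandler.messageReceived(request, channel, task);
            }

            @Override
            public void messageReceived(T request, TransportChannel channel) throws Exception {
                System.out.println("dispatching " + action);
                actualHandler.messageReceived(request, channel);
            }
        };
    }
}
// registration, e.g. from code handed the module during node construction:
//     networkModule.addTransportInterceptor(new AuditInterceptor());
```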
@ -32,6 +32,7 @@ import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;
import java.util.Optional;

/**
 * Utilities for network interfaces / addresses binding and publishing.

@ -227,14 +228,15 @@ public abstract class NetworkUtils {

    /** Returns addresses for the given interface (it must be marked up) */
    static InetAddress[] getAddressesForInterface(String name) throws SocketException {
        NetworkInterface intf = NetworkInterface.getByName(name);
        if (intf == null) {
        Optional<NetworkInterface> networkInterface = getInterfaces().stream().filter((netIf) -> name.equals(netIf.getName())).findFirst();

        if (networkInterface.isPresent() == false) {
            throw new IllegalArgumentException("No interface named '" + name + "' found, got " + getInterfaces());
        }
        if (!intf.isUp()) {
        if (!networkInterface.get().isUp()) {
            throw new IllegalArgumentException("Interface '" + name + "' is not up and running");
        }
        List<InetAddress> list = Collections.list(intf.getInetAddresses());
        List<InetAddress> list = Collections.list(networkInterface.get().getInetAddresses());
        if (list.isEmpty()) {
            throw new IllegalArgumentException("Interface '" + name + "' has no internet addresses");
        }
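The point of this change is that the lookup now filters the same getInterfaces() enumeration that is used to build the error message, so lookup and reporting can never disagree, whereas NetworkInterface.getByName asks the OS independently. A stand-alone sketch of the idiom using only JDK APIs:

```java
import java.net.NetworkInterface;
import java.net.SocketException;
import java.util.Collections;
import java.util.List;
import java.util.Optional;

// Minimal sketch of the lookup idiom the patch switches to.
class InterfaceLookup {
    static NetworkInterface byName(String name) throws SocketException {
        // one enumeration serves both the lookup and the error message
        List<NetworkInterface> all = Collections.list(NetworkInterface.getNetworkInterfaces());
        Optional<NetworkInterface> match =
                all.stream().filter(netIf -> name.equals(netIf.getName())).findFirst();
        if (match.isPresent() == false) {
            throw new IllegalArgumentException("No interface named '" + name + "' found, got " + all);
        }
        return match.get();
    }
}
```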
@ -54,8 +54,8 @@ import org.elasticsearch.common.util.concurrent.EsExecutors;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.discovery.DiscoveryModule;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.fd.FaultDetection;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastZenPing;
import org.elasticsearch.env.Environment;

@ -226,7 +226,6 @@ public final class ClusterSettings extends AbstractScopedSettings {
                    NetworkModule.HTTP_DEFAULT_TYPE_SETTING,
                    NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING,
                    NetworkModule.HTTP_TYPE_SETTING,
                    NetworkModule.TRANSPORT_SERVICE_TYPE_SETTING,
                    NetworkModule.TRANSPORT_TYPE_SETTING,
                    HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS,
                    HttpTransportSettings.SETTING_CORS_ENABLED,
@ -411,6 +411,12 @@ public class Setting<T> extends ToXContentToBytes {

            @Override
            public void apply(Tuple<A, B> value, Settings current, Settings previous) {
                if (aSettingUpdater.hasChanged(current, previous)) {
                    logger.info("updating [{}] from [{}] to [{}]", aSetting.key, aSetting.getRaw(previous), aSetting.getRaw(current));
                }
                if (bSettingUpdater.hasChanged(current, previous)) {
                    logger.info("updating [{}] from [{}] to [{}]", bSetting.key, bSetting.getRaw(previous), bSetting.getRaw(current));
                }
                consumer.accept(value.v1(), value.v2());
            }

@ -551,10 +557,6 @@ public class Setting<T> extends ToXContentToBytes {
        return new Setting<>(key, defaultValueFn, Booleans::parseBooleanExact, properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, String percentage, Property... properties) {
        return new Setting<>(key, (s) -> percentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
    }

    public static Setting<ByteSizeValue> byteSizeSetting(String key, ByteSizeValue value, Property... properties) {
        return byteSizeSetting(key, (s) -> value.toString(), properties);
    }

@ -591,6 +593,49 @@ public class Setting<T> extends ToXContentToBytes {
        return value;
    }

    /**
     * Creates a setting which specifies a memory size. This can either be
     * specified as an absolute bytes value or as a percentage of the heap
     * memory.
     *
     * @param key the key for the setting
     * @param defaultValue the default value for this setting
     * @param properties properties for this setting like scope, filtering...
     * @return the setting object
     */
    public static Setting<ByteSizeValue> memorySizeSetting(String key, ByteSizeValue defaultValue, Property... properties) {
        return memorySizeSetting(key, (s) -> defaultValue.toString(), properties);
    }

    /**
     * Creates a setting which specifies a memory size. This can either be
     * specified as an absolute bytes value or as a percentage of the heap
     * memory.
     *
     * @param key the key for the setting
     * @param defaultValue a function that supplies the default value for this setting
     * @param properties properties for this setting like scope, filtering...
     * @return the setting object
     */
    public static Setting<ByteSizeValue> memorySizeSetting(String key, Function<Settings, String> defaultValue, Property... properties) {
        return new Setting<>(key, defaultValue, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
    }

    /**
     * Creates a setting which specifies a memory size. This can either be
     * specified as an absolute bytes value or as a percentage of the heap
     * memory.
     *
     * @param key the key for the setting
     * @param defaultPercentage the default value of this setting as a percentage of the heap memory
     * @param properties properties for this setting like scope, filtering...
     * @return the setting object
     */
    public static Setting<ByteSizeValue> memorySizeSetting(String key, String defaultPercentage, Property... properties) {
        return new Setting<>(key, (s) -> defaultPercentage, (s) -> MemorySizeValue.parseBytesSizeValueOrHeapRatio(s, key), properties);
    }

    public static Setting<TimeValue> positiveTimeSetting(String key, TimeValue defaultValue, Property... properties) {
        return timeSetting(key, defaultValue, TimeValue.timeValueMillis(0), properties);
    }
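The new memorySizeSetting helpers make the heap-relative parsing explicit instead of hiding it behind a byteSizeSetting overload. A hedged example, using only the APIs visible in the hunks above; the setting key is invented:

```java
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.ByteSizeValue;

// Sketch: one key accepts either an absolute size ("512mb") or a percentage
// of heap ("10%"), because parsing goes through
// MemorySizeValue.parseBytesSizeValueOrHeapRatio.
public class MemorySettingExample {
    public static final Setting<ByteSizeValue> CACHE_SIZE =
        Setting.memorySizeSetting("example.cache.size", "10%", Property.NodeScope);

    public static void main(String[] args) {
        ByteSizeValue fromPercent = CACHE_SIZE.get(Settings.EMPTY);   // 10% of the heap
        ByteSizeValue absolute = CACHE_SIZE.get(
            Settings.builder().put("example.cache.size", "512mb").build());  // fixed size
        System.out.println(fromPercent + " / " + absolute);
    }
}
```

PageCacheRecycler below is the first caller moved onto the new helper.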
@ -46,7 +46,7 @@ public class PageCacheRecycler extends AbstractComponent implements Releasable {
    public static final Setting<Type> TYPE_SETTING =
        new Setting<>("cache.recycler.page.type", Type.CONCURRENT.name(), Type::parse, Property.NodeScope);
    public static final Setting<ByteSizeValue> LIMIT_HEAP_SETTING =
        Setting.byteSizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
        Setting.memorySizeSetting("cache.recycler.page.limit.heap", "10%", Property.NodeScope);
    public static final Setting<Double> WEIGHT_BYTES_SETTING =
        Setting.doubleSetting("cache.recycler.page.weight.bytes", 1d, 0d, Property.NodeScope);
    public static final Setting<Double> WEIGHT_LONG_SETTING =
@ -19,7 +19,6 @@

package org.elasticsearch.common.xcontent;

import org.elasticsearch.common.Strings;
import org.elasticsearch.common.bytes.BytesReference;

import java.io.IOException;
File diff suppressed because it is too large
@ -20,14 +20,13 @@
package org.elasticsearch.common.xcontent;

import org.elasticsearch.common.bytes.BytesReference;

import java.io.Closeable;
import java.io.Flushable;
import java.io.IOException;
import java.io.InputStream;

/**
 *
 */
public interface XContentGenerator extends Closeable {
public interface XContentGenerator extends Closeable, Flushable {

    XContentType contentType();

@ -37,68 +36,62 @@ public interface XContentGenerator extends Closeable {

    void usePrintLineFeedAtEnd();

    void writeStartArray() throws IOException;

    void writeEndArray() throws IOException;

    void writeStartObject() throws IOException;

    void writeEndObject() throws IOException;

    void writeStartArray() throws IOException;

    void writeEndArray() throws IOException;

    void writeFieldName(String name) throws IOException;

    void writeString(String text) throws IOException;

    void writeString(char[] text, int offset, int len) throws IOException;

    void writeUTF8String(byte[] text, int offset, int length) throws IOException;

    void writeBinary(byte[] data, int offset, int len) throws IOException;

    void writeBinary(byte[] data) throws IOException;

    void writeNumber(int v) throws IOException;

    void writeNumber(long v) throws IOException;

    void writeNumber(double d) throws IOException;

    void writeNumber(float f) throws IOException;

    void writeBoolean(boolean state) throws IOException;

    void writeNull() throws IOException;

    void writeStringField(String fieldName, String value) throws IOException;
    void writeNullField(String name) throws IOException;

    void writeBooleanField(String fieldName, boolean value) throws IOException;
    void writeBooleanField(String name, boolean value) throws IOException;

    void writeNullField(String fieldName) throws IOException;
    void writeBoolean(boolean value) throws IOException;

    void writeNumberField(String fieldName, int value) throws IOException;
    void writeNumberField(String name, double value) throws IOException;

    void writeNumberField(String fieldName, long value) throws IOException;
    void writeNumber(double value) throws IOException;

    void writeNumberField(String fieldName, double value) throws IOException;
    void writeNumberField(String name, float value) throws IOException;

    void writeNumberField(String fieldName, float value) throws IOException;
    void writeNumber(float value) throws IOException;

    void writeBinaryField(String fieldName, byte[] data) throws IOException;
    void writeNumberField(String name, int value) throws IOException;

    void writeArrayFieldStart(String fieldName) throws IOException;
    void writeNumber(int value) throws IOException;

    void writeObjectFieldStart(String fieldName) throws IOException;
    void writeNumberField(String name, long value) throws IOException;

    void writeRawField(String fieldName, InputStream content) throws IOException;
    void writeNumber(long value) throws IOException;

    void writeRawField(String fieldName, BytesReference content) throws IOException;
    void writeNumber(short value) throws IOException;

    void writeRawValue(BytesReference content) throws IOException;
    void writeStringField(String name, String value) throws IOException;

    void writeString(String value) throws IOException;

    void writeString(char[] text, int offset, int len) throws IOException;

    void writeUTF8String(byte[] value, int offset, int length) throws IOException;

    void writeBinaryField(String name, byte[] value) throws IOException;

    void writeBinary(byte[] value) throws IOException;

    void writeBinary(byte[] value, int offset, int length) throws IOException;

    void writeRawField(String name, InputStream value) throws IOException;

    void writeRawField(String name, BytesReference value) throws IOException;

    void writeRawValue(BytesReference value) throws IOException;

    void copyCurrentStructure(XContentParser parser) throws IOException;

    void flush() throws IOException;

    @Override
    void close() throws IOException;
}
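With the interface now extending Flushable, the standalone flush() declaration becomes redundant, and the writing surface is regrouped into name/value pairs. A hedged sketch of how this interface is usually driven, through its XContentBuilder front-end; each builder call noted in a comment bottoms out in one of the methods listed above:

```java
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

public class XContentSketch {
    public static void main(String[] args) throws Exception {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject()                 // -> writeStartObject()
               .field("user", "kimchy")       // -> writeStringField(name, value)
               .field("active", true)         // -> writeBooleanField(name, value)
               .field("age", 42)              // -> writeNumberField(name, int)
               .nullField("nickname")         // -> writeNullField(name)
               .endObject();                  // -> writeEndObject()
        System.out.println(builder.string()); // {"user":"kimchy","active":true,...}
    }
}
```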
@ -47,30 +47,10 @@ public class JsonXContent implements XContent {
    }

    private static final JsonFactory jsonFactory;
    public static final String JSON_ALLOW_UNQUOTED_FIELD_NAMES = "elasticsearch.json.allow_unquoted_field_names";
    public static final JsonXContent jsonXContent;
    public static final boolean unquotedFieldNamesSet;

    static {
        jsonFactory = new JsonFactory();
        // TODO: Remove the system property configuration for this in Elasticsearch 6.0.0
        String jsonUnquoteProp = System.getProperty(JSON_ALLOW_UNQUOTED_FIELD_NAMES);
        if (jsonUnquoteProp == null) {
            unquotedFieldNamesSet = false;
            jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, false);
        } else {
            unquotedFieldNamesSet = true;
            switch (jsonUnquoteProp) {
                case "true":
                    jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, true);
                    break;
                case "false":
                    jsonFactory.configure(JsonParser.Feature.ALLOW_UNQUOTED_FIELD_NAMES, false);
                    break;
                default:
                    throw new IllegalArgumentException("invalid value for [" + JSON_ALLOW_UNQUOTED_FIELD_NAMES + "]: " + jsonUnquoteProp);
            }
        }
        jsonFactory.configure(JsonGenerator.Feature.QUOTE_FIELD_NAMES, true);
        jsonFactory.configure(JsonParser.Feature.ALLOW_COMMENTS, true);
        jsonFactory.configure(JsonFactory.Feature.FAIL_ON_SYMBOL_HASH_OVERFLOW, false); // this trips on many mappings now...
@ -47,9 +47,6 @@ import java.util.Collections;
import java.util.Objects;
import java.util.Set;

/**
 *
 */
public class JsonXContentGenerator implements XContentGenerator {

    /** Generator used to write content **/

@ -130,16 +127,6 @@ public class JsonXContentGenerator implements XContentGenerator {
        writeLineFeedAtEnd = true;
    }

    @Override
    public void writeStartArray() throws IOException {
        generator.writeStartArray();
    }

    @Override
    public void writeEndArray() throws IOException {
        generator.writeEndArray();
    }

    private boolean isFiltered() {
        return filter != null;
    }

@ -184,118 +171,124 @@ public class JsonXContentGenerator implements XContentGenerator {
        generator.writeEndObject();
    }

    @Override
    public void writeStartArray() throws IOException {
        generator.writeStartArray();
    }

    @Override
    public void writeEndArray() throws IOException {
        generator.writeEndArray();
    }

    @Override
    public void writeFieldName(String name) throws IOException {
        generator.writeFieldName(name);
    }

    @Override
    public void writeString(String text) throws IOException {
        generator.writeString(text);
    }

    @Override
    public void writeString(char[] text, int offset, int len) throws IOException {
        generator.writeString(text, offset, len);
    }

    @Override
    public void writeUTF8String(byte[] text, int offset, int length) throws IOException {
        generator.writeUTF8String(text, offset, length);
    }

    @Override
    public void writeBinary(byte[] data, int offset, int len) throws IOException {
        generator.writeBinary(data, offset, len);
    }

    @Override
    public void writeBinary(byte[] data) throws IOException {
        generator.writeBinary(data);
    }

    @Override
    public void writeNumber(int v) throws IOException {
        generator.writeNumber(v);
    }

    @Override
    public void writeNumber(long v) throws IOException {
        generator.writeNumber(v);
    }

    @Override
    public void writeNumber(double d) throws IOException {
        generator.writeNumber(d);
    }

    @Override
    public void writeNumber(float f) throws IOException {
        generator.writeNumber(f);
    }

    @Override
    public void writeBoolean(boolean state) throws IOException {
        generator.writeBoolean(state);
    }

    @Override
    public void writeNull() throws IOException {
        generator.writeNull();
    }

    @Override
    public void writeStringField(String fieldName, String value) throws IOException {
        generator.writeStringField(fieldName, value);
    public void writeNullField(String name) throws IOException {
        generator.writeNullField(name);
    }

    @Override
    public void writeBooleanField(String fieldName, boolean value) throws IOException {
        generator.writeBooleanField(fieldName, value);
    public void writeBooleanField(String name, boolean value) throws IOException {
        generator.writeBooleanField(name, value);
    }

    @Override
    public void writeNullField(String fieldName) throws IOException {
        generator.writeNullField(fieldName);
    public void writeBoolean(boolean value) throws IOException {
        generator.writeBoolean(value);
    }

    @Override
    public void writeNumberField(String fieldName, int value) throws IOException {
        generator.writeNumberField(fieldName, value);
    public void writeNumberField(String name, double value) throws IOException {
        generator.writeNumberField(name, value);
    }

    @Override
    public void writeNumberField(String fieldName, long value) throws IOException {
        generator.writeNumberField(fieldName, value);
    public void writeNumber(double value) throws IOException {
        generator.writeNumber(value);
    }

    @Override
    public void writeNumberField(String fieldName, double value) throws IOException {
        generator.writeNumberField(fieldName, value);
    public void writeNumberField(String name, float value) throws IOException {
        generator.writeNumberField(name, value);
    }

    @Override
    public void writeNumberField(String fieldName, float value) throws IOException {
        generator.writeNumberField(fieldName, value);
    public void writeNumber(float value) throws IOException {
        generator.writeNumber(value);
    }

    @Override
    public void writeBinaryField(String fieldName, byte[] data) throws IOException {
        generator.writeBinaryField(fieldName, data);
    public void writeNumberField(String name, int value) throws IOException {
        generator.writeNumberField(name, value);
    }

    @Override
    public void writeArrayFieldStart(String fieldName) throws IOException {
        generator.writeArrayFieldStart(fieldName);
    public void writeNumber(int value) throws IOException {
        generator.writeNumber(value);
    }

    @Override
    public void writeObjectFieldStart(String fieldName) throws IOException {
        generator.writeObjectFieldStart(fieldName);
    public void writeNumberField(String name, long value) throws IOException {
        generator.writeNumberField(name, value);
    }

    private void writeStartRaw(String fieldName) throws IOException {
        writeFieldName(fieldName);
    @Override
    public void writeNumber(long value) throws IOException {
        generator.writeNumber(value);
    }

    @Override
    public void writeNumber(short value) throws IOException {
        generator.writeNumber(value);
    }

    @Override
    public void writeStringField(String name, String value) throws IOException {
        generator.writeStringField(name, value);
    }

    @Override
    public void writeString(String value) throws IOException {
        generator.writeString(value);
    }

    @Override
    public void writeString(char[] value, int offset, int len) throws IOException {
        generator.writeString(value, offset, len);
    }

    @Override
    public void writeUTF8String(byte[] value, int offset, int length) throws IOException {
        generator.writeUTF8String(value, offset, length);
    }

    @Override
    public void writeBinaryField(String name, byte[] value) throws IOException {
        generator.writeBinaryField(name, value);
    }

    @Override
    public void writeBinary(byte[] value) throws IOException {
        generator.writeBinary(value);
    }

    @Override
    public void writeBinary(byte[] value, int offset, int len) throws IOException {
        generator.writeBinary(value, offset, len);
    }

    private void writeStartRaw(String name) throws IOException {
        writeFieldName(name);
        generator.writeRaw(':');
    }

@ -309,7 +302,7 @@ public class JsonXContentGenerator implements XContentGenerator {
    }

    @Override
    public void writeRawField(String fieldName, InputStream content) throws IOException {
    public void writeRawField(String name, InputStream content) throws IOException {
        if (content.markSupported() == false) {
            // needed for the XContentFactory.xContentType call
            content = new BufferedInputStream(content);

@ -321,11 +314,11 @@ public class JsonXContentGenerator implements XContentGenerator {
        if (mayWriteRawData(contentType) == false) {
            try (XContentParser parser = XContentFactory.xContent(contentType).createParser(content)) {
                parser.nextToken();
                writeFieldName(fieldName);
                writeFieldName(name);
                copyCurrentStructure(parser);
            }
        } else {
            writeStartRaw(fieldName);
            writeStartRaw(name);
            flush();
            Streams.copy(content, os);
            writeEndRaw();

@ -333,16 +326,16 @@ public class JsonXContentGenerator implements XContentGenerator {
    }

    @Override
    public final void writeRawField(String fieldName, BytesReference content) throws IOException {
    public final void writeRawField(String name, BytesReference content) throws IOException {
        XContentType contentType = XContentFactory.xContentType(content);
        if (contentType == null) {
            throw new IllegalArgumentException("Can't write raw bytes whose xcontent-type can't be guessed");
        }
        if (mayWriteRawData(contentType) == false) {
            writeFieldName(fieldName);
            writeFieldName(name);
            copyRawValue(content, contentType.xContent());
        } else {
            writeStartRaw(fieldName);
            writeStartRaw(name);
            flush();
            content.writeTo(os);
            writeEndRaw();

@ -416,7 +409,7 @@ public class JsonXContentGenerator implements XContentGenerator {
        }
        JsonStreamContext context = generator.getOutputContext();
        if ((context != null) && (context.inRoot() == false)) {
            throw new IOException("unclosed object or array found");
            throw new IOException("Unclosed object or array found");
        }
        if (writeLineFeedAtEnd) {
            flush();
@ -19,7 +19,6 @@

package org.elasticsearch.discovery;

import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.inject.AbstractModule;
import org.elasticsearch.common.inject.multibindings.Multibinder;
import org.elasticsearch.common.settings.Setting;

@ -27,8 +26,8 @@ import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.ExtensionPoint;
import org.elasticsearch.discovery.local.LocalDiscovery;
import org.elasticsearch.discovery.zen.ElectMasterService;
import org.elasticsearch.discovery.zen.ZenDiscovery;
import org.elasticsearch.discovery.zen.elect.ElectMasterService;
import org.elasticsearch.discovery.zen.ping.ZenPing;
import org.elasticsearch.discovery.zen.ping.ZenPingService;
import org.elasticsearch.discovery.zen.ping.unicast.UnicastHostsProvider;
@ -22,26 +22,34 @@ package org.elasticsearch.discovery;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.io.stream.Writeable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;

import java.io.IOException;

public class DiscoveryStats implements Streamable, ToXContent {
public class DiscoveryStats implements Writeable, ToXContent {

    @Nullable
    private PendingClusterStateStats queueStats;
    private final PendingClusterStateStats queueStats;

    public DiscoveryStats(PendingClusterStateStats queueStats) {
        this.queueStats = queueStats;
    }

    public DiscoveryStats(StreamInput in) throws IOException {
        queueStats = in.readOptionalWriteable(PendingClusterStateStats::new);
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeOptionalWriteable(queueStats);
    }

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        builder.startObject(Fields.DISCOVERY);

        if (queueStats != null ){
            queueStats.toXContent(builder, params);
        }

@ -49,24 +57,6 @@ public class DiscoveryStats implements Streamable, ToXContent {
        return builder;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.readBoolean()) {
            queueStats = new PendingClusterStateStats();
            queueStats.readFrom(in);
        }
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (queueStats != null ) {
            out.writeBoolean(true);
            queueStats.writeTo(out);
        }else{
            out.writeBoolean(false);
        }
    }

    static final class Fields {
        static final String DISCOVERY = "discovery";
    }
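This hunk is a textbook Streamable-to-Writeable migration: a read constructor plus writeTo replaces readFrom, which lets the field become final, and readOptionalWriteable/writeOptionalWriteable replace the hand-rolled boolean-presence protocol. A generic sketch of the pattern with an invented class and a single long field:

```java
import java.io.IOException;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Writeable;

// Hedged sketch of the migration pattern, not code from this commit.
class ExampleStats implements Writeable {
    // final is only possible with the constructor-based read: Streamable's
    // readFrom mutated an already-constructed instance.
    private final long value;

    ExampleStats(long value) {
        this.value = value;
    }

    // the "read constructor" that Writeable.Reader method references
    // (ExampleStats::new) can point at
    ExampleStats(StreamInput in) throws IOException {
        value = in.readLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        out.writeLong(value);
    }
}
```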
@ -47,6 +47,7 @@ import org.elasticsearch.discovery.BlockingClusterStatePublishResponseHandler;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.discovery.DiscoveryStats;
import org.elasticsearch.discovery.zen.publish.PendingClusterStateStats;

import java.util.HashSet;
import java.util.Optional;

@ -281,7 +282,7 @@ public class LocalDiscovery extends AbstractLifecycleComponent implements Discov

    @Override
    public DiscoveryStats stats() {
        return new DiscoveryStats(null);
        return new DiscoveryStats((PendingClusterStateStats)null);
    }

    @Override
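The cast in the hunk above is not cosmetic; it follows directly from the constructor added to DiscoveryStats. A short note on why:

```java
// DiscoveryStats now declares two single-argument constructors:
//     DiscoveryStats(PendingClusterStateStats queueStats)
//     DiscoveryStats(StreamInput in)
// A bare `new DiscoveryStats(null)` no longer compiles: null is applicable to
// both unrelated parameter types, so the call is ambiguous. The cast picks one:
DiscoveryStats stats = new DiscoveryStats((PendingClusterStateStats) null);
```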
@ -17,11 +17,10 @@
|
|||
* under the License.
|
||||
*/
|
||||
|
||||
package org.elasticsearch.discovery.zen.elect;
|
||||
package org.elasticsearch.discovery.zen;
|
||||
|
||||
import com.carrotsearch.hppc.ObjectContainer;
|
||||
import org.apache.lucene.util.CollectionUtil;
|
||||
import org.elasticsearch.Version;
|
||||
import org.elasticsearch.cluster.ClusterState;
|
||||
import org.elasticsearch.cluster.node.DiscoveryNode;
|
||||
import org.elasticsearch.common.component.AbstractComponent;
|
||||
|
@ -33,9 +32,11 @@ import org.elasticsearch.common.util.CollectionUtils;
|
|||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Arrays;
|
||||
import java.util.Comparator;
|
||||
import java.util.Collection;
|
||||
import java.util.Iterator;
|
||||
import java.util.List;
|
||||
import java.util.Objects;
|
||||
import java.util.stream.Collectors;
|
||||
|
||||
/**
|
||||
*
|
||||
|
@ -45,17 +46,64 @@ public class ElectMasterService extends AbstractComponent {
|
|||
public static final Setting<Integer> DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING =
|
||||
Setting.intSetting("discovery.zen.minimum_master_nodes", -1, Property.Dynamic, Property.NodeScope);
|
||||
|
||||
// This is the minimum version a master needs to be on, otherwise it gets ignored
|
||||
// This is based on the minimum compatible version of the current version this node is on
|
||||
private final Version minMasterVersion;
|
||||
private final NodeComparator nodeComparator = new NodeComparator();
|
||||
|
||||
private volatile int minimumMasterNodes;
|
||||
|
||||
/**
|
||||
* a class to encapsulate all the information about a candidate in a master election
|
||||
* that is needed to decided which of the candidates should win
|
||||
*/
|
||||
public static class MasterCandidate {
|
||||
|
||||
public static final long UNRECOVERED_CLUSTER_VERSION = -1;
|
||||
|
||||
final DiscoveryNode node;
|
||||
|
||||
final long clusterStateVersion;
|
||||
|
||||
public MasterCandidate(DiscoveryNode node, long clusterStateVersion) {
|
||||
Objects.requireNonNull(node);
|
||||
assert clusterStateVersion >= -1 : "got: " + clusterStateVersion;
|
||||
assert node.isMasterNode();
|
||||
this.node = node;
|
||||
this.clusterStateVersion = clusterStateVersion;
|
||||
}
|
||||
|
||||
public DiscoveryNode getNode() {
|
||||
return node;
|
||||
}
|
||||
|
||||
public long getClusterStateVersion() {
|
||||
return clusterStateVersion;
|
||||
}
|
||||
|
||||
@Override
|
||||
public String toString() {
|
||||
return "Candidate{" +
|
||||
"node=" + node +
|
||||
", clusterStateVersion=" + clusterStateVersion +
|
||||
'}';
|
||||
}
|
||||
|
||||
/**
|
||||
* compares two candidates to indicate which the a better master.
|
||||
* A higher cluster state version is better
|
||||
*
|
||||
* @return -1 if c1 is a batter candidate, 1 if c2.
|
||||
*/
|
||||
public static int compare(MasterCandidate c1, MasterCandidate c2) {
|
||||
// we explicitly swap c1 and c2 here. the code expects "better" is lower in a sorted
|
||||
// list, so if c2 has a higher cluster state version, it needs to come first.
|
||||
int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion);
|
||||
if (ret == 0) {
|
||||
ret = compareNodes(c1.getNode(), c2.getNode());
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
@Inject
|
||||
public ElectMasterService(Settings settings) {
|
||||
super(settings);
|
||||
this.minMasterVersion = Version.CURRENT.minimumCompatibilityVersion();
|
||||
this.minimumMasterNodes = DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.get(settings);
|
||||
logger.debug("using minimum_master_nodes [{}]", minimumMasterNodes);
|
||||
}
|
||||
|
@ -69,16 +117,41 @@ public class ElectMasterService extends AbstractComponent {
|
|||
}
|
||||
|
||||
public boolean hasEnoughMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||
if (minimumMasterNodes < 1) {
|
||||
return true;
|
||||
}
|
||||
int count = 0;
|
||||
for (DiscoveryNode node : nodes) {
|
||||
if (node.isMasterNode()) {
|
||||
count++;
|
||||
}
|
||||
}
|
||||
return count >= minimumMasterNodes;
|
||||
return count > 0 && (minimumMasterNodes < 0 || count >= minimumMasterNodes);
|
||||
}
|
||||
|
||||
public boolean hasEnoughCandidates(Collection<MasterCandidate> candidates) {
|
||||
if (candidates.isEmpty()) {
|
||||
return false;
|
||||
}
|
||||
if (minimumMasterNodes < 1) {
|
||||
return true;
|
||||
}
|
||||
assert candidates.stream().map(MasterCandidate::getNode).collect(Collectors.toSet()).size() == candidates.size() :
|
||||
"duplicates ahead: " + candidates;
|
||||
return candidates.size() >= minimumMasterNodes;
|
||||
}
|
||||
|
||||
/**
|
||||
* Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
|
||||
* if no master has been elected.
|
||||
*/
|
||||
public MasterCandidate electMaster(Collection<MasterCandidate> candidates) {
|
||||
assert hasEnoughCandidates(candidates);
|
||||
List<MasterCandidate> sortedCandidates = new ArrayList<>(candidates);
|
||||
sortedCandidates.sort(MasterCandidate::compare);
|
||||
return sortedCandidates.get(0);
|
||||
}
|
||||
|
||||
/** selects the best active master to join, where multiple are discovered */
|
||||
public DiscoveryNode tieBreakActiveMasters(Collection<DiscoveryNode> activeMasters) {
|
||||
return activeMasters.stream().min(ElectMasterService::compareNodes).get();
|
||||
}
|
||||
|
||||
public boolean hasTooManyMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||
|
@ -107,7 +180,7 @@ public class ElectMasterService extends AbstractComponent {
|
|||
*/
|
||||
public List<DiscoveryNode> sortByMasterLikelihood(Iterable<DiscoveryNode> nodes) {
|
||||
ArrayList<DiscoveryNode> sortedNodes = CollectionUtils.iterableAsArrayList(nodes);
|
||||
CollectionUtil.introSort(sortedNodes, nodeComparator);
|
||||
CollectionUtil.introSort(sortedNodes, ElectMasterService::compareNodes);
|
||||
return sortedNodes;
|
||||
}
|
||||
|
||||
|
@ -130,25 +203,6 @@ public class ElectMasterService extends AbstractComponent {
|
|||
return nextPossibleMasters.toArray(new DiscoveryNode[nextPossibleMasters.size()]);
|
||||
}
|
||||
|
||||
/**
|
||||
* Elects a new master out of the possible nodes, returning it. Returns <tt>null</tt>
|
||||
* if no master has been elected.
|
||||
*/
|
||||
public DiscoveryNode electMaster(Iterable<DiscoveryNode> nodes) {
|
||||
List<DiscoveryNode> sortedNodes = sortedMasterNodes(nodes);
|
||||
if (sortedNodes == null || sortedNodes.isEmpty()) {
|
||||
return null;
|
||||
}
|
||||
DiscoveryNode masterNode = sortedNodes.get(0);
|
||||
// Sanity check: maybe we don't end up here, because serialization may have failed.
|
||||
if (masterNode.getVersion().before(minMasterVersion)) {
|
||||
logger.warn("ignoring master [{}], because the version [{}] is lower than the minimum compatible version [{}]", masterNode, masterNode.getVersion(), minMasterVersion);
|
||||
return null;
|
||||
} else {
|
||||
return masterNode;
|
||||
}
|
||||
}
|
||||
|
||||
private List<DiscoveryNode> sortedMasterNodes(Iterable<DiscoveryNode> nodes) {
|
||||
List<DiscoveryNode> possibleNodes = CollectionUtils.iterableAsArrayList(nodes);
|
||||
if (possibleNodes.isEmpty()) {
|
||||
|
@@ -161,21 +215,18 @@ public class ElectMasterService extends AbstractComponent {
                 it.remove();
             }
         }
-        CollectionUtil.introSort(possibleNodes, nodeComparator);
+        CollectionUtil.introSort(possibleNodes, ElectMasterService::compareNodes);
         return possibleNodes;
     }
 
-    private static class NodeComparator implements Comparator<DiscoveryNode> {
-
-        @Override
-        public int compare(DiscoveryNode o1, DiscoveryNode o2) {
-            if (o1.isMasterNode() && !o2.isMasterNode()) {
-                return -1;
-            }
-            if (!o1.isMasterNode() && o2.isMasterNode()) {
-                return 1;
-            }
-            return o1.getId().compareTo(o2.getId());
+    /** master nodes go before other nodes, with a secondary sort by id **/
+    private static int compareNodes(DiscoveryNode o1, DiscoveryNode o2) {
+        if (o1.isMasterNode() && !o2.isMasterNode()) {
+            return -1;
+        }
+        if (!o1.isMasterNode() && o2.isMasterNode()) {
+            return 1;
+        }
+        return o1.getId().compareTo(o2.getId());
     }
 }
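The compareNodes method that replaces NodeComparator above is fully visible in the hunk: master-eligible nodes sort ahead of all others, and node id provides a stable secondary order. A small self-contained demonstration of that ordering, with DiscoveryNode stubbed by a hypothetical Node class:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // Hypothetical stand-in for DiscoveryNode, for illustration only.
    public final class NodeOrderSketch {

        static final class Node {
            final String id;
            final boolean masterEligible;

            Node(String id, boolean masterEligible) {
                this.id = id;
                this.masterEligible = masterEligible;
            }
        }

        // Same shape as the compareNodes introduced above: master nodes first, then by id.
        static int compareNodes(Node o1, Node o2) {
            if (o1.masterEligible && !o2.masterEligible) {
                return -1;
            }
            if (!o1.masterEligible && o2.masterEligible) {
                return 1;
            }
            return o1.id.compareTo(o2.id);
        }

        public static void main(String[] args) {
            List<Node> nodes = new ArrayList<>(Arrays.asList(
                new Node("data-1", false), new Node("master-b", true), new Node("master-a", true)));
            nodes.sort(NodeOrderSketch::compareNodes);
            // Prints master-a, master-b, data-1: eligible nodes first, id breaks ties.
            nodes.forEach(n -> System.out.println(n.id));
        }
    }

Replacing the singleton Comparator field with a static method is behavior-neutral; it just lets call sites pass a method reference instead of holding a comparator instance.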
@@ -41,7 +41,6 @@ import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.LocalTransportAddress;
 import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.membership.MembershipAction;
 
 import java.util.ArrayList;
 
@@ -24,7 +24,6 @@ import org.apache.logging.log4j.message.ParameterizedMessage;
 import org.apache.logging.log4j.util.Supplier;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.ExceptionsHelper;
-import org.elasticsearch.Version;
 import org.elasticsearch.cluster.ClusterChangedEvent;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterState;
@@ -56,7 +55,6 @@ import org.elasticsearch.common.unit.TimeValue;
 import org.elasticsearch.discovery.Discovery;
 import org.elasticsearch.discovery.DiscoverySettings;
 import org.elasticsearch.discovery.DiscoveryStats;
 import org.elasticsearch.discovery.zen.elect.ElectMasterService;
 import org.elasticsearch.discovery.zen.fd.MasterFaultDetection;
 import org.elasticsearch.discovery.zen.fd.NodesFaultDetection;
 import org.elasticsearch.discovery.zen.membership.MembershipAction;
@@ -76,13 +74,10 @@ import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
 import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.HashSet;
 import java.util.List;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicInteger;
-import java.util.concurrent.atomic.AtomicLong;
 import java.util.concurrent.atomic.AtomicReference;
 import java.util.function.BiFunction;
 import java.util.stream.Collectors;
@@ -146,9 +141,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
 
     private final JoinThreadControl joinThreadControl;
 
-    /** counts the time this node has joined the cluster or have elected it self as master */
-    private final AtomicLong clusterJoinsCounter = new AtomicLong();
-
     // must initialized in doStart(), when we have the allocationService set
     private volatile NodeJoinController nodeJoinController;
     private volatile NodeRemovalClusterStateTaskExecutor nodeRemovalExecutor;
@@ -284,8 +276,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
     protected void doClose() {
         masterFD.close();
         nodesFD.close();
-        publishClusterState.close();
-        membership.close();
         pingService.close();
     }
 
@@ -306,8 +296,8 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
     }
 
     @Override
-    public boolean nodeHasJoinedClusterOnce() {
-        return clusterJoinsCounter.get() > 0;
+    public ClusterState clusterState() {
+        return clusterService.state();
     }
 
     /** end of {@link org.elasticsearch.discovery.zen.ping.PingContextProvider } implementation */
@@ -318,7 +308,7 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
         if (!clusterChangedEvent.state().getNodes().isLocalNodeElectedMaster()) {
             throw new IllegalStateException("Shouldn't publish state when not master");
         }
-        nodesFD.updateNodesAndPing(clusterChangedEvent.state());
+
         try {
             publishClusterState.publish(clusterChangedEvent, electMaster.minimumMasterNodes(), ackListener);
         } catch (FailedToCommitClusterStateException t) {
@@ -338,6 +328,17 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
             });
             throw t;
         }
+
+        // update the set of nodes to ping after the new cluster state has been published
+        nodesFD.updateNodesAndPing(clusterChangedEvent.state());
     }
 
+    /**
+     * Gets the current set of nodes involved in the node fault detection.
+     * NB: for testing purposes
+     */
+    public Set<DiscoveryNode> getFaultDetectionNodes() {
+        return nodesFD.getNodes();
+    }
+
     @Override
@@ -397,8 +398,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
                 joinThreadControl.markThreadAsDone(currentThread);
                 // we only starts nodesFD if we are master (it may be that we received a cluster state while pinging)
                 nodesFD.updateNodesAndPing(state); // start the nodes FD
-                long count = clusterJoinsCounter.incrementAndGet();
-                logger.trace("cluster joins counter set to [{}] (elected as master)", count);
             }
 
             @Override
@@ -755,9 +754,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
         if (currentState.blocks().hasGlobalBlock(discoverySettings.getNoMasterBlock())) {
             // its a fresh update from the master as we transition from a start of not having a master to having one
             logger.debug("got first state from fresh master [{}]", newClusterState.nodes().getMasterNodeId());
-            long count = clusterJoinsCounter.incrementAndGet();
-            logger.trace("updated cluster join cluster to [{}]", count);
-
             return newClusterState;
         }
 
@@ -864,16 +860,6 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
         } else if (nodeJoinController == null) {
             throw new IllegalStateException("discovery module is not yet started");
         } else {
-            // The minimum supported version for a node joining a master:
-            Version minimumNodeJoinVersion = localNode().getVersion().minimumCompatibilityVersion();
-            // Sanity check: maybe we don't end up here, because serialization may have failed.
-            if (node.getVersion().before(minimumNodeJoinVersion)) {
-                callback.onFailure(
-                    new IllegalStateException("Can't handle join request from a node with a version [" + node.getVersion() + "] that is lower than the minimum compatible version [" + minimumNodeJoinVersion.minimumCompatibilityVersion() + "]")
-                );
-                return;
-            }
-
             // try and connect to the node, if it fails, we can raise an exception back to the client...
             transportService.connectToNode(node);
 
@@ -892,14 +878,14 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
 
     private DiscoveryNode findMaster() {
         logger.trace("starting to ping");
-        ZenPing.PingResponse[] fullPingResponses = pingService.pingAndWait(pingTimeout);
+        List<ZenPing.PingResponse> fullPingResponses = pingService.pingAndWait(pingTimeout).toList();
         if (fullPingResponses == null) {
             logger.trace("No full ping responses");
             return null;
         }
         if (logger.isTraceEnabled()) {
             StringBuilder sb = new StringBuilder();
-            if (fullPingResponses.length == 0) {
+            if (fullPingResponses.size() == 0) {
                 sb.append(" {none}");
             } else {
                 for (ZenPing.PingResponse pingResponse : fullPingResponses) {
@@ -909,69 +895,57 @@ public class ZenDiscovery extends AbstractLifecycleComponent implements Discovery
             logger.trace("full ping responses:{}", sb);
         }
 
+        final DiscoveryNode localNode = clusterService.localNode();
+
+        // add our selves
+        assert fullPingResponses.stream().map(ZenPing.PingResponse::node)
+            .filter(n -> n.equals(localNode)).findAny().isPresent() == false;
+
+        fullPingResponses.add(new ZenPing.PingResponse(localNode, null, clusterService.state()));
+
         // filter responses
         final List<ZenPing.PingResponse> pingResponses = filterPingResponses(fullPingResponses, masterElectionIgnoreNonMasters, logger);
 
-        final DiscoveryNode localNode = clusterService.localNode();
-        List<DiscoveryNode> pingMasters = new ArrayList<>();
+        List<DiscoveryNode> activeMasters = new ArrayList<>();
         for (ZenPing.PingResponse pingResponse : pingResponses) {
-            if (pingResponse.master() != null) {
-                // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without
-                // any check / verifications from other nodes in ZenDiscover#innerJoinCluster()
-                if (!localNode.equals(pingResponse.master())) {
-                    pingMasters.add(pingResponse.master());
-                }
+            // We can't include the local node in pingMasters list, otherwise we may up electing ourselves without
+            // any check / verifications from other nodes in ZenDiscover#innerJoinCluster()
+            if (pingResponse.master() != null && !localNode.equals(pingResponse.master())) {
+                activeMasters.add(pingResponse.master());
             }
         }
 
-        // nodes discovered during pinging
-        Set<DiscoveryNode> activeNodes = new HashSet<>();
-        // nodes discovered who has previously been part of the cluster and do not ping for the very first time
-        Set<DiscoveryNode> joinedOnceActiveNodes = new HashSet<>();
-        if (localNode.isMasterNode()) {
-            activeNodes.add(localNode);
-            long joinsCounter = clusterJoinsCounter.get();
-            if (joinsCounter > 0) {
-                logger.trace("adding local node to the list of active nodes that have previously joined the cluster (joins counter is [{}])", joinsCounter);
-                joinedOnceActiveNodes.add(localNode);
-            }
-        }
+        List<ElectMasterService.MasterCandidate> masterCandidates = new ArrayList<>();
         for (ZenPing.PingResponse pingResponse : pingResponses) {
-            activeNodes.add(pingResponse.node());
-            if (pingResponse.hasJoinedOnce()) {
-                joinedOnceActiveNodes.add(pingResponse.node());
+            if (pingResponse.node().isMasterNode()) {
+                masterCandidates.add(new ElectMasterService.MasterCandidate(pingResponse.node(), pingResponse.getClusterStateVersion()));
             }
         }
 
-        if (pingMasters.isEmpty()) {
-            if (electMaster.hasEnoughMasterNodes(activeNodes)) {
-                // we give preference to nodes who have previously already joined the cluster. Those will
-                // have a cluster state in memory, including an up to date routing table (which is not persistent to disk
-                // by the gateway)
-                DiscoveryNode master = electMaster.electMaster(joinedOnceActiveNodes);
-                if (master != null) {
-                    return master;
-                }
-                return electMaster.electMaster(activeNodes);
+        if (activeMasters.isEmpty()) {
+            if (electMaster.hasEnoughCandidates(masterCandidates)) {
+                final ElectMasterService.MasterCandidate winner = electMaster.electMaster(masterCandidates);
+                logger.trace("candidate {} won election", winner);
+                return winner.getNode();
             } else {
                 // if we don't have enough master nodes, we bail, because there are not enough master to elect from
-                logger.trace("not enough master nodes [{}]", activeNodes);
+                logger.trace("not enough master nodes [{}]", masterCandidates);
                 return null;
             }
         } else {
-            assert !pingMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
+            assert !activeMasters.contains(localNode) : "local node should never be elected as master when other nodes indicate an active master";
             // lets tie break between discovered nodes
-            return electMaster.electMaster(pingMasters);
+            return electMaster.tieBreakActiveMasters(activeMasters);
         }
     }
 
-    static List<ZenPing.PingResponse> filterPingResponses(ZenPing.PingResponse[] fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
+    static List<ZenPing.PingResponse> filterPingResponses(List<ZenPing.PingResponse> fullPingResponses, boolean masterElectionIgnoreNonMasters, Logger logger) {
         List<ZenPing.PingResponse> pingResponses;
         if (masterElectionIgnoreNonMasters) {
-            pingResponses = Arrays.stream(fullPingResponses).filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
+            pingResponses = fullPingResponses.stream().filter(ping -> ping.node().isMasterNode()).collect(Collectors.toList());
         } else {
-            pingResponses = Arrays.asList(fullPingResponses);
+            pingResponses = fullPingResponses;
         }
 
         if (logger.isDebugEnabled()) {
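Taken together, the rewritten findMaster above has two disjoint outcomes: if any other node already reports an active master, the local node joins the best of those; otherwise it runs a fresh election over master-eligible candidates, gated by the quorum check. Below is a condensed, standalone model of that control flow under stated assumptions: all types are simplified stand-ins, the candidate ordering assumes higher cluster state versions win (MasterCandidate::compare itself is not part of this diff), and the active-master tie break is reduced to a plain id comparison where the real code uses compareNodes:

    import java.util.ArrayList;
    import java.util.List;

    // Condensed, standalone model of the findMaster() flow above; illustration only.
    public final class ElectionSketch {

        static final class Ping {
            final String nodeId;
            final boolean masterEligible;
            final String activeMasterId;      // master this node currently follows, or null
            final long clusterStateVersion;

            Ping(String nodeId, boolean masterEligible, String activeMasterId, long clusterStateVersion) {
                this.nodeId = nodeId;
                this.masterEligible = masterEligible;
                this.activeMasterId = activeMasterId;
                this.clusterStateVersion = clusterStateVersion;
            }
        }

        /** Mirrors the two branches of findMaster(); returns the chosen master id, or null. */
        static String findMaster(String localNodeId, List<Ping> pings, int minimumMasterNodes) {
            // Masters that other nodes already follow; never count ourselves (see the comment in the hunk).
            List<String> activeMasters = new ArrayList<>();
            for (Ping ping : pings) {
                if (ping.activeMasterId != null && !localNodeId.equals(ping.activeMasterId)) {
                    activeMasters.add(ping.activeMasterId);
                }
            }

            // Master-eligible nodes become election candidates, carrying their cluster state version.
            List<Ping> candidates = new ArrayList<>();
            for (Ping ping : pings) {
                if (ping.masterEligible) {
                    candidates.add(ping);
                }
            }

            if (activeMasters.isEmpty()) {
                if (candidates.isEmpty() || candidates.size() < minimumMasterNodes) {
                    return null;  // not enough master-eligible nodes to elect safely
                }
                // Assumed candidate ordering: highest cluster state version first, id breaks ties.
                candidates.sort((c1, c2) -> {
                    int ret = Long.compare(c2.clusterStateVersion, c1.clusterStateVersion);
                    return ret != 0 ? ret : c1.nodeId.compareTo(c2.nodeId);
                });
                return candidates.get(0).nodeId;
            } else {
                // Someone is already master: join the tie-break winner instead of electing anew.
                return activeMasters.stream().min(String::compareTo).get();
            }
        }

        public static void main(String[] args) {
            List<Ping> pings = new ArrayList<>();
            pings.add(new Ping("local", true, null, 12));   // the local node adds itself before filtering
            pings.add(new Ping("nodeA", true, null, 15));
            pings.add(new Ping("nodeB", false, null, 15));
            // No active master anywhere: prints nodeA, whose cluster state (15) is newer than local's (12).
            System.out.println(findMaster("local", pings, 2));
        }
    }

The key behavioral change is visible in the hunk itself: the old code preferred nodes that had joined the cluster at least once (the clusterJoinsCounter bookkeeping), while the new code makes the freshest cluster state the deciding signal.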

@@ -168,7 +168,6 @@ public class MasterFaultDetection extends FaultDetection {
         super.close();
         stop("closing");
         this.listeners.clear();
-        transportService.removeHandler(MASTER_PING_ACTION_NAME);
     }
 
     @Override

@@ -41,6 +41,8 @@ import org.elasticsearch.transport.TransportResponseHandler;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.Collections;
+import java.util.Set;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.CopyOnWriteArrayList;
 
@@ -91,6 +93,14 @@ public class NodesFaultDetection extends FaultDetection {
         listeners.remove(listener);
     }
 
+    /**
+     * Gets the current set of nodes involved in node fault detection.
+     * NB: For testing purposes.
+     */
+    public Set<DiscoveryNode> getNodes() {
+        return Collections.unmodifiableSet(nodesFD.keySet());
+    }
+
     /**
      * make sure that nodes in clusterState are pinged. Any pinging to nodes which are not
      * part of the cluster will be stopped
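One detail worth noting in the getNodes() addition above: it returns Collections.unmodifiableSet over the live keySet rather than a defensive copy, so a test holding the returned set observes membership changes but cannot mutate them. A minimal sketch of that pattern, with hypothetical node ids and a plain map standing in for nodesFD:

    import java.util.Collections;
    import java.util.Set;
    import java.util.concurrent.ConcurrentHashMap;
    import java.util.concurrent.ConcurrentMap;

    // Illustration of the unmodifiable-view pattern used by getNodes() above.
    public final class FaultDetectionViewSketch {

        private final ConcurrentMap<String, Object> nodesFD = new ConcurrentHashMap<>();

        Set<String> getNodes() {
            // A live, read-only view of the keys: reflects later changes, rejects mutation.
            return Collections.unmodifiableSet(nodesFD.keySet());
        }

        public static void main(String[] args) {
            FaultDetectionViewSketch fd = new FaultDetectionViewSketch();
            fd.nodesFD.put("node-1", new Object());
            Set<String> view = fd.getNodes();
            System.out.println(view.size());   // 1
            fd.nodesFD.put("node-2", new Object());
            System.out.println(view.size());   // 2, because the view is live
            try {
                view.remove("node-1");         // mutation through the view is rejected
            } catch (UnsupportedOperationException expected) {
                System.out.println("read-only view");
            }
        }
    }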
@@ -129,7 +139,6 @@ public class NodesFaultDetection extends FaultDetection {
     public void close() {
         super.close();
         stop();
-        transportService.removeHandler(PING_ACTION_NAME);
     }
 
     @Override

@@ -76,12 +76,6 @@ public class MembershipAction extends AbstractComponent {
         transportService.registerRequestHandler(DISCOVERY_LEAVE_ACTION_NAME, LeaveRequest::new, ThreadPool.Names.GENERIC, new LeaveRequestRequestHandler());
     }
 
-    public void close() {
-        transportService.removeHandler(DISCOVERY_JOIN_ACTION_NAME);
-        transportService.removeHandler(DISCOVERY_JOIN_VALIDATE_ACTION_NAME);
-        transportService.removeHandler(DISCOVERY_LEAVE_ACTION_NAME);
-    }
-
     public void sendLeaveRequest(DiscoveryNode masterNode, DiscoveryNode node) {
         transportService.sendRequest(node, DISCOVERY_LEAVE_ACTION_NAME, new LeaveRequest(masterNode), EmptyTransportResponseHandler.INSTANCE_SAME);
     }
Some files were not shown because too many files have changed in this diff.