Merge branch 'master' into sort-add-build

commit 25da6b2f2e
Author: Christoph Büscher
Date:   2016-03-22 12:20:56 +01:00

434 changed files with 15113 additions and 13135 deletions

Vagrantfile

@@ -23,15 +23,15 @@
 Vagrant.configure(2) do |config|
 
   config.vm.define "ubuntu-1204" do |config|
-    config.vm.box = "ubuntu/precise64"
+    config.vm.box = "elastic/ubuntu-12.04-x86_64"
     ubuntu_common config
   end
   config.vm.define "ubuntu-1404" do |config|
-    config.vm.box = "ubuntu/trusty64"
+    config.vm.box = "elastic/ubuntu-14.04-x86_64"
     ubuntu_common config
   end
   config.vm.define "ubuntu-1504" do |config|
-    config.vm.box = "ubuntu/vivid64"
+    config.vm.box = "elastic/ubuntu-15.04-x86_64"
     ubuntu_common config, extra: <<-SHELL
       # Install Jayatana so we can work around it being present.
       [ -f /usr/share/java/jayatanaag.jar ] || install jayatana
@@ -41,44 +41,35 @@ Vagrant.configure(2) do |config|
   # get the sun jdk on there just aren't worth it. We have jessie for testing
   # debian and it works fine.
   config.vm.define "debian-8" do |config|
-    config.vm.box = "debian/jessie64"
-    deb_common config,
-      'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
+    config.vm.box = "elastic/debian-8-x86_64"
+    deb_common config, 'echo deb http://http.debian.net/debian jessie-backports main > /etc/apt/sources.list.d/backports.list', 'backports'
   end
   config.vm.define "centos-6" do |config|
-    config.vm.box = "boxcutter/centos67"
+    config.vm.box = "elastic/centos-6-x86_64"
     rpm_common config
   end
   config.vm.define "centos-7" do |config|
-    # There is a centos/7 box but it doesn't have rsync or virtualbox guest
-    # stuff on there so its slow to use. So chef it is....
-    config.vm.box = "boxcutter/centos71"
+    config.vm.box = "elastic/centos-7-x86_64"
     rpm_common config
   end
+  config.vm.define "oel-6" do |config|
+    config.vm.box = "elastic/oraclelinux-6-x86_64"
+    rpm_common config
+  end
-  # This box hangs _forever_ on ```yum check-update```. I have no idea why.
-  # config.vm.define "oel-6", autostart: false do |config|
-  #   config.vm.box = "boxcutter/oel66"
-  #   rpm_common(config)
-  # end
   config.vm.define "oel-7" do |config|
-    config.vm.box = "boxcutter/oel70"
+    config.vm.box = "elastic/oraclelinux-7-x86_64"
     rpm_common config
   end
   config.vm.define "fedora-22" do |config|
-    # Fedora hosts their own 'cloud' images that aren't in Vagrant's Atlas but
-    # and are missing required stuff like rsync. It'd be nice if we could use
-    # them but they much slower to get up and running then the boxcutter image.
-    config.vm.box = "boxcutter/fedora22"
+    config.vm.box = "elastic/fedora-22-x86_64"
     dnf_common config
   end
   config.vm.define "opensuse-13" do |config|
-    config.vm.box = "chef/opensuse-13"
-    config.vm.box_url = "http://opscode-vm-bento.s3.amazonaws.com/vagrant/virtualbox/opscode_opensuse-13.2-x86_64_chef-provisionerless.box"
+    config.vm.box = "elastic/opensuse-13-x86_64"
     opensuse_common config
   end
-  # The SLES boxes are not considered to be highest quality, but seem to be sufficient for a test run
   config.vm.define "sles-12" do |config|
-    config.vm.box = "idar/sles12"
+    config.vm.box = "elastic/sles-12-x86_64"
     sles_common config
   end
   # Switch the default share for the project root from /vagrant to


@@ -273,7 +273,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]TransportClientNodesService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]client[/\\]transport[/\\]support[/\\]TransportProxyClient.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterModule.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterState.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterStateObserver.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]cluster[/\\]ClusterStateUpdateTask.java" checks="LineLength" />
@@ -390,7 +389,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]CollectionUtils.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]ExtensionPoint.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsExecutors.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]EsThreadPoolExecutor.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]PrioritizedEsThreadPoolExecutor.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadBarrier.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]common[/\\]util[/\\]concurrent[/\\]ThreadContext.java" checks="LineLength" />
@@ -421,7 +419,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]env[/\\]NodeEnvironment.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]AsyncShardFetch.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]DanglingIndicesState.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]Gateway.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayAllocator.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayMetaState.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]gateway[/\\]GatewayService.java" checks="LineLength" />
@@ -438,7 +435,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]AlreadyExpiredException.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]CompositeIndexEventListener.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexModule.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexSettings.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]IndexingSlowLog.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]MergePolicyConfig.java" checks="LineLength" />
@@ -461,7 +457,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]codec[/\\]PerFieldMappingPostingFormatCodec.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ElasticsearchConcurrentMergeScheduler.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]Engine.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]EngineConfig.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]InternalEngine.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]LiveVersionMap.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]engine[/\\]ShadowEngine.java" checks="LineLength" />
@@ -606,7 +601,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexSearcherWrapper.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexShard.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]IndexingStats.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]ShadowIndexShard.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]ShardPath.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]ShardStateMetaData.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]shard[/\\]StoreRecovery.java" checks="LineLength" />
@@ -614,7 +608,6 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]similarity[/\\]SimilarityService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]snapshots[/\\]blobstore[/\\]BlobStoreIndexShardRepository.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]snapshots[/\\]blobstore[/\\]BlobStoreIndexShardSnapshots.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]FsDirectoryService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStore.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]IndexStoreConfig.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]index[/\\]store[/\\]LegacyVerification.java" checks="LineLength" />
@@ -666,10 +659,7 @@
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]process[/\\]ProcessService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]Node.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]node[/\\]internal[/\\]InternalSettingsPreparer.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateContext.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateDocumentParser.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQuery.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorService.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]DummyPluginInfo.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]InstallPluginCommand.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]main[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsService.java" checks="LineLength" />
@@ -1329,14 +1319,9 @@
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]monitor[/\\]os[/\\]OsProbeTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]nodesinfo[/\\]NodeInfoStreamingTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]options[/\\]detailederrors[/\\]DetailedErrorsEnabledIT.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]ConcurrentPercolatorIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]MultiPercolatorIT.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolateDocumentParserTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorQueryTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]PercolatorServiceTests.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]RecoveryPercolatorIT.java" checks="LineLength" />
-  <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]percolator[/\\]TTLPercolatorIT.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginInfoTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]plugins[/\\]PluginsServiceTests.java" checks="LineLength" />
   <suppress files="core[/\\]src[/\\]test[/\\]java[/\\]org[/\\]elasticsearch[/\\]recovery[/\\]FullRollingRestartIT.java" checks="LineLength" />


@@ -1,31 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.lucene.index.memory;
-
-/**
- * This class overwrites {@link MemoryIndex} to make the reuse constructor visible.
- */
-public final class ExtendedMemoryIndex extends MemoryIndex {
-
-    public ExtendedMemoryIndex(boolean storeOffsets, boolean storePayloads, long maxReusedBytes) {
-        super(storeOffsets, storePayloads, maxReusedBytes);
-    }
-
-}


@@ -645,8 +645,6 @@ public class ElasticsearchException extends RuntimeException implements ToXConte
         // 87 used to be for MergeMappingException
         INVALID_INDEX_TEMPLATE_EXCEPTION(org.elasticsearch.indices.InvalidIndexTemplateException.class,
                 org.elasticsearch.indices.InvalidIndexTemplateException::new, 88),
-        PERCOLATE_EXCEPTION(org.elasticsearch.percolator.PercolateException.class,
-                org.elasticsearch.percolator.PercolateException::new, 89),
         REFRESH_FAILED_ENGINE_EXCEPTION(org.elasticsearch.index.engine.RefreshFailedEngineException.class,
                 org.elasticsearch.index.engine.RefreshFailedEngineException::new, 90),
         AGGREGATION_INITIALIZATION_EXCEPTION(org.elasticsearch.search.aggregations.AggregationInitializationException.class,


@@ -165,7 +165,6 @@ import org.elasticsearch.action.percolate.MultiPercolateAction;
 import org.elasticsearch.action.percolate.PercolateAction;
 import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
 import org.elasticsearch.action.percolate.TransportPercolateAction;
-import org.elasticsearch.action.percolate.TransportShardMultiPercolateAction;
 import org.elasticsearch.action.search.ClearScrollAction;
 import org.elasticsearch.action.search.MultiSearchAction;
 import org.elasticsearch.action.search.SearchAction;
@@ -331,7 +330,7 @@ public class ActionModule extends AbstractModule {
         registerAction(SearchScrollAction.INSTANCE, TransportSearchScrollAction.class);
         registerAction(MultiSearchAction.INSTANCE, TransportMultiSearchAction.class);
         registerAction(PercolateAction.INSTANCE, TransportPercolateAction.class);
-        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class, TransportShardMultiPercolateAction.class);
+        registerAction(MultiPercolateAction.INSTANCE, TransportMultiPercolateAction.class);
         registerAction(ExplainAction.INSTANCE, TransportExplainAction.class);
         registerAction(ClearScrollAction.INSTANCE, TransportClearScrollAction.class);
         registerAction(RecoveryAction.INSTANCE, TransportRecoveryAction.class);


@@ -24,7 +24,6 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ClusterStateObserver;
 import org.elasticsearch.cluster.ClusterStateUpdateTask;
@@ -32,6 +31,7 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.UnassignedInfo;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -24,8 +24,8 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;


@@ -23,8 +23,8 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;


@@ -20,7 +20,7 @@
 package org.elasticsearch.action.admin.cluster.node.liveness;
 
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportChannel;


@@ -23,8 +23,8 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;


@@ -26,10 +26,10 @@ import org.elasticsearch.action.admin.cluster.node.tasks.list.TaskInfo;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.tasks.TransportTasksAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;


@@ -26,8 +26,8 @@ import org.elasticsearch.action.TaskOperationFailure;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.tasks.TransportTasksAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.repositories.delete;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoriesService;


@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.repositories.get;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -30,6 +29,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.RepositoriesMetaData;
 import org.elasticsearch.cluster.metadata.RepositoryMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.repositories.put;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoriesService;


@@ -23,11 +23,11 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.repositories.RepositoriesService;


@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -31,6 +30,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
 import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
 import org.elasticsearch.cluster.AckedClusterStateUpdateTask;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -33,6 +32,7 @@ import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.allocation.AllocationService;
 import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Nullable;
 import org.elasticsearch.common.Priority;
 import org.elasticsearch.common.inject.Inject;


@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.cluster.shards;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -31,6 +30,7 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
 import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.create;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.snapshots.SnapshotInfo;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.delete;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.snapshots.SnapshotsService;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.get;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;
 import org.elasticsearch.common.settings.Settings;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.cluster.snapshots.restore;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.snapshots.RestoreInfo;


@@ -28,10 +28,10 @@ import org.elasticsearch.action.support.nodes.BaseNodesRequest;
 import org.elasticsearch.action.support.nodes.BaseNodesResponse;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;


@@ -24,13 +24,13 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.SnapshotsInProgress;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.SnapshotId;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
@@ -32,6 +31,7 @@ import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
 import org.elasticsearch.cluster.metadata.MetaData.Custom;
 import org.elasticsearch.cluster.routing.RoutingTable;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;


@@ -31,7 +31,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.index.cache.query.QueryCacheStats;
 import org.elasticsearch.index.engine.SegmentsStats;
 import org.elasticsearch.index.fielddata.FieldDataStats;
-import org.elasticsearch.index.percolator.PercolateStats;
+import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
 import org.elasticsearch.index.shard.DocsStats;
 import org.elasticsearch.index.store.StoreStats;
 import org.elasticsearch.search.suggest.completion.CompletionStats;
@@ -48,7 +48,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
     private QueryCacheStats queryCache;
     private CompletionStats completion;
     private SegmentsStats segments;
-    private PercolateStats percolate;
+    private PercolatorQueryCacheStats percolatorCache;
 
     private ClusterStatsIndices() {
     }
@@ -62,7 +62,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
         this.queryCache = new QueryCacheStats();
         this.completion = new CompletionStats();
         this.segments = new SegmentsStats();
-        this.percolate = new PercolateStats();
+        this.percolatorCache = new PercolatorQueryCacheStats();
 
         for (ClusterStatsNodeResponse r : nodeResponses) {
             for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
@@ -85,7 +85,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
                 queryCache.add(shardCommonStats.queryCache);
                 completion.add(shardCommonStats.completion);
                 segments.add(shardCommonStats.segments);
-                percolate.add(shardCommonStats.percolate);
+                percolatorCache.add(shardCommonStats.percolatorCache);
             }
         }
@@ -128,8 +128,8 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
         return segments;
     }
 
-    public PercolateStats getPercolate() {
-        return percolate;
+    public PercolatorQueryCacheStats getPercolatorCache() {
+        return percolatorCache;
     }
 
     @Override
@@ -142,7 +142,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
         queryCache = QueryCacheStats.readQueryCacheStats(in);
         completion = CompletionStats.readCompletionStats(in);
         segments = SegmentsStats.readSegmentsStats(in);
-        percolate = PercolateStats.readPercolateStats(in);
+        percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in);
     }
 
     @Override
@@ -155,7 +155,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
         queryCache.writeTo(out);
         completion.writeTo(out);
         segments.writeTo(out);
-        percolate.writeTo(out);
+        percolatorCache.writeTo(out);
     }
 
     public static ClusterStatsIndices readIndicesStats(StreamInput in) throws IOException {
@@ -178,7 +178,7 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
         queryCache.toXContent(builder, params);
         completion.toXContent(builder, params);
         segments.toXContent(builder, params);
-        percolate.toXContent(builder, params);
+        percolatorCache.toXContent(builder, params);
 
         return builder;
     }


@@ -28,10 +28,10 @@ import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.health.ClusterHealthStatus;
 import org.elasticsearch.cluster.health.ClusterStateHealth;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -56,7 +56,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
 
     private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
             CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.QueryCache, CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments,
-            CommonStatsFlags.Flag.Percolate);
+            CommonStatsFlags.Flag.PercolatorCache);
 
     private final NodeService nodeService;
     private final IndicesService indicesService;
@@ -105,7 +105,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
         for (IndexShard indexShard : indexService) {
             if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                 // only report on fully started shards
-                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
+                shardsStats.add(new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, SHARD_STATS_FLAGS), indexShard.commitStats()));
             }
         }
     }


@@ -22,11 +22,11 @@ package org.elasticsearch.action.admin.cluster.tasks;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.cluster.service.PendingClusterTask;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest.AliasActions;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
@@ -31,6 +30,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.AliasAction;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexAliasesService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.rest.action.admin.indices.alias.delete.AliasesNotFoundException;


@@ -22,11 +22,11 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.alias.get.GetAliasesRequest;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;


@@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.alias.get;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.AliasMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -32,11 +32,11 @@ import org.apache.lucene.util.IOUtils;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.FastStringReader;
 import org.elasticsearch.common.settings.Settings;


@@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.cache.clear;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;


@@ -23,13 +23,13 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.ClusterSettings;
 import org.elasticsearch.common.settings.Setting;


@@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.create;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataCreateIndexService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.indices.IndexAlreadyExistsException;


@@ -23,12 +23,12 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataDeleteIndexService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;


@@ -23,11 +23,11 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.IndicesOptions;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexNotFoundException;


@@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.exists.types;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -23,8 +23,8 @@ import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.ShardId;


@@ -22,12 +22,11 @@ package org.elasticsearch.action.admin.indices.flush;
 import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.forcemerge;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;


@@ -24,7 +24,6 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.admin.indices.get.GetIndexRequest.Feature;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
@@ -32,6 +31,7 @@ import org.elasticsearch.cluster.metadata.AliasMetaData;
 import org.elasticsearch.cluster.metadata.IndexMetaData;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;


@@ -22,9 +22,9 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.HandledTransportAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.threadpool.ThreadPool;


@@ -23,12 +23,12 @@ import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.MapBuilder;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.regex.Regex;


@@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.info.TransportClusterInfoAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MappingMetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.ImmutableOpenMap;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;


@@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.mapping.put;
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataMappingService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;


@@ -23,13 +23,13 @@ import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.DestructiveOperations;
 import org.elasticsearch.action.support.master.TransportMasterNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaDataIndexStateService;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.Index;


@@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.recovery;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.cluster.routing.ShardsIterator;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;


@@ -24,8 +24,8 @@ import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.BasicReplicationRequest;
 import org.elasticsearch.action.support.replication.TransportBroadcastReplicationAction;
-import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.ShardId;


@@ -23,12 +23,11 @@ import org.elasticsearch.action.ReplicationResponse;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.replication.BasicReplicationRequest;
 import org.elasticsearch.action.support.replication.TransportReplicationAction;
-import org.elasticsearch.cluster.ClusterService;
-import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
 import org.elasticsearch.cluster.action.shard.ShardStateAction;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
 import org.elasticsearch.cluster.metadata.MetaData;
+import org.elasticsearch.cluster.service.ClusterService;
 import org.elasticsearch.common.collect.Tuple;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;

View File

@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.segments;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;

View File

@ -22,12 +22,12 @@ package org.elasticsearch.action.admin.indices.settings.get;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;

View File

@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.settings.put;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
@ -30,6 +29,7 @@ import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.Index;
@ -80,6 +80,7 @@ public class TransportUpdateSettingsAction extends TransportMasterNodeAction<Upd
UpdateSettingsClusterStateUpdateRequest clusterStateUpdateRequest = new UpdateSettingsClusterStateUpdateRequest()
.indices(concreteIndices)
.settings(request.settings())
.setPreserveExisting(request.isPreserveExisting())
.ackTimeout(request.timeout())
.masterNodeTimeout(request.masterNodeTimeout());

View File

@ -29,8 +29,23 @@ public class UpdateSettingsClusterStateUpdateRequest extends IndicesClusterState
private Settings settings;
public UpdateSettingsClusterStateUpdateRequest() {
private boolean preserveExisting = false;
/**
* Returns <code>true</code> iff the settings update should only add but not update settings. If the setting already exists
* it should not be overwritten by this update. The default is <code>false</code>.
*/
public boolean isPreserveExisting() {
return preserveExisting;
}
/**
* Iff set to <code>true</code> this settings update will only add settings not already set on an index. Existing settings remain
* unchanged.
*/
public UpdateSettingsClusterStateUpdateRequest setPreserveExisting(boolean preserveExisting) {
this.preserveExisting = preserveExisting;
return this;
}
/**

View File

@ -47,6 +47,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
private String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.fromOptions(false, false, true, true);
private Settings settings = EMPTY_SETTINGS;
private boolean preserveExisting = false;
public UpdateSettingsRequest() {
}
@ -127,6 +128,23 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
return this;
}
/**
* Returns <code>true</code> iff the settings update should only add but not update settings. If the setting already exists
* it should not be overwritten by this update. The default is <code>false</code>.
*/
public boolean isPreserveExisting() {
return preserveExisting;
}
/**
* Iff set to <code>true</code> this settings update will only add settings not already set on an index. Existing settings remain
* unchanged.
*/
public UpdateSettingsRequest setPreserveExisting(boolean preserveExisting) {
this.preserveExisting = preserveExisting;
return this;
}
/**
* Sets the settings to be updated (either json/yaml/properties format)
*/
@ -149,6 +167,7 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
indicesOptions = IndicesOptions.readIndicesOptions(in);
settings = readSettingsFromStream(in);
readTimeout(in);
preserveExisting = in.readBoolean();
}
@Override
@ -158,5 +177,6 @@ public class UpdateSettingsRequest extends AcknowledgedRequest<UpdateSettingsReq
indicesOptions.writeIndicesOptions(out);
writeSettingsToStream(settings, out);
writeTimeout(out);
out.writeBoolean(preserveExisting);
}
}

View File

@ -84,4 +84,9 @@ public class UpdateSettingsRequestBuilder extends AcknowledgedRequestBuilder<Upd
request.settings(source);
return this;
}
public UpdateSettingsRequestBuilder setPreserveExisting(boolean preserveExisting) {
request.setPreserveExisting(preserveExisting);
return this;
}
}
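
A minimal usage sketch of the new preserveExisting flag (the index name, setting value, 'client', and 'listener' are illustrative assumptions, not part of this change; the request methods are the ones added above):

import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsRequest;

// Apply the refresh interval only where the index does not define one yet;
// indices that already set it keep their current value.
UpdateSettingsRequest request = new UpdateSettingsRequest("logs-2016");
request.settings("{\"index.refresh_interval\": \"30s\"}");   // json format, see settings(String)
request.setPreserveExisting(true);
client.admin().indices().updateSettings(request, listener);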

View File

@ -23,7 +23,6 @@ import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@ -38,6 +37,7 @@ import org.elasticsearch.cluster.routing.IndexShardRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.ImmutableOpenIntMap;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.common.inject.Inject;

View File

@ -32,9 +32,10 @@ import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.flush.FlushStats;
import org.elasticsearch.index.get.GetStats;
import org.elasticsearch.index.percolator.PercolatorQueryCache;
import org.elasticsearch.index.shard.IndexingStats;
import org.elasticsearch.index.merge.MergeStats;
import org.elasticsearch.index.percolator.PercolateStats;
import org.elasticsearch.index.percolator.PercolatorQueryCacheStats;
import org.elasticsearch.index.recovery.RecoveryStats;
import org.elasticsearch.index.refresh.RefreshStats;
import org.elasticsearch.index.search.stats.SearchStats;
@ -101,8 +102,8 @@ public class CommonStats implements Streamable, ToXContent {
case Segments:
segments = new SegmentsStats();
break;
case Percolate:
percolate = new PercolateStats();
case PercolatorCache:
percolatorCache = new PercolatorQueryCacheStats();
break;
case Translog:
translog = new TranslogStats();
@ -123,7 +124,8 @@ public class CommonStats implements Streamable, ToXContent {
}
public CommonStats(IndicesQueryCache indicesQueryCache, IndexShard indexShard, CommonStatsFlags flags) {
public CommonStats(IndicesQueryCache indicesQueryCache, PercolatorQueryCache percolatorQueryCache,
IndexShard indexShard, CommonStatsFlags flags) {
CommonStatsFlags.Flag[] setFlags = flags.getFlags();
@ -168,8 +170,8 @@ public class CommonStats implements Streamable, ToXContent {
case Segments:
segments = indexShard.segmentStats(flags.includeSegmentFileSizes());
break;
case Percolate:
percolate = indexShard.percolateStats();
case PercolatorCache:
percolatorCache = percolatorQueryCache.getStats(indexShard.shardId());
break;
case Translog:
translog = indexShard.translogStats();
@ -223,7 +225,7 @@ public class CommonStats implements Streamable, ToXContent {
public FieldDataStats fieldData;
@Nullable
public PercolateStats percolate;
public PercolatorQueryCacheStats percolatorCache;
@Nullable
public CompletionStats completion;
@ -333,13 +335,13 @@ public class CommonStats implements Streamable, ToXContent {
} else {
fieldData.add(stats.getFieldData());
}
if (percolate == null) {
if (stats.getPercolate() != null) {
percolate = new PercolateStats();
percolate.add(stats.getPercolate());
if (percolatorCache == null) {
if (stats.getPercolatorCache() != null) {
percolatorCache = new PercolatorQueryCacheStats();
percolatorCache.add(stats.getPercolatorCache());
}
} else {
percolate.add(stats.getPercolate());
percolatorCache.add(stats.getPercolatorCache());
}
if (completion == null) {
if (stats.getCompletion() != null) {
@ -447,8 +449,8 @@ public class CommonStats implements Streamable, ToXContent {
}
@Nullable
public PercolateStats getPercolate() {
return percolate;
public PercolatorQueryCacheStats getPercolatorCache() {
return percolatorCache;
}
@Nullable
@ -489,7 +491,7 @@ public class CommonStats implements Streamable, ToXContent {
/**
* Utility method which computes total memory by adding
* FieldData, Percolate, Segments (memory, index writer, version map)
* FieldData, Segments (memory, index writer, version map)
*/
public ByteSizeValue getTotalMemory() {
long size = 0;
@ -499,9 +501,6 @@ public class CommonStats implements Streamable, ToXContent {
if (this.getQueryCache() != null) {
size += this.getQueryCache().getMemorySizeInBytes();
}
if (this.getPercolate() != null) {
size += this.getPercolate().getMemorySizeInBytes();
}
if (this.getSegments() != null) {
size += this.getSegments().getMemoryInBytes() +
this.getSegments().getIndexWriterMemoryInBytes() +
@ -547,7 +546,7 @@ public class CommonStats implements Streamable, ToXContent {
fieldData = FieldDataStats.readFieldDataStats(in);
}
if (in.readBoolean()) {
percolate = PercolateStats.readPercolateStats(in);
percolatorCache = PercolatorQueryCacheStats.readPercolateStats(in);
}
if (in.readBoolean()) {
completion = CompletionStats.readCompletionStats(in);
@ -629,11 +628,11 @@ public class CommonStats implements Streamable, ToXContent {
out.writeBoolean(true);
fieldData.writeTo(out);
}
if (percolate == null) {
if (percolatorCache == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
percolate.writeTo(out);
percolatorCache.writeTo(out);
}
if (completion == null) {
out.writeBoolean(false);
@ -689,8 +688,8 @@ public class CommonStats implements Streamable, ToXContent {
if (fieldData != null) {
fieldData.toXContent(builder, params);
}
if (percolate != null) {
percolate.toXContent(builder, params);
if (percolatorCache != null) {
percolatorCache.toXContent(builder, params);
}
if (completion != null) {
completion.toXContent(builder, params);

View File

@ -240,7 +240,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
FieldData("fielddata"),
Docs("docs"),
Warmer("warmer"),
Percolate("percolate"),
PercolatorCache("percolator_cache"),
Completion("completion"),
Segments("segments"),
Translog("translog"),

View File

@ -185,12 +185,12 @@ public class IndicesStatsRequest extends BroadcastRequest<IndicesStatsRequest> {
}
public IndicesStatsRequest percolate(boolean percolate) {
flags.set(Flag.Percolate, percolate);
flags.set(Flag.PercolatorCache, percolate);
return this;
}
public boolean percolate() {
return flags.isSet(Flag.Percolate);
return flags.isSet(Flag.PercolatorCache);
}
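
A short sketch of reaching the renamed flag through the unchanged request API ('client', 'listener', and the index name are assumptions):

import org.elasticsearch.action.admin.indices.stats.IndicesStatsRequest;

// percolate(true) now sets Flag.PercolatorCache, so these stats are reported
// under "percolator_cache" rather than "percolate".
IndicesStatsRequest statsRequest = new IndicesStatsRequest()
        .indices("my-index")
        .percolate(true);
client.admin().indices().stats(statsRequest, listener);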
public IndicesStatsRequest segments(boolean segments) {

View File

@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.stats;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
@ -140,7 +140,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
flags.fieldDataFields(request.fieldDataFields());
}
if (request.percolate()) {
flags.set(CommonStatsFlags.Flag.Percolate);
flags.set(CommonStatsFlags.Flag.PercolatorCache);
}
if (request.segments()) {
flags.set(CommonStatsFlags.Flag.Segments);
@ -163,6 +163,6 @@ public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<
flags.set(CommonStatsFlags.Flag.Recovery);
}
return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexShard, flags), indexShard.commitStats());
return new ShardStats(indexShard.routingEntry(), indexShard.shardPath(), new CommonStats(indicesService.getIndicesQueryCache(), indexService.cache().getPercolatorQueryCache(), indexShard, flags), indexShard.commitStats());
}
}

View File

@ -21,12 +21,12 @@ package org.elasticsearch.action.admin.indices.template.delete;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;

View File

@ -22,12 +22,12 @@ import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;

View File

@ -21,13 +21,13 @@ package org.elasticsearch.action.admin.indices.template.put;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataIndexTemplateService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.IndexScopedSettings;
import org.elasticsearch.common.settings.Settings;

View File

@ -23,13 +23,13 @@ import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;

View File

@ -25,7 +25,6 @@ import org.elasticsearch.action.PrimaryMissingActionException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
@ -35,6 +34,7 @@ import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;

View File

@ -22,13 +22,13 @@ package org.elasticsearch.action.admin.indices.upgrade.post;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.ack.ClusterStateUpdateResponse;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaDataUpdateSettingsService;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;

View File

@ -29,13 +29,13 @@ import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.ParsingException;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.inject.Inject;

View File

@ -37,13 +37,13 @@ import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.update.TransportUpdateAction;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;

View File

@ -35,12 +35,12 @@ import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.action.update.UpdateHelper;
import org.elasticsearch.action.update.UpdateRequest;
import org.elasticsearch.action.update.UpdateResponse;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;

View File

@ -28,13 +28,12 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

View File

@ -27,10 +27,10 @@ import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cache.recycler.PageCacheRecycler;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.BigArrays;

View File

@ -28,6 +28,8 @@ import org.elasticsearch.common.joda.Joda;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.mapper.ip.IpFieldMapper;
import org.joda.time.DateTime;
import java.io.IOException;
@ -146,6 +148,17 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
*/
protected abstract T valueOf(String value, String optionalFormat);
/**
* @param value
* The value to be converted to a String
* @param optionalFormat
* A string describing how to print the specified value. Whether
* this parameter is supported depends on the implementation. If
* optionalFormat is specified and the implementation doesn't
* support it, an {@link UnsupportedOperationException} is thrown
*/
public abstract String stringValueOf(Object value, String optionalFormat);
/**
* Merges the provided stats into this stats instance.
*/
@ -274,6 +287,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return java.lang.Long.valueOf(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof Number) {
return java.lang.Long.toString(((Number) value).longValue());
} else {
throw new IllegalArgumentException("value must be a Long: " + value);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -327,6 +352,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return java.lang.Float.valueOf(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof Number) {
return java.lang.Float.toString(((Number) value).floatValue());
} else {
throw new IllegalArgumentException("value must be a Float: " + value);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -380,6 +417,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return java.lang.Double.valueOf(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof Number) {
return java.lang.Double.toString(((Number) value).doubleValue());
} else {
throw new IllegalArgumentException("value must be a Double: " + value);
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -437,6 +486,18 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return new BytesRef(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (optionalFormat != null) {
throw new UnsupportedOperationException("custom format isn't supported");
}
if (value instanceof BytesRef) {
return ((BytesRef) value).utf8ToString();
} else {
throw new IllegalArgumentException("value must be a BytesRef: " + value);
}
}
@Override
protected void toInnerXContent(XContentBuilder builder) throws IOException {
builder.field(Fields.MIN_VALUE, getMinValueAsString());
@ -490,6 +551,25 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
return dateFormatter.parser().parseMillis(value);
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
FormatDateTimeFormatter dateFormatter = this.dateFormatter;
if (optionalFormat != null) {
dateFormatter = Joda.forPattern(optionalFormat);
}
long millis;
if (value instanceof java.lang.Long) {
millis = ((java.lang.Long) value).longValue();
} else if (value instanceof DateTime) {
millis = ((DateTime) value).getMillis();
} else if (value instanceof BytesRef) {
millis = dateFormatter.parser().parseMillis(((BytesRef) value).utf8ToString());
} else {
throw new IllegalArgumentException("value must be either a DateTime or a long: " + value);
}
return dateFormatter.printer().print(millis);
}
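
A standalone sketch of what the date variant's formatting boils down to when optionalFormat is supplied (pattern and epoch millis are illustrative):

import org.elasticsearch.common.joda.FormatDateTimeFormatter;
import org.elasticsearch.common.joda.Joda;

// Joda.forPattern is the same helper used above to honor optionalFormat.
FormatDateTimeFormatter fmt = Joda.forPattern("yyyy-MM-dd");
String printed = fmt.printer().print(1458600000000L);   // "2016-03-21" (UTC)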
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
@ -504,6 +584,28 @@ public abstract class FieldStats<T extends Comparable<T>> implements Streamable,
}
public static class Ip extends Long {
public Ip(int maxDoc, int docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) {
super(maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue);
}
protected Ip(int type, long maxDoc, long docCount, long sumDocFreq, long sumTotalTermFreq, long minValue, long maxValue) {
super(type, maxDoc, docCount, sumDocFreq, sumTotalTermFreq, minValue, maxValue);
}
public Ip() {
}
@Override
public String stringValueOf(Object value, String optionalFormat) {
if (value instanceof BytesRef) {
return super.stringValueOf(IpFieldMapper.ipToLong(((BytesRef) value).utf8ToString()), optionalFormat);
}
return super.stringValueOf(value, optionalFormat);
}
}
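
A sketch of the Ip override end to end; the no-arg constructor and the input are only for illustration (the conversion needs no per-field statistics):

import org.apache.lucene.util.BytesRef;
import org.elasticsearch.action.fieldstats.FieldStats;

FieldStats.Ip ipStats = new FieldStats.Ip();   // empty stats, enough for stringValueOf
// The textual IP is converted through IpFieldMapper.ipToLong and then printed
// by the inherited numeric implementation:
String printed = ipStats.stringValueOf(new BytesRef("192.168.0.1"), null);   // "3232235521"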
public static FieldStats read(StreamInput in) throws IOException {
FieldStats stats;
byte type = in.readByte();

View File

@ -28,13 +28,13 @@ import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;

View File

@ -22,12 +22,12 @@ package org.elasticsearch.action.get;
import org.elasticsearch.action.RoutingMissingException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.Preference;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;

View File

@ -22,10 +22,10 @@ package org.elasticsearch.action.get;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;

View File

@ -23,10 +23,10 @@ import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;

View File

@ -28,7 +28,6 @@ import org.elasticsearch.action.admin.indices.create.TransportCreateIndexAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.AutoCreateIndex;
import org.elasticsearch.action.support.replication.TransportReplicationAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.action.index.MappingUpdatedAction;
import org.elasticsearch.cluster.action.shard.ShardStateAction;
@ -36,6 +35,7 @@ import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MappingMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

View File

@ -22,11 +22,11 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;

View File

@ -22,11 +22,11 @@ package org.elasticsearch.action.ingest;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;

View File

@ -29,9 +29,9 @@ import org.elasticsearch.action.index.IndexAction;
import org.elasticsearch.action.index.IndexRequest;
import org.elasticsearch.action.support.ActionFilter;
import org.elasticsearch.action.support.ActionFilterChain;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Randomness;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.inject.Inject;

View File

@ -26,12 +26,12 @@ import org.elasticsearch.action.admin.cluster.node.info.NodesInfoResponse;
import org.elasticsearch.action.admin.cluster.node.info.TransportNodesInfoAction;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.ingest.PipelineStore;

View File

@ -163,11 +163,7 @@ public class MultiPercolateRequest extends ActionRequest<MultiPercolateRequest>
@Override
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> indicesRequests = new ArrayList<>();
for (PercolateRequest percolateRequest : this.requests) {
indicesRequests.addAll(percolateRequest.subRequests());
}
return indicesRequests;
return requests;
}
private void parsePercolateAction(XContentParser parser, PercolateRequest percolateRequest, boolean allowExplicitIndex) throws IOException {

View File

@ -19,10 +19,12 @@
package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.client.Requests;
import org.elasticsearch.common.bytes.BytesArray;
@ -43,49 +45,37 @@ import static org.elasticsearch.action.ValidateActions.addValidationError;
/**
* A request to execute a percolate operation.
*/
public class PercolateRequest extends BroadcastRequest<PercolateRequest> implements CompositeIndicesRequest {
public class PercolateRequest extends ActionRequest<PercolateRequest> implements IndicesRequest.Replaceable {
protected String[] indices;
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();
private String documentType;
private String routing;
private String preference;
private GetRequest getRequest;
private boolean onlyCount;
private GetRequest getRequest;
private BytesReference source;
private BytesReference docSource;
// Used internally in order to compute tookInMillis; TransportBroadcastAction itself doesn't provide
// an easy way to hold it temporarily
long startTime;
/**
* Constructor only for internal usage.
*/
public PercolateRequest() {
public String[] indices() {
return indices;
}
PercolateRequest(PercolateRequest request, BytesReference docSource) {
this.indices = request.indices();
this.documentType = request.documentType();
this.routing = request.routing();
this.preference = request.preference();
this.source = request.source;
this.docSource = docSource;
this.onlyCount = request.onlyCount;
this.startTime = request.startTime;
public final PercolateRequest indices(String... indices) {
this.indices = indices;
return this;
}
@Override
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> requests = new ArrayList<>();
requests.add(this);
if (getRequest != null) {
requests.add(getRequest);
}
return requests;
public IndicesOptions indicesOptions() {
return indicesOptions;
}
public PercolateRequest indicesOptions(IndicesOptions indicesOptions) {
this.indicesOptions = indicesOptions;
return this;
}
/**
* Getter for {@link #documentType(String)}
*/
@ -244,13 +234,9 @@ public class PercolateRequest extends BroadcastRequest<PercolateRequest> impleme
return this;
}
BytesReference docSource() {
return docSource;
}
@Override
public ActionRequestValidationException validate() {
ActionRequestValidationException validationException = super.validate();
ActionRequestValidationException validationException = null;
if (documentType == null) {
validationException = addValidationError("type is missing", validationException);
}
@ -266,12 +252,12 @@ public class PercolateRequest extends BroadcastRequest<PercolateRequest> impleme
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
startTime = in.readVLong();
indices = in.readStringArray();
indicesOptions = IndicesOptions.readIndicesOptions(in);
documentType = in.readString();
routing = in.readOptionalString();
preference = in.readOptionalString();
source = in.readBytesReference();
docSource = in.readBytesReference();
if (in.readBoolean()) {
getRequest = new GetRequest();
getRequest.readFrom(in);
@ -282,12 +268,12 @@ public class PercolateRequest extends BroadcastRequest<PercolateRequest> impleme
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVLong(startTime);
out.writeStringArrayNullable(indices);
indicesOptions.writeIndicesOptions(out);
out.writeString(documentType);
out.writeOptionalString(routing);
out.writeOptionalString(preference);
out.writeBytesReference(source);
out.writeBytesReference(docSource);
if (getRequest != null) {
out.writeBoolean(true);
getRequest.writeTo(out);

View File

@ -18,7 +18,9 @@
*/
package org.elasticsearch.action.percolate;
import org.elasticsearch.action.ActionRequestBuilder;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.broadcast.BroadcastOperationRequestBuilder;
import org.elasticsearch.client.ElasticsearchClient;
import org.elasticsearch.common.Strings;
@ -36,7 +38,7 @@ import java.util.Map;
/**
* A builder that makes it easy to define a percolate request.
*/
public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder<PercolateRequest, PercolateResponse, PercolateRequestBuilder> {
public class PercolateRequestBuilder extends ActionRequestBuilder<PercolateRequest, PercolateResponse, PercolateRequestBuilder> {
private PercolateSourceBuilder sourceBuilder;
@ -44,6 +46,16 @@ public class PercolateRequestBuilder extends BroadcastOperationRequestBuilder<Pe
super(client, action, new PercolateRequest());
}
public PercolateRequestBuilder setIndices(String... indices) {
request.indices(indices);
return this;
}
public PercolateRequestBuilder setIndicesOptions(IndicesOptions indicesOptions) {
request.indicesOptions(indicesOptions);
return this;
}
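
A hypothetical usage of the reworked builder (index, type, and document are placeholders; setSource accepting a JSON string is an assumption about the existing overloads):

PercolateResponse response = client.preparePercolate()   // 'client' assumed
        .setIndices("my-index")
        .setDocumentType("my-type")
        .setSource("{\"doc\": {\"message\": \"hello world\"}}")
        .get();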
/**
* Sets the type of the document to percolate. This is important as it selects the mapping to be used to parse
* the document.

View File

@ -29,7 +29,6 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.highlight.HighlightField;
@ -47,6 +46,8 @@ import java.util.Map;
public class PercolateResponse extends BroadcastResponse implements Iterable<PercolateResponse.Match>, ToXContent {
public static final Match[] EMPTY = new Match[0];
// PercolatorQuery emits this score if no 'query' is defined in the percolate request
public final static float NO_SCORE = 0.0f;
private long tookInMillis;
private Match[] matches;
@ -65,15 +66,6 @@ public class PercolateResponse extends BroadcastResponse implements Iterable<Per
this.aggregations = aggregations;
}
PercolateResponse(int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures, long tookInMillis, Match[] matches) {
super(totalShards, successfulShards, failedShards, shardFailures);
if (tookInMillis < 0) {
throw new IllegalArgumentException("tookInMillis must be positive but was: " + tookInMillis);
}
this.tookInMillis = tookInMillis;
this.matches = matches;
}
PercolateResponse() {
}
@ -136,10 +128,10 @@ public class PercolateResponse extends BroadcastResponse implements Iterable<Per
builder.field(Fields._INDEX, match.getIndex());
builder.field(Fields._ID, match.getId());
float score = match.getScore();
if (score != PercolatorService.NO_SCORE) {
if (score != NO_SCORE) {
builder.field(Fields._SCORE, match.getScore());
}
if (match.getHighlightFields() != null) {
if (match.getHighlightFields().isEmpty() == false) {
builder.startObject(Fields.HIGHLIGHT);
for (HighlightField field : match.getHighlightFields().values()) {
builder.field(field.name());
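
A small consumer-side sketch of the new NO_SCORE constant ('response' is an assumed PercolateResponse):

for (PercolateResponse.Match match : response) {
    if (match.getScore() != PercolateResponse.NO_SCORE) {
        // only use scores that an explicit 'query' produced
    }
}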

View File

@ -1,130 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.percolate;
import org.elasticsearch.action.OriginalIndices;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;
import java.io.IOException;
/**
*/
public class PercolateShardRequest extends BroadcastShardRequest {
private String documentType;
private BytesReference source;
private BytesReference docSource;
private boolean onlyCount;
private int numberOfShards;
private long startTime;
public PercolateShardRequest() {
}
PercolateShardRequest(ShardId shardId, int numberOfShards, PercolateRequest request) {
super(shardId, request);
this.documentType = request.documentType();
this.source = request.source();
this.docSource = request.docSource();
this.onlyCount = request.onlyCount();
this.numberOfShards = numberOfShards;
this.startTime = request.startTime;
}
PercolateShardRequest(ShardId shardId, PercolateRequest request) {
super(shardId, request);
this.documentType = request.documentType();
this.source = request.source();
this.docSource = request.docSource();
this.onlyCount = request.onlyCount();
this.startTime = request.startTime;
}
public String documentType() {
return documentType;
}
public BytesReference source() {
return source;
}
public BytesReference docSource() {
return docSource;
}
public boolean onlyCount() {
return onlyCount;
}
public void documentType(String documentType) {
this.documentType = documentType;
}
public void source(BytesReference source) {
this.source = source;
}
public void docSource(BytesReference docSource) {
this.docSource = docSource;
}
void onlyCount(boolean onlyCount) {
this.onlyCount = onlyCount;
}
public int getNumberOfShards() {
return numberOfShards;
}
public long getStartTime() {
return startTime;
}
OriginalIndices originalIndices() {
return originalIndices;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
documentType = in.readString();
source = in.readBytesReference();
docSource = in.readBytesReference();
onlyCount = in.readBoolean();
numberOfShards = in.readVInt();
startTime = in.readLong(); // no vlong, this can be negative!
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(documentType);
out.writeBytesReference(source);
out.writeBytesReference(docSource);
out.writeBoolean(onlyCount);
out.writeVInt(numberOfShards);
out.writeLong(startTime);
}
}

View File

@ -1,180 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.percolate;
import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.lucene.Lucene;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.percolator.PercolateContext;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregator;
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorStreams;
import org.elasticsearch.search.aggregations.pipeline.SiblingPipelineAggregator;
import org.elasticsearch.search.highlight.HighlightField;
import org.elasticsearch.search.query.QuerySearchResult;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
*/
public class PercolateShardResponse extends BroadcastShardResponse {
private TopDocs topDocs;
private Map<Integer, String> ids;
private Map<Integer, Map<String, HighlightField>> hls;
private boolean onlyCount;
private int requestedSize;
private InternalAggregations aggregations;
private List<SiblingPipelineAggregator> pipelineAggregators;
PercolateShardResponse() {
}
public PercolateShardResponse(TopDocs topDocs, Map<Integer, String> ids, Map<Integer, Map<String, HighlightField>> hls, PercolateContext context) {
super(context.indexShard().shardId());
this.topDocs = topDocs;
this.ids = ids;
this.hls = hls;
this.onlyCount = context.isOnlyCount();
this.requestedSize = context.size();
QuerySearchResult result = context.queryResult();
if (result != null) {
if (result.aggregations() != null) {
this.aggregations = (InternalAggregations) result.aggregations();
}
this.pipelineAggregators = result.pipelineAggregators();
}
}
public TopDocs topDocs() {
return topDocs;
}
/**
* Returns per match the percolator query id. The key is the Lucene docId of the matching percolator query.
*/
public Map<Integer, String> ids() {
return ids;
}
public int requestedSize() {
return requestedSize;
}
/**
* Returns per match the highlight snippets. The key is the Lucene docId of the matching percolator query.
*/
public Map<Integer, Map<String, HighlightField>> hls() {
return hls;
}
public InternalAggregations aggregations() {
return aggregations;
}
public List<SiblingPipelineAggregator> pipelineAggregators() {
return pipelineAggregators;
}
public boolean onlyCount() {
return onlyCount;
}
public boolean isEmpty() {
return topDocs.totalHits == 0;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
onlyCount = in.readBoolean();
requestedSize = in.readVInt();
topDocs = Lucene.readTopDocs(in);
int size = in.readVInt();
ids = new HashMap<>(size);
for (int i = 0; i < size; i++) {
ids.put(in.readVInt(), in.readString());
}
size = in.readVInt();
hls = new HashMap<>(size);
for (int i = 0; i < size; i++) {
int docId = in.readVInt();
int mSize = in.readVInt();
Map<String, HighlightField> fields = new HashMap<>();
for (int j = 0; j < mSize; j++) {
fields.put(in.readString(), HighlightField.readHighlightField(in));
}
hls.put(docId, fields);
}
aggregations = InternalAggregations.readOptionalAggregations(in);
if (in.readBoolean()) {
int pipelineAggregatorsSize = in.readVInt();
List<SiblingPipelineAggregator> pipelineAggregators = new ArrayList<>(pipelineAggregatorsSize);
for (int i = 0; i < pipelineAggregatorsSize; i++) {
BytesReference type = in.readBytesReference();
PipelineAggregator pipelineAggregator = PipelineAggregatorStreams.stream(type).readResult(in);
pipelineAggregators.add((SiblingPipelineAggregator) pipelineAggregator);
}
this.pipelineAggregators = pipelineAggregators;
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeBoolean(onlyCount);
out.writeVLong(requestedSize);
Lucene.writeTopDocs(out, topDocs);
out.writeVInt(ids.size());
for (Map.Entry<Integer, String> entry : ids.entrySet()) {
out.writeVInt(entry.getKey());
out.writeString(entry.getValue());
}
out.writeVInt(hls.size());
for (Map.Entry<Integer, Map<String, HighlightField>> entry1 : hls.entrySet()) {
out.writeVInt(entry1.getKey());
out.writeVInt(entry1.getValue().size());
for (Map.Entry<String, HighlightField> entry2 : entry1.getValue().entrySet()) {
out.writeString(entry2.getKey());
entry2.getValue().writeTo(out);
}
}
out.writeOptionalStreamable(aggregations);
if (pipelineAggregators == null) {
out.writeBoolean(false);
} else {
out.writeBoolean(true);
out.writeVInt(pipelineAggregators.size());
for (PipelineAggregator pipelineAggregator : pipelineAggregators) {
out.writeBytesReference(pipelineAggregator.type().stream());
pipelineAggregator.writeTo(out);
}
}
}
}

View File

@ -19,114 +19,91 @@
package org.elasticsearch.action.percolate;
import com.carrotsearch.hppc.IntArrayList;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.UnavailableShardsException;
import org.elasticsearch.action.get.GetRequest;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.MultiGetItemResponse;
import org.elasticsearch.action.get.MultiGetRequest;
import org.elasticsearch.action.get.MultiGetResponse;
import org.elasticsearch.action.get.TransportMultiGetAction;
import org.elasticsearch.action.search.MultiSearchRequest;
import org.elasticsearch.action.search.MultiSearchResponse;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.engine.DocumentMissingException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;
/**
*/
public class TransportMultiPercolateAction extends HandledTransportAction<MultiPercolateRequest, MultiPercolateResponse> {
private final ClusterService clusterService;
private final PercolatorService percolatorService;
private final TransportMultiGetAction multiGetAction;
private final TransportShardMultiPercolateAction shardMultiPercolateAction;
private final Client client;
private final ParseFieldMatcher parseFieldMatcher;
private final IndicesQueriesRegistry queryRegistry;
private final AggregatorParsers aggParsers;
@Inject
public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportShardMultiPercolateAction shardMultiPercolateAction,
ClusterService clusterService, TransportService transportService, PercolatorService percolatorService,
TransportMultiGetAction multiGetAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
public TransportMultiPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService, ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver, Client client, IndicesQueriesRegistry queryRegistry,
AggregatorParsers aggParsers) {
super(settings, MultiPercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, MultiPercolateRequest::new);
this.shardMultiPercolateAction = shardMultiPercolateAction;
this.clusterService = clusterService;
this.percolatorService = percolatorService;
this.multiGetAction = multiGetAction;
this.client = client;
this.aggParsers = aggParsers;
this.parseFieldMatcher = new ParseFieldMatcher(settings);
this.queryRegistry = queryRegistry;
}
@Override
protected void doExecute(final MultiPercolateRequest request, final ActionListener<MultiPercolateResponse> listener) {
final ClusterState clusterState = clusterService.state();
clusterState.blocks().globalBlockedRaiseException(ClusterBlockLevel.READ);
final List<Object> percolateRequests = new ArrayList<>(request.requests().size());
// Can have a mixture of percolate requests (normal percolate requests and percolate-existing-doc requests),
// so we need to keep track of which percolate request had a get request
final IntArrayList getRequestSlots = new IntArrayList();
List<GetRequest> existingDocsRequests = new ArrayList<>();
for (int slot = 0; slot < request.requests().size(); slot++) {
PercolateRequest percolateRequest = request.requests().get(slot);
percolateRequest.startTime = System.currentTimeMillis();
percolateRequests.add(percolateRequest);
if (percolateRequest.getRequest() != null) {
existingDocsRequests.add(percolateRequest.getRequest());
getRequestSlots.add(slot);
protected void doExecute(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener) {
List<Tuple<Integer, GetRequest>> getRequests = new ArrayList<>();
for (int i = 0; i < request.requests().size(); i++) {
GetRequest getRequest = request.requests().get(i).getRequest();
if (getRequest != null) {
getRequests.add(new Tuple<>(i, getRequest));
}
}
if (!existingDocsRequests.isEmpty()) {
final MultiGetRequest multiGetRequest = new MultiGetRequest();
for (GetRequest getRequest : existingDocsRequests) {
multiGetRequest.add(
new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id())
.routing(getRequest.routing())
);
if (getRequests.isEmpty()) {
innerDoExecute(request, listener, Collections.emptyMap(), new HashMap<>());
} else {
MultiGetRequest multiGetRequest = new MultiGetRequest();
for (Tuple<Integer, GetRequest> tuple : getRequests) {
GetRequest getRequest = tuple.v2();
multiGetRequest.add(new MultiGetRequest.Item(getRequest.index(), getRequest.type(), getRequest.id()));
}
multiGetAction.execute(multiGetRequest, new ActionListener<MultiGetResponse>() {
client.multiGet(multiGetRequest, new ActionListener<MultiGetResponse>() {
@Override
public void onResponse(MultiGetResponse multiGetItemResponses) {
for (int i = 0; i < multiGetItemResponses.getResponses().length; i++) {
MultiGetItemResponse itemResponse = multiGetItemResponses.getResponses()[i];
int slot = getRequestSlots.get(i);
if (!itemResponse.isFailed()) {
GetResponse getResponse = itemResponse.getResponse();
if (getResponse.isExists()) {
PercolateRequest originalRequest = (PercolateRequest) percolateRequests.get(slot);
percolateRequests.set(slot, new PercolateRequest(originalRequest, getResponse.getSourceAsBytesRef()));
} else {
logger.trace("mpercolate existing doc, item[{}] doesn't exist", slot);
percolateRequests.set(slot, new DocumentMissingException(null, getResponse.getType(), getResponse.getId()));
}
public void onResponse(MultiGetResponse response) {
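// Track fetched document sources and early get failures by their original slot,
// so the final multi percolate response can be re-assembled in request order.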
Map<Integer, BytesReference> getResponseSources = new HashMap<>(response.getResponses().length);
Map<Integer, MultiPercolateResponse.Item> preFailures = new HashMap<>();
for (int i = 0; i < response.getResponses().length; i++) {
MultiGetItemResponse itemResponse = response.getResponses()[i];
int originalSlot = getRequests.get(i).v1();
if (itemResponse.isFailed()) {
preFailures.put(originalSlot, new MultiPercolateResponse.Item(itemResponse.getFailure().getFailure()));
} else {
logger.trace("mpercolate existing doc, item[{}] failure {}", slot, itemResponse.getFailure());
percolateRequests.set(slot, itemResponse.getFailure());
if (itemResponse.getResponse().isExists()) {
getResponseSources.put(originalSlot, itemResponse.getResponse().getSourceAsBytesRef());
} else {
GetRequest getRequest = getRequests.get(i).v2();
preFailures.put(originalSlot, new MultiPercolateResponse.Item(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", getRequest.index(), getRequest.type(), getRequest.id())));
}
}
}
new ASyncAction(request, percolateRequests, listener, clusterState).run();
innerDoExecute(request, listener, getResponseSources, preFailures);
}
@Override
@@ -134,200 +111,81 @@ public class TransportMultiPercolateAction extends HandledTransportAction<MultiP
listener.onFailure(e);
}
});
} else {
new ASyncAction(request, percolateRequests, listener, clusterState).run();
}
}
private final class ASyncAction {
final ActionListener<MultiPercolateResponse> finalListener;
final Map<ShardId, TransportShardMultiPercolateAction.Request> requestsByShard;
final MultiPercolateRequest multiPercolateRequest;
final List<Object> percolateRequests;
final Map<ShardId, IntArrayList> shardToSlots;
final AtomicInteger expectedOperations;
final AtomicArray<Object> reducedResponses;
final AtomicReferenceArray<AtomicInteger> expectedOperationsPerItem;
final AtomicReferenceArray<AtomicReferenceArray> responsesByItemAndShard;
ASyncAction(MultiPercolateRequest multiPercolateRequest, List<Object> percolateRequests, ActionListener<MultiPercolateResponse> finalListener, ClusterState clusterState) {
this.finalListener = finalListener;
this.multiPercolateRequest = multiPercolateRequest;
this.percolateRequests = percolateRequests;
responsesByItemAndShard = new AtomicReferenceArray<>(percolateRequests.size());
expectedOperationsPerItem = new AtomicReferenceArray<>(percolateRequests.size());
reducedResponses = new AtomicArray<>(percolateRequests.size());
// Resolving concrete indices and routing and grouping the requests by shard
requestsByShard = new HashMap<>();
// Keep track of which slots belong to which shard, in case a request to a shard fails on all copies
shardToSlots = new HashMap<>();
int expectedResults = 0;
for (int slot = 0; slot < percolateRequests.size(); slot++) {
Object element = percolateRequests.get(slot);
assert element != null;
if (element instanceof PercolateRequest) {
PercolateRequest percolateRequest = (PercolateRequest) element;
String[] concreteIndices;
try {
concreteIndices = indexNameExpressionResolver.concreteIndexNames(clusterState, percolateRequest);
} catch (IndexNotFoundException e) {
reducedResponses.set(slot, e);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
expectedOperationsPerItem.set(slot, new AtomicInteger(0));
continue;
}
Map<String, Set<String>> routing = indexNameExpressionResolver.resolveSearchRouting(clusterState, percolateRequest.routing(), percolateRequest.indices());
// TODO: I only need shardIds, ShardIterator(ShardRouting) is only needed in TransportShardMultiPercolateAction
GroupShardsIterator shards = clusterService.operationRouting().searchShards(
clusterState, concreteIndices, routing, percolateRequest.preference()
);
if (shards.size() == 0) {
reducedResponses.set(slot, new UnavailableShardsException(null, "No shards available"));
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
expectedOperationsPerItem.set(slot, new AtomicInteger(0));
continue;
}
// The shard id is used as index in the atomic ref array, so we need to find out how many shards there are regardless of routing:
int numShards = clusterService.operationRouting().searchShardsCount(clusterState, concreteIndices, null);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(numShards));
expectedOperationsPerItem.set(slot, new AtomicInteger(shards.size()));
for (ShardIterator shard : shards) {
ShardId shardId = shard.shardId();
TransportShardMultiPercolateAction.Request requests = requestsByShard.get(shardId);
if (requests == null) {
requestsByShard.put(shardId, requests = new TransportShardMultiPercolateAction.Request(shardId.getIndexName(), shardId.getId(), percolateRequest.preference()));
}
logger.trace("Adding shard[{}] percolate request for item[{}]", shardId, slot);
requests.add(new TransportShardMultiPercolateAction.Request.Item(slot, new PercolateShardRequest(shardId, percolateRequest)));
IntArrayList items = shardToSlots.get(shardId);
if (items == null) {
shardToSlots.put(shardId, items = new IntArrayList());
}
items.add(slot);
}
expectedResults++;
} else if (element instanceof Throwable || element instanceof MultiGetResponse.Failure) {
logger.trace("item[{}] won't be executed, reason: {}", slot, element);
reducedResponses.set(slot, element);
responsesByItemAndShard.set(slot, new AtomicReferenceArray(0));
expectedOperationsPerItem.set(slot, new AtomicInteger(0));
}
}
expectedOperations = new AtomicInteger(expectedResults);
}
void run() {
if (expectedOperations.get() == 0) {
finish();
return;
}
logger.trace("mpercolate executing for shards {}", requestsByShard.keySet());
for (Map.Entry<ShardId, TransportShardMultiPercolateAction.Request> entry : requestsByShard.entrySet()) {
final ShardId shardId = entry.getKey();
TransportShardMultiPercolateAction.Request shardRequest = entry.getValue();
shardMultiPercolateAction.execute(shardRequest, new ActionListener<TransportShardMultiPercolateAction.Response>() {
private void innerDoExecute(MultiPercolateRequest request, ActionListener<MultiPercolateResponse> listener, Map<Integer, BytesReference> getResponseSources, Map<Integer, MultiPercolateResponse.Item> preFailures) {
try {
MultiSearchRequest multiSearchRequest = createMultiSearchRequest(request, getResponseSources, preFailures);
if (multiSearchRequest.requests().isEmpty()) {
// we may have failed to turn all percolate requests into search requests,
// in that case just return the response...
listener.onResponse(
createMultiPercolateResponse(new MultiSearchResponse(new MultiSearchResponse.Item[0]), request, preFailures)
);
} else {
client.multiSearch(multiSearchRequest, new ActionListener<MultiSearchResponse>() {
@Override
public void onResponse(TransportShardMultiPercolateAction.Response response) {
onShardResponse(shardId, response);
public void onResponse(MultiSearchResponse response) {
try {
listener.onResponse(createMultiPercolateResponse(response, request, preFailures));
} catch (Exception e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
onShardFailure(shardId, e);
listener.onFailure(e);
}
});
}
} catch (Exception e) {
listener.onFailure(e);
}
}
@SuppressWarnings("unchecked")
void onShardResponse(ShardId shardId, TransportShardMultiPercolateAction.Response response) {
logger.trace("{} Percolate shard response", shardId);
private MultiSearchRequest createMultiSearchRequest(MultiPercolateRequest multiPercolateRequest, Map<Integer, BytesReference> getResponseSources, Map<Integer, MultiPercolateResponse.Item> preFailures) throws IOException {
MultiSearchRequest multiSearchRequest = new MultiSearchRequest();
multiSearchRequest.indicesOptions(multiPercolateRequest.indicesOptions());
for (int i = 0; i < multiPercolateRequest.requests().size(); i++) {
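// Slots that already failed during the get phase are skipped here;
// createMultiPercolateResponse later re-aligns the multi-search items with those pre-failures by slot.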
if (preFailures.containsKey(i)) {
continue;
}
PercolateRequest percolateRequest = multiPercolateRequest.requests().get(i);
BytesReference docSource = getResponseSources.get(i);
try {
for (TransportShardMultiPercolateAction.Response.Item item : response.items()) {
AtomicReferenceArray shardResults = responsesByItemAndShard.get(item.slot());
if (shardResults == null) {
assert false : "shardResults can't be null";
continue;
}
if (item.failed()) {
shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, item.error()));
} else {
shardResults.set(shardId.id(), item.response());
}
assert expectedOperationsPerItem.get(item.slot()).get() >= 1 : "slot[" + item.slot() + "] can't be lower than one";
if (expectedOperationsPerItem.get(item.slot()).decrementAndGet() == 0) {
// Failure won't bubble up, since we fail the whole request now via the catch clause below,
// so expectedOperationsPerItem will not be decremented twice.
reduce(item.slot());
}
}
} catch (Throwable e) {
logger.error("{} Percolate original reduce error", e, shardId);
finalListener.onFailure(e);
SearchRequest searchRequest = TransportPercolateAction.createSearchRequest(
percolateRequest, docSource, queryRegistry, aggParsers, parseFieldMatcher
);
multiSearchRequest.add(searchRequest);
} catch (Exception e) {
preFailures.put(i, new MultiPercolateResponse.Item(e));
}
}
@SuppressWarnings("unchecked")
void onShardFailure(ShardId shardId, Throwable e) {
logger.debug("{} Shard multi percolate failure", e, shardId);
try {
IntArrayList slots = shardToSlots.get(shardId);
for (int i = 0; i < slots.size(); i++) {
int slot = slots.get(i);
AtomicReferenceArray shardResults = responsesByItemAndShard.get(slot);
if (shardResults == null) {
continue;
}
return multiSearchRequest;
}
shardResults.set(shardId.id(), new BroadcastShardOperationFailedException(shardId, e));
assert expectedOperationsPerItem.get(slot).get() >= 1 : "slot[" + slot + "] can't be lower than one. Caused by: " + e.getMessage();
if (expectedOperationsPerItem.get(slot).decrementAndGet() == 0) {
reduce(slot);
}
}
} catch (Throwable t) {
logger.error("{} Percolate original reduce error, original error {}", t, shardId, e);
finalListener.onFailure(t);
}
}
void reduce(int slot) {
AtomicReferenceArray shardResponses = responsesByItemAndShard.get(slot);
PercolateResponse reducedResponse = TransportPercolateAction.reduce((PercolateRequest) percolateRequests.get(slot), shardResponses, percolatorService);
reducedResponses.set(slot, reducedResponse);
assert expectedOperations.get() >= 1 : "slot[" + slot + "] expected operations should be >= 1 but is " + expectedOperations.get();
if (expectedOperations.decrementAndGet() == 0) {
finish();
}
}
void finish() {
MultiPercolateResponse.Item[] finalResponse = new MultiPercolateResponse.Item[reducedResponses.length()];
for (int slot = 0; slot < reducedResponses.length(); slot++) {
Object element = reducedResponses.get(slot);
assert element != null : "Element[" + slot + "] shouldn't be null";
if (element instanceof PercolateResponse) {
finalResponse[slot] = new MultiPercolateResponse.Item((PercolateResponse) element);
} else if (element instanceof Throwable) {
finalResponse[slot] = new MultiPercolateResponse.Item((Throwable)element);
} else if (element instanceof MultiGetResponse.Failure) {
finalResponse[slot] = new MultiPercolateResponse.Item(((MultiGetResponse.Failure)element).getFailure());
private MultiPercolateResponse createMultiPercolateResponse(MultiSearchResponse multiSearchResponse, MultiPercolateRequest request, Map<Integer, MultiPercolateResponse.Item> preFailures) {
int searchResponseIndex = 0;
MultiPercolateResponse.Item[] percolateItems = new MultiPercolateResponse.Item[request.requests().size()];
for (int i = 0; i < percolateItems.length; i++) {
if (preFailures.containsKey(i)) {
percolateItems[i] = preFailures.get(i);
} else {
MultiSearchResponse.Item searchItem = multiSearchResponse.getResponses()[searchResponseIndex++];
if (searchItem.isFailure()) {
percolateItems[i] = new MultiPercolateResponse.Item(searchItem.getFailure());
} else {
PercolateRequest percolateRequest = request.requests().get(i);
percolateItems[i] = new MultiPercolateResponse.Item(TransportPercolateAction.createPercolateResponse(searchItem.getResponse(), percolateRequest.onlyCount()));
}
}
finalListener.onResponse(new MultiPercolateResponse(finalResponse));
}
return new MultiPercolateResponse(percolateItems);
}
}
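The class above replaces the old shard-level fan-out with a get-then-multi-search pipeline, and its correctness rests entirely on slot bookkeeping: every request keeps its original position through the get phase, the search phase, and response assembly. Below is a minimal, self-contained sketch of that pattern; all class and variable names are illustrative and none come from the Elasticsearch codebase.

import java.util.*;

// Standalone sketch of the slot-tracking pattern used above: requests that
// need a document fetch are remembered as (originalSlot, key) pairs, fetch
// results and fetch failures are keyed back by slot, and the final array is
// assembled in the original request order.
public class SlotTrackingSketch {
    public static void main(String[] args) {
        List<String> requests = Arrays.asList("inline-doc", "fetch:doc1", "fetch:missing");
        List<Map.Entry<Integer, String>> fetches = new ArrayList<>();
        for (int slot = 0; slot < requests.size(); slot++) {
            String r = requests.get(slot);
            if (r.startsWith("fetch:")) {
                fetches.add(new AbstractMap.SimpleEntry<>(slot, r.substring(6)));
            }
        }
        Map<Integer, String> sources = new HashMap<>();      // successful fetches by slot
        Map<Integer, String> preFailures = new HashMap<>();  // failed fetches by slot
        for (Map.Entry<Integer, String> fetch : fetches) {
            if ("doc1".equals(fetch.getValue())) {
                sources.put(fetch.getKey(), "{\"field\":\"value\"}");
            } else {
                preFailures.put(fetch.getKey(), "document [" + fetch.getValue() + "] doesn't exist");
            }
        }
        // Assemble responses in the original order; pre-failed slots never
        // reach the search phase, mirroring createMultiPercolateResponse.
        String[] items = new String[requests.size()];
        for (int slot = 0; slot < items.length; slot++) {
            if (preFailures.containsKey(slot)) {
                items[slot] = "failure: " + preFailures.get(slot);
            } else if (sources.containsKey(slot)) {
                items[slot] = "percolate fetched source " + sources.get(slot);
            } else {
                items[slot] = "percolate inline source " + requests.get(slot);
            }
        }
        System.out.println(Arrays.toString(items));
    }
}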

@@ -18,71 +18,74 @@
*/
package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ResourceNotFoundException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.get.GetResponse;
import org.elasticsearch.action.get.TransportGetAction;
import org.elasticsearch.action.search.SearchRequest;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.client.Client;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.ParseFieldMatcher;
import org.elasticsearch.common.bytes.BytesReference;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.engine.DocumentMissingException;
import org.elasticsearch.percolator.PercolateException;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.common.text.Text;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentHelper;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.index.query.BoolQueryBuilder;
import org.elasticsearch.index.query.PercolatorQueryBuilder;
import org.elasticsearch.index.query.QueryBuilder;
import org.elasticsearch.index.query.QueryBuilders;
import org.elasticsearch.index.query.QueryParseContext;
import org.elasticsearch.index.query.TemplateQueryParser;
import org.elasticsearch.indices.query.IndicesQueriesRegistry;
import org.elasticsearch.rest.action.support.RestActions;
import org.elasticsearch.script.Template;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHits;
import org.elasticsearch.search.aggregations.AggregatorParsers;
import org.elasticsearch.search.aggregations.InternalAggregations;
import org.elasticsearch.search.builder.SearchSourceBuilder;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;
import java.util.Arrays;
/**
*
*/
public class TransportPercolateAction extends TransportBroadcastAction<PercolateRequest, PercolateResponse, PercolateShardRequest, PercolateShardResponse> {
public class TransportPercolateAction extends HandledTransportAction<PercolateRequest, PercolateResponse> {
private final PercolatorService percolatorService;
private final TransportGetAction getAction;
private final Client client;
private final ParseFieldMatcher parseFieldMatcher;
private final IndicesQueriesRegistry queryRegistry;
private final AggregatorParsers aggParsers;
@Inject
public TransportPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, PercolatorService percolatorService,
TransportGetAction getAction, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, PercolateAction.NAME, threadPool, clusterService, transportService, actionFilters,
indexNameExpressionResolver, PercolateRequest::new, PercolateShardRequest::new, ThreadPool.Names.PERCOLATE);
this.percolatorService = percolatorService;
this.getAction = getAction;
public TransportPercolateAction(Settings settings, ThreadPool threadPool, TransportService transportService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver,
Client client, IndicesQueriesRegistry indicesQueriesRegistry, AggregatorParsers aggParsers) {
super(settings, PercolateAction.NAME, threadPool, transportService, actionFilters, indexNameExpressionResolver, PercolateRequest::new);
this.client = client;
this.aggParsers = aggParsers;
this.parseFieldMatcher = new ParseFieldMatcher(settings);
this.queryRegistry = indicesQueriesRegistry;
}
@Override
protected void doExecute(Task task, final PercolateRequest request, final ActionListener<PercolateResponse> listener) {
request.startTime = System.currentTimeMillis();
protected void doExecute(PercolateRequest request, ActionListener<PercolateResponse> listener) {
if (request.getRequest() != null) {
getAction.execute(request.getRequest(), new ActionListener<GetResponse>() {
client.get(request.getRequest(), new ActionListener<GetResponse>() {
@Override
public void onResponse(GetResponse getResponse) {
if (!getResponse.isExists()) {
onFailure(new DocumentMissingException(null, request.getRequest().type(), request.getRequest().id()));
return;
if (getResponse.isExists()) {
innerDoExecute(request, getResponse.getSourceAsBytesRef(), listener);
} else {
onFailure(new ResourceNotFoundException("percolate document [{}/{}/{}] doesn't exist", request.getRequest().index(), request.getRequest().type(), request.getRequest().id()));
}
BytesReference docSource = getResponse.getSourceAsBytesRef();
TransportPercolateAction.super.doExecute(task, new PercolateRequest(request, docSource), listener);
}
@Override
@@ -91,99 +94,153 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
}
});
} else {
super.doExecute(task, request, listener);
innerDoExecute(request, null, listener);
}
}
@Override
protected ClusterBlockException checkGlobalBlock(ClusterState state, PercolateRequest request) {
return state.blocks().globalBlockedException(ClusterBlockLevel.READ);
}
@Override
protected ClusterBlockException checkRequestBlock(ClusterState state, PercolateRequest request, String[] concreteIndices) {
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
}
@Override
protected PercolateResponse newResponse(PercolateRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
return reduce(request, shardsResponses, percolatorService);
}
public static PercolateResponse reduce(PercolateRequest request, AtomicReferenceArray shardsResponses, PercolatorService percolatorService) {
int successfulShards = 0;
int failedShards = 0;
List<PercolateShardResponse> shardResults = null;
List<ShardOperationFailedException> shardFailures = null;
boolean onlyCount = false;
for (int i = 0; i < shardsResponses.length(); i++) {
Object shardResponse = shardsResponses.get(i);
if (shardResponse == null) {
// simply ignore non-active shards
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
PercolateShardResponse percolateShardResponse = (PercolateShardResponse) shardResponse;
successfulShards++;
if (!percolateShardResponse.isEmpty()) {
if (shardResults == null) {
onlyCount = percolateShardResponse.onlyCount();
shardResults = new ArrayList<>();
}
shardResults.add(percolateShardResponse);
}
}
}
if (shardResults == null) {
long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime);
PercolateResponse.Match[] matches = request.onlyCount() ? null : PercolateResponse.EMPTY;
return new PercolateResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures, tookInMillis, matches);
} else {
PercolatorService.ReduceResult result = null;
try {
result = percolatorService.reduce(onlyCount, shardResults);
} catch (IOException e) {
throw new ElasticsearchException("error during reduce phase", e);
}
long tookInMillis = Math.max(1, System.currentTimeMillis() - request.startTime);
return new PercolateResponse(
shardsResponses.length(), successfulShards, failedShards, shardFailures,
result.matches(), result.count(), tookInMillis, result.reducedAggregations()
);
}
}
@Override
protected PercolateShardRequest newShardRequest(int numShards, ShardRouting shard, PercolateRequest request) {
return new PercolateShardRequest(shard.shardId(), numShards, request);
}
@Override
protected PercolateShardResponse newShardResponse() {
return new PercolateShardResponse();
}
@Override
protected GroupShardsIterator shards(ClusterState clusterState, PercolateRequest request, String[] concreteIndices) {
Map<String, Set<String>> routingMap = indexNameExpressionResolver.resolveSearchRouting(clusterState, request.routing(), request.indices());
return clusterService.operationRouting().searchShards(clusterState, concreteIndices, routingMap, request.preference());
}
@Override
protected PercolateShardResponse shardOperation(PercolateShardRequest request) {
private void innerDoExecute(PercolateRequest request, BytesReference docSource, ActionListener<PercolateResponse> listener) {
SearchRequest searchRequest;
try {
return percolatorService.percolate(request);
} catch (Throwable e) {
logger.trace("{} failed to percolate", e, request.shardId());
throw new PercolateException(request.shardId(), "failed to percolate", e);
searchRequest = createSearchRequest(request, docSource, queryRegistry, aggParsers, parseFieldMatcher);
} catch (IOException e) {
listener.onFailure(e);
return;
}
client.search(searchRequest, new ActionListener<SearchResponse>() {
@Override
public void onResponse(SearchResponse searchResponse) {
try {
listener.onResponse(createPercolateResponse(searchResponse, request.onlyCount()));
} catch (Exception e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
listener.onFailure(e);
}
});
}
public static SearchRequest createSearchRequest(PercolateRequest percolateRequest, BytesReference documentSource, IndicesQueriesRegistry queryRegistry, AggregatorParsers aggParsers, ParseFieldMatcher parseFieldMatcher) throws IOException {
SearchRequest searchRequest = new SearchRequest();
if (percolateRequest.indices() != null) {
searchRequest.indices(percolateRequest.indices());
}
searchRequest.indicesOptions(percolateRequest.indicesOptions());
searchRequest.routing(percolateRequest.routing());
searchRequest.preference(percolateRequest.preference());
BytesReference querySource = null;
XContentBuilder searchSource = XContentFactory.jsonBuilder().startObject();
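// The percolate source is translated field by field: "doc" and "query"/"filter" are
// extracted as raw bytes, while search-compatible fields (size, sort, aggregations,
// highlight, track_scores) are copied verbatim into the generated search source.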
if (percolateRequest.source() != null && percolateRequest.source().length() > 0) {
try (XContentParser parser = XContentHelper.createParser(percolateRequest.source())) {
String currentFieldName = null;
XContentParser.Token token = parser.nextToken();
if (token != XContentParser.Token.START_OBJECT) {
throw new IllegalArgumentException("Unknown token [" + token + "]");
}
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if (token == XContentParser.Token.START_OBJECT) {
if ("doc".equals(currentFieldName)) {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.copyCurrentStructure(parser);
builder.flush();
documentSource = builder.bytes();
} else if ("query".equals(currentFieldName) || "filter".equals(currentFieldName)) {
XContentBuilder builder = XContentFactory.jsonBuilder();
builder.copyCurrentStructure(parser);
builder.flush();
querySource = builder.bytes();
} else if ("sort".equals(currentFieldName)) {
searchSource.field("sort");
searchSource.copyCurrentStructure(parser);
} else if ("aggregations".equals(currentFieldName)) {
searchSource.field("aggregations");
searchSource.copyCurrentStructure(parser);
} else if ("highlight".equals(currentFieldName)) {
searchSource.field("highlight");
searchSource.copyCurrentStructure(parser);
} else {
throw new IllegalArgumentException("Unknown field [" + currentFieldName + "]");
}
} else if (token == XContentParser.Token.START_ARRAY) {
if ("sort".equals(currentFieldName)) {
searchSource.field("sort");
searchSource.copyCurrentStructure(parser);
} else {
throw new IllegalArgumentException("Unknown field [" + currentFieldName + "]");
}
} else if (token.isValue()) {
if ("size".equals(currentFieldName)) {
searchSource.field("size", parser.intValue());
} else if ("sort".equals(currentFieldName)) {
searchSource.field("sort", parser.text());
} else if ("track_scores".equals(currentFieldName) || "trackScores".equals(currentFieldName)) {
searchSource.field("track_scores", parser.booleanValue());
} else {
throw new IllegalArgumentException("Unknown field [" + currentFieldName + "]");
}
} else {
throw new IllegalArgumentException("Unknown token [" + token + "]");
}
}
}
}
if (percolateRequest.onlyCount()) {
searchSource.field("size", 0);
}
PercolatorQueryBuilder percolatorQueryBuilder = new PercolatorQueryBuilder(percolateRequest.documentType(), documentSource);
if (querySource != null) {
QueryParseContext queryParseContext = new QueryParseContext(queryRegistry);
queryParseContext.reset(XContentHelper.createParser(querySource));
queryParseContext.parseFieldMatcher(parseFieldMatcher);
QueryBuilder<?> queryBuilder = queryParseContext.parseInnerQueryBuilder();
BoolQueryBuilder boolQueryBuilder = QueryBuilders.boolQuery();
boolQueryBuilder.must(queryBuilder);
boolQueryBuilder.filter(percolatorQueryBuilder);
searchSource.field("query", boolQueryBuilder);
} else {
searchSource.field("query", percolatorQueryBuilder);
}
searchSource.endObject();
searchSource.flush();
BytesReference source = searchSource.bytes();
SearchSourceBuilder searchSourceBuilder = new SearchSourceBuilder();
QueryParseContext context = new QueryParseContext(queryRegistry);
try (XContentParser parser = XContentFactory.xContent(XContentType.JSON).createParser(source)) {
context.reset(parser);
context.parseFieldMatcher(parseFieldMatcher);
searchSourceBuilder.parseXContent(parser, context, aggParsers, null);
searchRequest.source(searchSourceBuilder);
return searchRequest;
}
}
public static PercolateResponse createPercolateResponse(SearchResponse searchResponse, boolean onlyCount) {
SearchHits hits = searchResponse.getHits();
PercolateResponse.Match[] matches;
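// onlyCount requests were forced to size 0 above, so there are no hits to convert and matches stays null.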
if (onlyCount) {
matches = null;
} else {
matches = new PercolateResponse.Match[hits.getHits().length];
for (int i = 0; i < hits.getHits().length; i++) {
SearchHit hit = hits.getHits()[i];
matches[i] = new PercolateResponse.Match(new Text(hit.getIndex()), new Text(hit.getId()), hit.getScore(), hit.getHighlightFields());
}
}
return new PercolateResponse(
searchResponse.getTotalShards(), searchResponse.getSuccessfulShards(), searchResponse.getFailedShards(),
Arrays.asList(searchResponse.getShardFailures()), matches, hits.getTotalHits(), searchResponse.getTookInMillis(), (InternalAggregations) searchResponse.getAggregations()
);
}
}
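createSearchRequest above encodes one key rule: a user-supplied query keeps its scoring role as the must clause of a bool query, while the percolator query is attached as a non-scoring filter; with no user query the percolator query stands alone. The sketch below reproduces just that wrapping decision with plain strings; the "percolator", "document_type" and "document" JSON keys are assumed for illustration rather than taken from the actual query builder.

// Sketch of the query-wrapping rule in createSearchRequest: user query as
// must (scoring), percolator query as filter (matching only). JSON is built
// with plain string concatenation purely for illustration.
public class PercolateToSearchSketch {
    static String percolatorQuery(String documentType, String docSource) {
        return "{\"percolator\":{\"document_type\":\"" + documentType
                + "\",\"document\":" + docSource + "}}";
    }

    static String searchQuery(String userQuery, String documentType, String docSource) {
        String percolator = percolatorQuery(documentType, docSource);
        if (userQuery == null) {
            return percolator;
        }
        // must -> scoring, filter -> matching only, exactly as in the bool
        // query assembled above.
        return "{\"bool\":{\"must\":" + userQuery + ",\"filter\":" + percolator + "}}";
    }

    public static void main(String[] args) {
        String doc = "{\"message\":\"hello\"}";
        System.out.println(searchQuery(null, "tweet", doc));
        System.out.println(searchQuery("{\"term\":{\"user\":\"kimchy\"}}", "tweet", doc));
    }
}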

@@ -1,281 +0,0 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.percolate;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.single.shard.SingleShardRequest;
import org.elasticsearch.action.support.single.shard.TransportSingleShardAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
/**
*/
public class TransportShardMultiPercolateAction extends TransportSingleShardAction<TransportShardMultiPercolateAction.Request, TransportShardMultiPercolateAction.Response> {
private final PercolatorService percolatorService;
private static final String ACTION_NAME = MultiPercolateAction.NAME + "[shard]";
@Inject
public TransportShardMultiPercolateAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
TransportService transportService, PercolatorService percolatorService,
ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
super(settings, ACTION_NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
Request::new, ThreadPool.Names.PERCOLATE);
this.percolatorService = percolatorService;
}
@Override
protected boolean isSubAction() {
return true;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected boolean resolveIndex(Request request) {
return false;
}
@Override
protected ShardIterator shards(ClusterState state, InternalRequest request) {
return clusterService.operationRouting().getShards(
state, request.concreteIndex(), request.request().shardId(), request.request().preference
);
}
@Override
protected Response shardOperation(Request request, ShardId shardId) {
// TODO: Look into combining the shard request's docs into one in-memory index.
Response response = new Response();
response.items = new ArrayList<>(request.items.size());
for (Request.Item item : request.items) {
Response.Item responseItem;
int slot = item.slot;
try {
responseItem = new Response.Item(slot, percolatorService.percolate(item.request));
} catch (Throwable t) {
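// Shard-not-available exceptions abort the whole shard request; any other failure
// is recorded against this item only, so the remaining slots still get a result.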
if (TransportActions.isShardNotAvailableException(t)) {
throw (ElasticsearchException) t;
} else {
logger.debug("{} failed to multi percolate", t, request.shardId());
responseItem = new Response.Item(slot, t);
}
}
response.items.add(responseItem);
}
return response;
}
public static class Request extends SingleShardRequest<Request> implements IndicesRequest {
private int shardId;
private String preference;
private List<Item> items;
public Request() {
}
Request(String concreteIndex, int shardId, String preference) {
super(concreteIndex);
this.shardId = shardId;
this.preference = preference;
this.items = new ArrayList<>();
}
@Override
public ActionRequestValidationException validate() {
return super.validateNonNullIndex();
}
@Override
public String[] indices() {
List<String> indices = new ArrayList<>();
for (Item item : items) {
Collections.addAll(indices, item.request.indices());
}
return indices.toArray(new String[indices.size()]);
}
public int shardId() {
return shardId;
}
public void add(Item item) {
items.add(item);
}
public List<Item> items() {
return items;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
shardId = in.readVInt();
preference = in.readOptionalString();
int size = in.readVInt();
items = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
int slot = in.readVInt();
PercolateShardRequest shardRequest = new PercolateShardRequest();
shardRequest.readFrom(in);
Item item = new Item(slot, shardRequest);
items.add(item);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(shardId);
out.writeOptionalString(preference);
out.writeVInt(items.size());
for (Item item : items) {
out.writeVInt(item.slot);
item.request.writeTo(out);
}
}
static class Item {
private final int slot;
private final PercolateShardRequest request;
public Item(int slot, PercolateShardRequest request) {
this.slot = slot;
this.request = request;
}
public int slot() {
return slot;
}
public PercolateShardRequest request() {
return request;
}
}
}
public static class Response extends ActionResponse {
private List<Item> items;
public List<Item> items() {
return items;
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeVInt(items.size());
for (Item item : items) {
out.writeVInt(item.slot);
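// A leading boolean discriminates between a serialized shard response and a serialized error.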
if (item.response != null) {
out.writeBoolean(true);
item.response.writeTo(out);
} else {
out.writeBoolean(false);
out.writeThrowable(item.error);
}
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
int size = in.readVInt();
items = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
int slot = in.readVInt();
if (in.readBoolean()) {
PercolateShardResponse shardResponse = new PercolateShardResponse();
shardResponse.readFrom(in);
items.add(new Item(slot, shardResponse));
} else {
items.add(new Item(slot, in.readThrowable()));
}
}
}
public static class Item {
private final int slot;
private final PercolateShardResponse response;
private final Throwable error;
public Item(Integer slot, PercolateShardResponse response) {
this.slot = slot;
this.response = response;
this.error = null;
}
public Item(Integer slot, Throwable error) {
this.slot = slot;
this.error = error;
this.response = null;
}
public int slot() {
return slot;
}
public PercolateShardResponse response() {
return response;
}
public Throwable error() {
return error;
}
public boolean failed() {
return error != null;
}
}
}
}
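The Response.writeTo/readFrom pair above defines a simple wire layout: an item count, then per item the slot, a boolean discriminator, and either the response payload or the serialized throwable. Below is a rough stand-in using plain JDK streams, with DataOutputStream in place of StreamOutput, fixed-width ints in place of vints, and strings as placeholder payloads; it mirrors only the framing, not the real serialization.

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

// Sketch of the wire layout produced by Response.writeTo above.
public class ShardResponseWireSketch {
    public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        out.writeInt(2);               // item count (writeVInt in the real code)
        out.writeInt(0);               // slot of the first item
        out.writeBoolean(true);        // true -> a response payload follows
        out.writeUTF("response-0");    // placeholder for PercolateShardResponse
        out.writeInt(1);               // slot of the second item
        out.writeBoolean(false);       // false -> a throwable follows
        out.writeUTF("shard failure"); // placeholder for writeThrowable
        out.flush();
        System.out.println("encoded " + bytes.size() + " bytes");
    }
}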

@@ -25,7 +25,6 @@ import org.apache.lucene.search.TopDocs;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -34,6 +33,7 @@ import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;

@@ -21,9 +21,9 @@ package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;

@@ -23,9 +23,9 @@ import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;

@@ -21,9 +21,9 @@ package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.search.action.SearchTransportService;
import org.elasticsearch.search.controller.SearchPhaseController;

@@ -23,9 +23,9 @@ import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRunnable;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchShardTarget;

@@ -21,9 +21,9 @@ package org.elasticsearch.action.search;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;

@@ -22,9 +22,9 @@ package org.elasticsearch.action.search;
import com.carrotsearch.hppc.IntArrayList;
import org.apache.lucene.search.ScoreDoc;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.action.SearchTransportService;

@@ -22,11 +22,11 @@ package org.elasticsearch.action.search;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;

Some files were not shown because too many files have changed in this diff.