commit 6295f8e795
Merge branch 'master' into tell_me_your_plugins
@@ -90,12 +90,6 @@ public class MapperQueryParser extends QueryParser {
         this.parseContext = parseContext;
     }
 
-    public MapperQueryParser(QueryParserSettings settings, QueryParseContext parseContext) {
-        super(settings.defaultField(), settings.defaultAnalyzer());
-        this.parseContext = parseContext;
-        reset(settings);
-    }
-
     public void reset(QueryParserSettings settings) {
         this.settings = settings;
         this.field = settings.defaultField();
@@ -20,7 +20,6 @@
 package org.apache.lucene.queryparser.classic;
 
 import com.carrotsearch.hppc.ObjectFloatHashMap;
-
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.search.FuzzyQuery;
 import org.apache.lucene.search.MultiTermQuery;
@@ -28,7 +27,6 @@ import org.apache.lucene.util.automaton.Operations;
 import org.elasticsearch.common.unit.Fuzziness;
 import org.joda.time.DateTimeZone;
 
-import java.util.Collection;
 import java.util.List;
 import java.util.Locale;
 
@@ -69,16 +67,10 @@ public class QueryParserSettings {
     private DateTimeZone timeZone;
 
     List<String> fields = null;
-    Collection<String> queryTypes = null;
    ObjectFloatHashMap<String> boosts = null;
    float tieBreaker = 0.0f;
    boolean useDisMax = true;
 
-    public boolean isCacheable() {
-        // a hack for now :) to determine if a query string is cacheable
-        return !queryString.contains("now");
-    }
-
    public String queryString() {
        return queryString;
    }
@@ -271,14 +263,6 @@ public class QueryParserSettings {
         this.fields = fields;
     }
 
-    public Collection<String> queryTypes() {
-        return queryTypes;
-    }
-
-    public void queryTypes(Collection<String> queryTypes) {
-        this.queryTypes = queryTypes;
-    }
-
     public ObjectFloatHashMap<String> boosts() {
         return boosts;
     }
@@ -371,7 +355,6 @@ public class QueryParserSettings {
         if (useDisMax != that.useDisMax) return false;
         if (boosts != null ? !boosts.equals(that.boosts) : that.boosts != null) return false;
         if (fields != null ? !fields.equals(that.fields) : that.fields != null) return false;
-        if (queryTypes != null ? !queryTypes.equals(that.queryTypes) : that.queryTypes != null) return false;
 
         return true;
     }
@@ -398,7 +381,6 @@ public class QueryParserSettings {
         result = 31 * result + (analyzeWildcard ? 1 : 0);
 
         result = 31 * result + (fields != null ? fields.hashCode() : 0);
-        result = 31 * result + (queryTypes != null ? queryTypes.hashCode() : 0);
         result = 31 * result + (boosts != null ? boosts.hashCode() : 0);
         result = 31 * result + (tieBreaker != +0.0f ? Float.floatToIntBits(tieBreaker) : 0);
         result = 31 * result + (useDisMax ? 1 : 0);
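Aside: the hashCode hunk above uses the standard 31-multiplier accumulation. A reduced, standalone sketch of the idiom for reference (the class and field names here are illustrative, not from the commit):

import java.util.List;

class HashCodeIdiomSketch {
    List<String> fields;
    float tieBreaker;
    boolean useDisMax;

    @Override
    public int hashCode() {
        int result = 0;
        // Null-safe, order-sensitive accumulation: multiply by an odd prime,
        // then mix in each field's own hash (0 for null, bit pattern for floats).
        result = 31 * result + (fields != null ? fields.hashCode() : 0);
        result = 31 * result + (tieBreaker != +0.0f ? Float.floatToIntBits(tieBreaker) : 0);
        result = 31 * result + (useDisMax ? 1 : 0);
        return result;
    }
}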
@@ -19,9 +19,7 @@
 
 package org.elasticsearch.action;
 
-import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
-
 import org.elasticsearch.action.admin.cluster.health.ClusterHealthAction;
 import org.elasticsearch.action.admin.cluster.health.TransportClusterHealthAction;
 import org.elasticsearch.action.admin.cluster.node.hotthreads.NodesHotThreadsAction;
@@ -85,7 +83,11 @@ import org.elasticsearch.action.admin.indices.flush.FlushAction;
 import org.elasticsearch.action.admin.indices.flush.TransportFlushAction;
 import org.elasticsearch.action.admin.indices.get.GetIndexAction;
 import org.elasticsearch.action.admin.indices.get.TransportGetIndexAction;
-import org.elasticsearch.action.admin.indices.mapping.get.*;
+import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.GetMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.TransportGetFieldMappingsAction;
+import org.elasticsearch.action.admin.indices.mapping.get.TransportGetFieldMappingsIndexAction;
+import org.elasticsearch.action.admin.indices.mapping.get.TransportGetMappingsAction;
 import org.elasticsearch.action.admin.indices.mapping.put.PutMappingAction;
 import org.elasticsearch.action.admin.indices.mapping.put.TransportPutMappingAction;
 import org.elasticsearch.action.admin.indices.open.OpenIndexAction;
@@ -96,14 +98,14 @@ import org.elasticsearch.action.admin.indices.recovery.RecoveryAction;
 import org.elasticsearch.action.admin.indices.recovery.TransportRecoveryAction;
 import org.elasticsearch.action.admin.indices.refresh.RefreshAction;
 import org.elasticsearch.action.admin.indices.refresh.TransportRefreshAction;
+import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
+import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.admin.indices.segments.IndicesSegmentsAction;
 import org.elasticsearch.action.admin.indices.segments.TransportIndicesSegmentsAction;
 import org.elasticsearch.action.admin.indices.settings.get.GetSettingsAction;
 import org.elasticsearch.action.admin.indices.settings.get.TransportGetSettingsAction;
 import org.elasticsearch.action.admin.indices.settings.put.TransportUpdateSettingsAction;
 import org.elasticsearch.action.admin.indices.settings.put.UpdateSettingsAction;
-import org.elasticsearch.action.admin.indices.shards.IndicesShardStoresAction;
-import org.elasticsearch.action.admin.indices.shards.TransportIndicesShardStoresAction;
 import org.elasticsearch.action.admin.indices.stats.IndicesStatsAction;
 import org.elasticsearch.action.admin.indices.stats.TransportIndicesStatsAction;
 import org.elasticsearch.action.admin.indices.template.delete.DeleteIndexTemplateAction;
@@ -139,7 +141,11 @@ import org.elasticsearch.action.explain.ExplainAction;
 import org.elasticsearch.action.explain.TransportExplainAction;
 import org.elasticsearch.action.fieldstats.FieldStatsAction;
 import org.elasticsearch.action.fieldstats.TransportFieldStatsTransportAction;
-import org.elasticsearch.action.get.*;
+import org.elasticsearch.action.get.GetAction;
+import org.elasticsearch.action.get.MultiGetAction;
+import org.elasticsearch.action.get.TransportGetAction;
+import org.elasticsearch.action.get.TransportMultiGetAction;
+import org.elasticsearch.action.get.TransportShardMultiGetAction;
 import org.elasticsearch.action.index.IndexAction;
 import org.elasticsearch.action.index.TransportIndexAction;
 import org.elasticsearch.action.indexedscripts.delete.DeleteIndexedScriptAction;
@@ -148,16 +154,38 @@ import org.elasticsearch.action.indexedscripts.get.GetIndexedScriptAction;
 import org.elasticsearch.action.indexedscripts.get.TransportGetIndexedScriptAction;
 import org.elasticsearch.action.indexedscripts.put.PutIndexedScriptAction;
 import org.elasticsearch.action.indexedscripts.put.TransportPutIndexedScriptAction;
-import org.elasticsearch.action.percolate.*;
-import org.elasticsearch.action.search.*;
-import org.elasticsearch.action.search.type.*;
+import org.elasticsearch.action.percolate.MultiPercolateAction;
+import org.elasticsearch.action.percolate.PercolateAction;
+import org.elasticsearch.action.percolate.TransportMultiPercolateAction;
+import org.elasticsearch.action.percolate.TransportPercolateAction;
+import org.elasticsearch.action.percolate.TransportShardMultiPercolateAction;
+import org.elasticsearch.action.search.ClearScrollAction;
+import org.elasticsearch.action.search.MultiSearchAction;
+import org.elasticsearch.action.search.SearchAction;
+import org.elasticsearch.action.search.SearchScrollAction;
+import org.elasticsearch.action.search.TransportClearScrollAction;
+import org.elasticsearch.action.search.TransportMultiSearchAction;
+import org.elasticsearch.action.search.TransportSearchAction;
+import org.elasticsearch.action.search.TransportSearchScrollAction;
+import org.elasticsearch.action.search.type.TransportSearchDfsQueryAndFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchDfsQueryThenFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchQueryAndFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchQueryThenFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchScanAction;
+import org.elasticsearch.action.search.type.TransportSearchScrollQueryAndFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchScrollQueryThenFetchAction;
+import org.elasticsearch.action.search.type.TransportSearchScrollScanAction;
 import org.elasticsearch.action.suggest.SuggestAction;
 import org.elasticsearch.action.suggest.TransportSuggestAction;
 import org.elasticsearch.action.support.ActionFilter;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.AutoCreateIndex;
 import org.elasticsearch.action.support.TransportAction;
-import org.elasticsearch.action.termvectors.*;
+import org.elasticsearch.action.termvectors.MultiTermVectorsAction;
+import org.elasticsearch.action.termvectors.TermVectorsAction;
+import org.elasticsearch.action.termvectors.TransportMultiTermVectorsAction;
+import org.elasticsearch.action.termvectors.TransportShardMultiTermsVectorAction;
+import org.elasticsearch.action.termvectors.TransportTermVectorsAction;
 import org.elasticsearch.action.termvectors.dfs.TransportDfsOnlyAction;
 import org.elasticsearch.action.update.TransportUpdateAction;
 import org.elasticsearch.action.update.UpdateAction;
@@ -165,6 +193,7 @@ import org.elasticsearch.common.inject.AbstractModule;
 import org.elasticsearch.common.inject.multibindings.MapBinder;
 import org.elasticsearch.common.inject.multibindings.Multibinder;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 
@@ -174,7 +203,7 @@ import java.util.Map;
 public class ActionModule extends AbstractModule {
 
     private final Map<String, ActionEntry> actions = Maps.newHashMap();
-    private final List<Class<? extends ActionFilter>> actionFilters = Lists.newArrayList();
+    private final List<Class<? extends ActionFilter>> actionFilters = new ArrayList<>();
 
     static class ActionEntry<Request extends ActionRequest, Response extends ActionResponse> {
         public final GenericAction<Request, Response> action;
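Aside: the `Lists.newArrayList()` to `new ArrayList<>()` substitution above recurs throughout this merge (it removes the Guava collection factories). A minimal standalone sketch of the pattern, with illustrative names not taken from the commit:

import java.util.ArrayList;
import java.util.List;

class GuavaListsRemovalSketch {
    // Before: Guava's factory inferred the type parameter for pre-Java-7 code.
    //   List<String> names = com.google.common.collect.Lists.newArrayList();

    // After: the diamond operator (Java 7+) gives the same inference
    // without the Guava dependency.
    List<String> names = new ArrayList<>();
}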
@@ -31,16 +31,19 @@ import org.elasticsearch.cluster.routing.ShardRouting;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.unit.TimeValue;
-import org.elasticsearch.common.xcontent.*;
+import org.elasticsearch.common.xcontent.StatusToXContent;
+import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.common.xcontent.XContentBuilderString;
+import org.elasticsearch.common.xcontent.XContentFactory;
 import org.elasticsearch.rest.RestStatus;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Locale;
 import java.util.Map;
 
-import static com.google.common.collect.Lists.newArrayList;
 import static org.elasticsearch.action.admin.cluster.health.ClusterIndexHealth.readClusterIndexHealth;
 
 /**
@@ -152,7 +155,7 @@ public class ClusterHealthResponse extends ActionResponse implements Iterable<Cl
      * All the validation failures, including index level validation failures.
      */
     public List<String> getAllValidationFailures() {
-        List<String> allFailures = newArrayList(getValidationFailures());
+        List<String> allFailures = new ArrayList<>(getValidationFailures());
         for (ClusterIndexHealth indexHealth : indices.values()) {
             allFailures.addAll(indexHealth.getValidationFailures());
         }
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.node.hotthreads;
 
-import com.google.common.collect.Lists;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
@@ -36,6 +35,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicReferenceArray;
 
@@ -54,7 +54,7 @@ public class TransportNodesHotThreadsAction extends TransportNodesAction<NodesHo
 
     @Override
     protected NodesHotThreadsResponse newResponse(NodesHotThreadsRequest request, AtomicReferenceArray responses) {
-        final List<NodeHotThreads> nodes = Lists.newArrayList();
+        final List<NodeHotThreads> nodes = new ArrayList<>();
         for (int i = 0; i < responses.length(); i++) {
             Object resp = responses.get(i);
             if (resp instanceof NodeHotThreads) {
@@ -19,7 +19,6 @@
 
 package org.elasticsearch.action.admin.cluster.node.stats;
 
-import com.google.common.collect.Lists;
 import org.elasticsearch.action.support.ActionFilters;
 import org.elasticsearch.action.support.nodes.BaseNodeRequest;
 import org.elasticsearch.action.support.nodes.TransportNodesAction;
@@ -35,6 +34,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicReferenceArray;
 
@@ -56,7 +56,7 @@ public class TransportNodesStatsAction extends TransportNodesAction<NodesStatsRe
 
     @Override
     protected NodesStatsResponse newResponse(NodesStatsRequest nodesInfoRequest, AtomicReferenceArray responses) {
-        final List<NodeStats> nodeStats = Lists.newArrayList();
+        final List<NodeStats> nodeStats = new ArrayList<>();
         for (int i = 0; i < responses.length(); i++) {
             Object resp = responses.get(i);
             if (resp instanceof NodeStats) {
@@ -21,8 +21,8 @@ package org.elasticsearch.action.admin.cluster.snapshots.status;
 
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.ImmutableMap;
-import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.cluster.SnapshotsInProgress.State;
+import org.elasticsearch.cluster.metadata.SnapshotId;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.io.stream.Streamable;
@@ -32,11 +32,11 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
 import org.elasticsearch.common.xcontent.XContentFactory;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
 
-import static com.google.common.collect.Lists.newArrayList;
 import static com.google.common.collect.Sets.newHashSet;
 
 /**
@@ -109,7 +109,7 @@ public class SnapshotStatus implements ToXContent, Streamable {
         }
 
         for (String index : indices) {
-            List<SnapshotIndexShardStatus> shards = newArrayList();
+            List<SnapshotIndexShardStatus> shards = new ArrayList<>();
             for (SnapshotIndexShardStatus shard : this.shards) {
                 if (shard.getIndex().equals(index)) {
                     shards.add(shard);
@@ -20,12 +20,15 @@
 package org.elasticsearch.action.admin.cluster.snapshots.status;
 
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.ActionRequest;
 import org.elasticsearch.action.FailedNodeException;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.nodes.*;
+import org.elasticsearch.action.support.nodes.BaseNodeRequest;
+import org.elasticsearch.action.support.nodes.BaseNodeResponse;
+import org.elasticsearch.action.support.nodes.BaseNodesRequest;
+import org.elasticsearch.action.support.nodes.BaseNodesResponse;
+import org.elasticsearch.action.support.nodes.TransportNodesAction;
 import org.elasticsearch.cluster.ClusterName;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
@@ -42,6 +45,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicReferenceArray;
@@ -82,8 +86,8 @@ public class TransportNodesSnapshotsStatus extends TransportNodesAction<Transpor
 
     @Override
     protected NodesSnapshotStatus newResponse(Request request, AtomicReferenceArray responses) {
-        final List<NodeSnapshotStatus> nodesList = Lists.newArrayList();
-        final List<FailedNodeException> failures = Lists.newArrayList();
+        final List<NodeSnapshotStatus> nodesList = new ArrayList<>();
+        final List<FailedNodeException> failures = new ArrayList<>();
         for (int i = 0; i < responses.length(); i++) {
             Object resp = responses.get(i);
             if (resp instanceof NodeSnapshotStatus) { // will also filter out null response for unallocated ones
@@ -21,7 +21,6 @@ package org.elasticsearch.action.admin.cluster.stats;
 
 import com.carrotsearch.hppc.ObjectObjectHashMap;
 import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
-
 import org.elasticsearch.action.admin.indices.stats.CommonStats;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
@@ -67,10 +66,10 @@ public class ClusterStatsIndices implements ToXContent, Streamable {
 
         for (ClusterStatsNodeResponse r : nodeResponses) {
             for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
-                ShardStats indexShardStats = countsPerIndex.get(shardStats.getIndex());
+                ShardStats indexShardStats = countsPerIndex.get(shardStats.getShardRouting().getIndex());
                 if (indexShardStats == null) {
                     indexShardStats = new ShardStats();
-                    countsPerIndex.put(shardStats.getIndex(), indexShardStats);
+                    countsPerIndex.put(shardStats.getShardRouting().getIndex(), indexShardStats);
                 }
 
                 indexShardStats.total++;
@@ -106,7 +106,7 @@ public class TransportClusterStatsAction extends TransportNodesAction<ClusterSta
             for (IndexShard indexShard : indexService) {
                 if (indexShard.routingEntry() != null && indexShard.routingEntry().active()) {
                     // only report on fully started shards
-                    shardsStats.add(new ShardStats(indexShard, indexShard.routingEntry(), SHARD_STATS_FLAGS));
+                    shardsStats.add(new ShardStats(indexShard, SHARD_STATS_FLAGS));
                 }
             }
         }
@@ -21,8 +21,6 @@ package org.elasticsearch.action.admin.indices.alias;
 
 import com.carrotsearch.hppc.cursors.ObjectCursor;
 import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Lists;
-
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.AliasesRequest;
 import org.elasticsearch.action.CompositeIndicesRequest;
@@ -54,7 +52,7 @@ import static org.elasticsearch.cluster.metadata.AliasAction.readAliasAction;
  */
 public class IndicesAliasesRequest extends AcknowledgedRequest<IndicesAliasesRequest> implements CompositeIndicesRequest {
 
-    private List<AliasActions> allAliasActions = Lists.newArrayList();
+    private List<AliasActions> allAliasActions = new ArrayList<>();
 
     //indices options that require every specified index to exist, expand wildcards only to open indices and
     //don't allow that no indices are resolved from wildcard expressions
@@ -18,7 +18,6 @@
  */
 package org.elasticsearch.action.admin.indices.analyze;
 
-import com.google.common.collect.Lists;
 import org.apache.lucene.analysis.Analyzer;
 import org.apache.lucene.analysis.TokenStream;
 import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
@@ -39,7 +38,13 @@ import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.common.inject.Inject;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
-import org.elasticsearch.index.analysis.*;
+import org.elasticsearch.index.analysis.CharFilterFactory;
+import org.elasticsearch.index.analysis.CharFilterFactoryFactory;
+import org.elasticsearch.index.analysis.CustomAnalyzer;
+import org.elasticsearch.index.analysis.TokenFilterFactory;
+import org.elasticsearch.index.analysis.TokenFilterFactoryFactory;
+import org.elasticsearch.index.analysis.TokenizerFactory;
+import org.elasticsearch.index.analysis.TokenizerFactoryFactory;
 import org.elasticsearch.index.mapper.MappedFieldType;
 import org.elasticsearch.index.mapper.internal.AllFieldMapper;
 import org.elasticsearch.index.shard.ShardId;
@@ -49,6 +54,7 @@ import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.List;
 
 /**
@@ -210,7 +216,7 @@ public class TransportAnalyzeAction extends TransportSingleShardAction<AnalyzeRe
             throw new IllegalArgumentException("failed to find analyzer");
         }
 
-        List<AnalyzeResponse.AnalyzeToken> tokens = Lists.newArrayList();
+        List<AnalyzeResponse.AnalyzeToken> tokens = new ArrayList<>();
         TokenStream stream = null;
         int lastPosition = -1;
         int lastOffset = 0;
@@ -1,92 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.cache.clear;
-
-import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.shard.ShardId;
-
-import java.io.IOException;
-
-/**
- *
- */
-class ShardClearIndicesCacheRequest extends BroadcastShardRequest {
-
-    private boolean queryCache = false;
-    private boolean fieldDataCache = false;
-    private boolean recycler;
-    private boolean requestCache = false;
-
-    private String[] fields = null;
-
-    ShardClearIndicesCacheRequest() {
-    }
-
-    ShardClearIndicesCacheRequest(ShardId shardId, ClearIndicesCacheRequest request) {
-        super(shardId, request);
-        queryCache = request.queryCache();
-        fieldDataCache = request.fieldDataCache();
-        fields = request.fields();
-        recycler = request.recycler();
-        requestCache = request.requestCache();
-    }
-
-    public boolean queryCache() {
-        return queryCache;
-    }
-
-    public boolean requestCache() {
-        return requestCache;
-    }
-
-    public boolean fieldDataCache() {
-        return this.fieldDataCache;
-    }
-
-    public boolean recycler() {
-        return this.recycler;
-    }
-
-    public String[] fields() {
-        return this.fields;
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        queryCache = in.readBoolean();
-        fieldDataCache = in.readBoolean();
-        recycler = in.readBoolean();
-        fields = in.readStringArray();
-        requestCache = in.readBoolean();
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        out.writeBoolean(queryCache);
-        out.writeBoolean(fieldDataCache);
-        out.writeBoolean(recycler);
-        out.writeStringArrayNullable(fields);
-        out.writeBoolean(requestCache);
-    }
-}
@@ -21,17 +21,16 @@ package org.elasticsearch.action.admin.indices.cache.clear;
 
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
+import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
@@ -40,15 +39,14 @@ import org.elasticsearch.indices.cache.request.IndicesRequestCache;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
 
-import static com.google.common.collect.Lists.newArrayList;
 
 /**
  * Indices clear cache action.
  */
-public class TransportClearIndicesCacheAction extends TransportBroadcastAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, ShardClearIndicesCacheRequest, ShardClearIndicesCacheResponse> {
+public class TransportClearIndicesCacheAction extends TransportBroadcastByNodeAction<ClearIndicesCacheRequest, ClearIndicesCacheResponse, TransportBroadcastByNodeAction.EmptyResult> {
 
     private final IndicesService indicesService;
     private final IndicesRequestCache indicesRequestCache;
@@ -59,48 +57,33 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction<C
                                            IndicesRequestCache indicesQueryCache, ActionFilters actionFilters,
                                            IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, ClearIndicesCacheAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
-                ClearIndicesCacheRequest.class, ShardClearIndicesCacheRequest.class, ThreadPool.Names.MANAGEMENT);
+                ClearIndicesCacheRequest.class, ThreadPool.Names.MANAGEMENT);
         this.indicesService = indicesService;
         this.indicesRequestCache = indicesQueryCache;
     }
 
     @Override
-    protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
-        int successfulShards = 0;
-        int failedShards = 0;
-        List<ShardOperationFailedException> shardFailures = null;
-        for (int i = 0; i < shardsResponses.length(); i++) {
-            Object shardResponse = shardsResponses.get(i);
-            if (shardResponse == null) {
-                // simply ignore non active shards
-            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
-                failedShards++;
-                if (shardFailures == null) {
-                    shardFailures = newArrayList();
-                }
-                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
-            } else {
-                successfulShards++;
-            }
-        }
-        return new ClearIndicesCacheResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+    protected EmptyResult readShardResult(StreamInput in) throws IOException {
+        return EmptyResult.readEmptyResultFrom(in);
     }
 
     @Override
-    protected ShardClearIndicesCacheRequest newShardRequest(int numShards, ShardRouting shard, ClearIndicesCacheRequest request) {
-        return new ShardClearIndicesCacheRequest(shard.shardId(), request);
+    protected ClearIndicesCacheResponse newResponse(ClearIndicesCacheRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures) {
+        return new ClearIndicesCacheResponse(totalShards, successfulShards, failedShards, shardFailures);
     }
 
     @Override
-    protected ShardClearIndicesCacheResponse newShardResponse() {
-        return new ShardClearIndicesCacheResponse();
+    protected ClearIndicesCacheRequest readRequestFrom(StreamInput in) throws IOException {
+        final ClearIndicesCacheRequest request = new ClearIndicesCacheRequest();
+        request.readFrom(in);
+        return request;
     }
 
     @Override
-    protected ShardClearIndicesCacheResponse shardOperation(ShardClearIndicesCacheRequest request) {
-        IndexService service = indicesService.indexService(request.shardId().getIndex());
+    protected EmptyResult shardOperation(ClearIndicesCacheRequest request, ShardRouting shardRouting) {
+        IndexService service = indicesService.indexService(shardRouting.getIndex());
         if (service != null) {
-            IndexShard shard = service.shard(request.shardId().id());
+            IndexShard shard = service.shard(shardRouting.id());
             boolean clearedAtLeastOne = false;
             if (request.queryCache()) {
                 clearedAtLeastOne = true;
@@ -138,15 +121,15 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction<C
                 }
             }
         }
-        return new ShardClearIndicesCacheResponse(request.shardId());
+        return EmptyResult.INSTANCE;
    }
 
    /**
     * The refresh request works against *all* shards.
     */
    @Override
-    protected GroupShardsIterator shards(ClusterState clusterState, ClearIndicesCacheRequest request, String[] concreteIndices) {
-        return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
+    protected ShardsIterator shards(ClusterState clusterState, ClearIndicesCacheRequest request, String[] concreteIndices) {
+        return clusterState.routingTable().allShards(concreteIndices);
    }
 
    @Override
@@ -158,5 +141,4 @@ public class TransportClearIndicesCacheAction extends TransportBroadcastAction<C
     protected ClusterBlockException checkRequestBlock(ClusterState state, ClearIndicesCacheRequest request, String[] concreteIndices) {
         return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_WRITE, concreteIndices);
     }
-
 }
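Aside: this rewrite (and the optimize and recovery ones below) moves from TransportBroadcastAction, where the subclass folded an AtomicReferenceArray of raw per-shard responses itself, to TransportBroadcastByNodeAction, where the base class tallies total/successful/failed shard counts and hands the subclass a ready list of typed per-shard results. A toy, framework-free sketch of that reduction contract, using placeholder names that are not from the commit:

import java.util.ArrayList;
import java.util.List;

class BroadcastByNodeReduceSketch {
    static class Response {
        final int total, successful, failed;
        final List<String> results;
        Response(int total, int successful, int failed, List<String> results) {
            this.total = total;
            this.successful = successful;
            this.failed = failed;
            this.results = results;
        }
    }

    // Mirrors the shape of the new newResponse(...) hook: shard counts arrive
    // pre-computed, so the subclass only aggregates the results it cares about.
    static Response newResponse(int total, int successful, int failed, List<String> perShardResults) {
        List<String> kept = new ArrayList<>();
        for (String result : perShardResults) {
            if (result != null) { // a null result means the shard produced nothing
                kept.add(result);
            }
        }
        return new Response(total, successful, failed, kept);
    }
}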
@@ -38,11 +38,10 @@ import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.atomic.AtomicReferenceArray;
 
-import static com.google.common.collect.Lists.newArrayList;
-
 /**
  * Flush Action.
  */
@@ -71,7 +70,7 @@ public class TransportFlushAction extends TransportBroadcastAction<FlushRequest,
             } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                 failedShards++;
                 if (shardFailures == null) {
-                    shardFailures = newArrayList();
+                    shardFailures = new ArrayList<>();
                 }
                 shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
             } else {
@@ -22,7 +22,6 @@ package org.elasticsearch.action.admin.indices.mapping.get;
 import com.google.common.base.Predicate;
 import com.google.common.collect.Collections2;
 import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
 import org.elasticsearch.ElasticsearchException;
 import org.elasticsearch.action.admin.indices.mapping.get.GetFieldMappingsResponse.FieldMappingMetaData;
 import org.elasticsearch.action.support.ActionFilters;
@@ -55,6 +54,8 @@ import java.io.IOException;
 import java.util.Collection;
 import java.util.Iterator;
 
+import static org.elasticsearch.common.util.CollectionUtils.newLinkedList;
+
 /**
  * Transport action used to retrieve the mappings related to fields that belong to a specific index
  */
@@ -178,7 +179,7 @@ public class TransportGetFieldMappingsIndexAction extends TransportSingleShardAc
         } else if (Regex.isSimpleMatchPattern(field)) {
             // go through the field mappers 3 times, to make sure we give preference to the resolve order: full name, index name, name.
             // also make sure we only store each mapper once.
-            Collection<FieldMapper> remainingFieldMappers = Lists.newLinkedList(allFieldMappers);
+            Collection<FieldMapper> remainingFieldMappers = newLinkedList(allFieldMappers);
             for (Iterator<FieldMapper> it = remainingFieldMappers.iterator(); it.hasNext(); ) {
                 final FieldMapper fieldMapper = it.next();
                 if (Regex.simpleMatch(field, fieldMapper.fieldType().names().fullName())) {
@@ -1,60 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.optimize;
-
-
-import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.index.shard.ShardId;
-
-import java.io.IOException;
-
-/**
- *
- */
-final class ShardOptimizeRequest extends BroadcastShardRequest {
-
-    private OptimizeRequest request = new OptimizeRequest();
-
-    ShardOptimizeRequest() {
-    }
-
-    ShardOptimizeRequest(ShardId shardId, OptimizeRequest request) {
-        super(shardId, request);
-        this.request = request;
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        request.readFrom(in);
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        request.writeTo(out);
-    }
-
-    public OptimizeRequest optimizeRequest() {
-        return this.request;
-    }
-}
@@ -21,32 +21,29 @@ package org.elasticsearch.action.admin.indices.optimize;
 
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
+import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.shard.IndexShard;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.List;
-import java.util.concurrent.atomic.AtomicReferenceArray;
 
-import static com.google.common.collect.Lists.newArrayList;
-
 /**
  * Optimize index/indices action.
  */
-public class TransportOptimizeAction extends TransportBroadcastAction<OptimizeRequest, OptimizeResponse, ShardOptimizeRequest, ShardOptimizeResponse> {
+public class TransportOptimizeAction extends TransportBroadcastByNodeAction<OptimizeRequest, OptimizeResponse, TransportBroadcastByNodeAction.EmptyResult> {
 
     private final IndicesService indicesService;
 
@@ -55,55 +52,40 @@ public class TransportOptimizeAction extends TransportBroadcastAction<OptimizeRe
                                   TransportService transportService, IndicesService indicesService,
                                   ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, OptimizeAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
-                OptimizeRequest.class, ShardOptimizeRequest.class, ThreadPool.Names.OPTIMIZE);
+                OptimizeRequest.class, ThreadPool.Names.OPTIMIZE);
         this.indicesService = indicesService;
     }
 
     @Override
-    protected OptimizeResponse newResponse(OptimizeRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
-        int successfulShards = 0;
-        int failedShards = 0;
-        List<ShardOperationFailedException> shardFailures = null;
-        for (int i = 0; i < shardsResponses.length(); i++) {
-            Object shardResponse = shardsResponses.get(i);
-            if (shardResponse == null) {
-                // a non active shard, ignore...
-            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
-                failedShards++;
-                if (shardFailures == null) {
-                    shardFailures = newArrayList();
-                }
-                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
-            } else {
-                successfulShards++;
-            }
-        }
-        return new OptimizeResponse(shardsResponses.length(), successfulShards, failedShards, shardFailures);
+    protected EmptyResult readShardResult(StreamInput in) throws IOException {
+        return EmptyResult.readEmptyResultFrom(in);
     }
 
     @Override
-    protected ShardOptimizeRequest newShardRequest(int numShards, ShardRouting shard, OptimizeRequest request) {
-        return new ShardOptimizeRequest(shard.shardId(), request);
+    protected OptimizeResponse newResponse(OptimizeRequest request, int totalShards, int successfulShards, int failedShards, List<EmptyResult> responses, List<ShardOperationFailedException> shardFailures) {
+        return new OptimizeResponse(totalShards, successfulShards, failedShards, shardFailures);
    }
 
    @Override
-    protected ShardOptimizeResponse newShardResponse() {
-        return new ShardOptimizeResponse();
+    protected OptimizeRequest readRequestFrom(StreamInput in) throws IOException {
+        final OptimizeRequest request = new OptimizeRequest();
+        request.readFrom(in);
+        return request;
    }
 
    @Override
-    protected ShardOptimizeResponse shardOperation(ShardOptimizeRequest request) {
-        IndexShard indexShard = indicesService.indexServiceSafe(request.shardId().getIndex()).shardSafe(request.shardId().id());
-        indexShard.optimize(request.optimizeRequest());
-        return new ShardOptimizeResponse(request.shardId());
+    protected EmptyResult shardOperation(OptimizeRequest request, ShardRouting shardRouting) {
+        IndexShard indexShard = indicesService.indexServiceSafe(shardRouting.shardId().getIndex()).shardSafe(shardRouting.shardId().id());
+        indexShard.optimize(request);
+        return EmptyResult.INSTANCE;
    }
 
    /**
     * The refresh request works against *all* shards.
     */
    @Override
-    protected GroupShardsIterator shards(ClusterState clusterState, OptimizeRequest request, String[] concreteIndices) {
-        return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
+    protected ShardsIterator shards(ClusterState clusterState, OptimizeRequest request, String[] concreteIndices) {
+        return clusterState.routingTable().allShards(concreteIndices);
    }
 
    @Override
@@ -25,6 +25,7 @@ import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 import org.elasticsearch.common.xcontent.ToXContent;
 import org.elasticsearch.common.xcontent.XContentBuilder;
+import org.elasticsearch.indices.recovery.RecoveryState;
 
 import java.io.IOException;
 import java.util.ArrayList;
@@ -38,7 +39,7 @@ import java.util.Map;
 public class RecoveryResponse extends BroadcastResponse implements ToXContent {
 
     private boolean detailed = false;
-    private Map<String, List<ShardRecoveryResponse>> shardResponses = new HashMap<>();
+    private Map<String, List<RecoveryState>> shardRecoveryStates = new HashMap<>();
 
     public RecoveryResponse() { }
 
@@ -50,18 +51,18 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent {
      * @param successfulShards    Count of shards successfully processed
      * @param failedShards        Count of shards which failed to process
      * @param detailed            Display detailed metrics
-     * @param shardResponses      Map of indices to shard recovery information
+     * @param shardRecoveryStates Map of indices to shard recovery information
      * @param shardFailures       List of failures processing shards
      */
     public RecoveryResponse(int totalShards, int successfulShards, int failedShards, boolean detailed,
-                            Map<String, List<ShardRecoveryResponse>> shardResponses, List<ShardOperationFailedException> shardFailures) {
+                            Map<String, List<RecoveryState>> shardRecoveryStates, List<ShardOperationFailedException> shardFailures) {
         super(totalShards, successfulShards, failedShards, shardFailures);
-        this.shardResponses = shardResponses;
+        this.shardRecoveryStates = shardRecoveryStates;
         this.detailed = detailed;
     }
 
     public boolean hasRecoveries() {
-        return shardResponses.size() > 0;
+        return shardRecoveryStates.size() > 0;
     }
 
     public boolean detailed() {
@@ -72,23 +73,23 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent {
         this.detailed = detailed;
     }
 
-    public Map<String, List<ShardRecoveryResponse>> shardResponses() {
-        return shardResponses;
+    public Map<String, List<RecoveryState>> shardRecoveryStates() {
+        return shardRecoveryStates;
    }
 
    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
        if (hasRecoveries()) {
-            for (String index : shardResponses.keySet()) {
-                List<ShardRecoveryResponse> responses = shardResponses.get(index);
-                if (responses == null || responses.size() == 0) {
+            for (String index : shardRecoveryStates.keySet()) {
+                List<RecoveryState> recoveryStates = shardRecoveryStates.get(index);
+                if (recoveryStates == null || recoveryStates.size() == 0) {
                    continue;
                }
                builder.startObject(index);
                builder.startArray("shards");
-                for (ShardRecoveryResponse recoveryResponse : responses) {
+                for (RecoveryState recoveryState : recoveryStates) {
                    builder.startObject();
-                    recoveryResponse.toXContent(builder, params);
+                    recoveryState.toXContent(builder, params);
                    builder.endObject();
                }
                builder.endArray();
@@ -101,12 +102,12 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent {
    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
-        out.writeVInt(shardResponses.size());
-        for (Map.Entry<String, List<ShardRecoveryResponse>> entry : shardResponses.entrySet()) {
+        out.writeVInt(shardRecoveryStates.size());
+        for (Map.Entry<String, List<RecoveryState>> entry : shardRecoveryStates.entrySet()) {
            out.writeString(entry.getKey());
            out.writeVInt(entry.getValue().size());
-            for (ShardRecoveryResponse recoveryResponse : entry.getValue()) {
-                recoveryResponse.writeTo(out);
+            for (RecoveryState recoveryState : entry.getValue()) {
+                recoveryState.writeTo(out);
            }
        }
    }
@@ -118,11 +119,11 @@ public class RecoveryResponse extends BroadcastResponse implements ToXContent {
        for (int i = 0; i < size; i++) {
            String s = in.readString();
            int listSize = in.readVInt();
-            List<ShardRecoveryResponse> list = new ArrayList<>(listSize);
+            List<RecoveryState> list = new ArrayList<>(listSize);
            for (int j = 0; j < listSize; j++) {
-                list.add(ShardRecoveryResponse.readShardRecoveryResponse(in));
+                list.add(RecoveryState.readRecoveryState(in));
            }
-            shardResponses.put(s, list);
+            shardRecoveryStates.put(s, list);
        }
    }
 }
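Aside: the writeTo/readFrom pair above uses the usual length-prefixed encoding for a Map of lists. A minimal sketch of the same scheme over the JDK's DataOutput/DataInput (stand-ins for Elasticsearch's StreamOutput/StreamInput, and without the variable-length int packing that writeVInt/readVInt provide):

import java.io.*;
import java.util.*;

class MapWireFormatSketch {
    // Write: map size, then for each entry the key, the list size, and each element.
    static void write(DataOutput out, Map<String, List<String>> map) throws IOException {
        out.writeInt(map.size());
        for (Map.Entry<String, List<String>> e : map.entrySet()) {
            out.writeUTF(e.getKey());
            out.writeInt(e.getValue().size());
            for (String v : e.getValue()) {
                out.writeUTF(v);
            }
        }
    }

    // Read: the mirror image, pre-sizing each list from its length prefix.
    static Map<String, List<String>> read(DataInput in) throws IOException {
        int size = in.readInt();
        Map<String, List<String>> map = new HashMap<>();
        for (int i = 0; i < size; i++) {
            String key = in.readUTF();
            int listSize = in.readInt();
            List<String> list = new ArrayList<>(listSize);
            for (int j = 0; j < listSize; j++) {
                list.add(in.readUTF());
            }
            map.put(key, list);
        }
        return map;
    }
}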
@@ -1,100 +0,0 @@
-/*
- * Licensed to Elasticsearch under one or more contributor
- * license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright
- * ownership. Elasticsearch licenses this file to you under
- * the Apache License, Version 2.0 (the "License"); you may
- * not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.elasticsearch.action.admin.indices.recovery;
-
-import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
-import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.io.stream.StreamInput;
-import org.elasticsearch.common.io.stream.StreamOutput;
-import org.elasticsearch.common.xcontent.ToXContent;
-import org.elasticsearch.common.xcontent.XContentBuilder;
-import org.elasticsearch.index.shard.ShardId;
-import org.elasticsearch.indices.recovery.RecoveryState;
-
-import java.io.IOException;
-
-/**
- * Information regarding the recovery state of a shard.
- */
-public class ShardRecoveryResponse extends BroadcastShardResponse implements ToXContent {
-
-    RecoveryState recoveryState;
-
-    public ShardRecoveryResponse() { }
-
-    /**
-     * Constructs shard recovery information for the given index and shard id.
-     *
-     * @param shardId Id of the shard
-     */
-    ShardRecoveryResponse(ShardId shardId) {
-        super(shardId);
-    }
-
-    /**
-     * Sets the recovery state information for the shard.
-     *
-     * @param recoveryState Recovery state
-     */
-    public void recoveryState(RecoveryState recoveryState) {
-        this.recoveryState = recoveryState;
-    }
-
-    /**
-     * Gets the recovery state information for the shard. Null if shard wasn't recovered / recovery didn't start yet.
-     *
-     * @return Recovery state
-     */
-    @Nullable
-    public RecoveryState recoveryState() {
-        return recoveryState;
-    }
-
-    @Override
-    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
-        recoveryState.toXContent(builder, params);
-        return builder;
-    }
-
-    @Override
-    public void writeTo(StreamOutput out) throws IOException {
-        super.writeTo(out);
-        recoveryState.writeTo(out);
-    }
-
-    @Override
-    public void readFrom(StreamInput in) throws IOException {
-        super.readFrom(in);
-        recoveryState = RecoveryState.readRecoveryState(in);
-    }
-
-    /**
-     * Builds a new ShardRecoveryResponse from the give input stream.
-     *
-     * @param in Input stream
-     * @return A new ShardRecoveryResponse
-     * @throws IOException
-     */
-    public static ShardRecoveryResponse readShardRecoveryResponse(StreamInput in) throws IOException {
-        ShardRecoveryResponse response = new ShardRecoveryResponse();
-        response.readFrom(in);
-        return response;
-    }
-}
@@ -19,40 +19,37 @@
 
 package org.elasticsearch.action.admin.indices.recovery;
 
+import com.google.common.collect.Maps;
 import org.elasticsearch.action.ShardOperationFailedException;
 import org.elasticsearch.action.support.ActionFilters;
-import org.elasticsearch.action.support.DefaultShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
-import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
-import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
+import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
 import org.elasticsearch.cluster.ClusterService;
 import org.elasticsearch.cluster.ClusterState;
 import org.elasticsearch.cluster.block.ClusterBlockException;
 import org.elasticsearch.cluster.block.ClusterBlockLevel;
 import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
-import org.elasticsearch.cluster.routing.GroupShardsIterator;
 import org.elasticsearch.cluster.routing.ShardRouting;
+import org.elasticsearch.cluster.routing.ShardsIterator;
 import org.elasticsearch.common.inject.Inject;
+import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.shard.IndexShard;
-import org.elasticsearch.index.shard.ShardId;
 import org.elasticsearch.indices.IndicesService;
 import org.elasticsearch.indices.recovery.RecoveryState;
 import org.elasticsearch.threadpool.ThreadPool;
 import org.elasticsearch.transport.TransportService;
 
+import java.io.IOException;
 import java.util.ArrayList;
-import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.concurrent.atomic.AtomicReferenceArray;
 
 /**
  * Transport action for shard recovery operation. This transport action does not actually
  * perform shard recovery, it only reports on recoveries (both active and complete).
 */
-public class TransportRecoveryAction extends TransportBroadcastAction<RecoveryRequest, RecoveryResponse, TransportRecoveryAction.ShardRecoveryRequest, ShardRecoveryResponse> {
+public class TransportRecoveryAction extends TransportBroadcastByNodeAction<RecoveryRequest, RecoveryResponse, RecoveryState> {
 
     private final IndicesService indicesService;
 
@@ -61,84 +58,55 @@ public class TransportRecoveryAction extends TransportBroadcastAction<RecoveryRe
                                    TransportService transportService, IndicesService indicesService,
                                    ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
         super(settings, RecoveryAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
-                RecoveryRequest.class, ShardRecoveryRequest.class, ThreadPool.Names.MANAGEMENT);
+                RecoveryRequest.class, ThreadPool.Names.MANAGEMENT);
        this.indicesService = indicesService;
    }
 
    @Override
-    protected RecoveryResponse newResponse(RecoveryRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
-
-        int successfulShards = 0;
-        int failedShards = 0;
-        List<ShardOperationFailedException> shardFailures = null;
-        Map<String, List<ShardRecoveryResponse>> shardResponses = new HashMap<>();
-
-        for (int i = 0; i < shardsResponses.length(); i++) {
-            Object shardResponse = shardsResponses.get(i);
-            if (shardResponse == null) {
-                // simply ignore non active shards
-            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
-                failedShards++;
-                if (shardFailures == null) {
-                    shardFailures = new ArrayList<>();
-                }
-                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
-            } else {
-                ShardRecoveryResponse recoveryResponse = (ShardRecoveryResponse) shardResponse;
-                successfulShards++;
-
-                if (recoveryResponse.recoveryState() == null) {
-                    // recovery not yet started
-                    continue;
-                }
-
-                String indexName = recoveryResponse.getIndex();
-                List<ShardRecoveryResponse> responses = shardResponses.get(indexName);
-
-                if (responses == null) {
-                    responses = new ArrayList<>();
-                    shardResponses.put(indexName, responses);
-                }
-
-                if (request.activeOnly()) {
-                    if (recoveryResponse.recoveryState().getStage() != RecoveryState.Stage.DONE) {
-                        responses.add(recoveryResponse);
-                    }
-                } else {
-                    responses.add(recoveryResponse);
-                }
+    protected RecoveryState readShardResult(StreamInput in) throws IOException {
+        return RecoveryState.readRecoveryState(in);
+    }
+
+    @Override
+    protected RecoveryResponse newResponse(RecoveryRequest request, int totalShards, int successfulShards, int failedShards, List<RecoveryState> responses, List<ShardOperationFailedException> shardFailures) {
+        Map<String, List<RecoveryState>> shardResponses = Maps.newHashMap();
+        for (RecoveryState recoveryState : responses) {
+            if (recoveryState == null) {
+                continue;
+            }
+            String indexName = recoveryState.getShardId().getIndex();
+            if (!shardResponses.containsKey(indexName)) {
+                shardResponses.put(indexName, new ArrayList<RecoveryState>());
+            }
+            if (request.activeOnly()) {
+                if (recoveryState.getStage() != RecoveryState.Stage.DONE) {
+                    shardResponses.get(indexName).add(recoveryState);
+                }
+            } else {
+                shardResponses.get(indexName).add(recoveryState);
            }
        }
-
-        return new RecoveryResponse(shardsResponses.length(), successfulShards,
-                failedShards, request.detailed(), shardResponses, shardFailures);
+        return new RecoveryResponse(totalShards, successfulShards, failedShards, request.detailed(), shardResponses, shardFailures);
    }
 
    @Override
-    protected ShardRecoveryRequest newShardRequest(int numShards, ShardRouting shard, RecoveryRequest request) {
-        return new ShardRecoveryRequest(shard.shardId(), request);
+    protected RecoveryRequest readRequestFrom(StreamInput in) throws IOException {
+        final RecoveryRequest recoveryRequest = new RecoveryRequest();
+        recoveryRequest.readFrom(in);
+        return recoveryRequest;
    }
 
    @Override
-    protected ShardRecoveryResponse newShardResponse() {
-        return new ShardRecoveryResponse();
+    protected RecoveryState shardOperation(RecoveryRequest request, ShardRouting shardRouting) {
+        IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
+        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
+        return indexShard.recoveryState();
    }
 
    @Override
-    protected ShardRecoveryResponse shardOperation(ShardRecoveryRequest request) {
-
-        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
-        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
-        ShardRecoveryResponse shardRecoveryResponse = new ShardRecoveryResponse(request.shardId());
-
-        RecoveryState state = indexShard.recoveryState();
-        shardRecoveryResponse.recoveryState(state);
-        return shardRecoveryResponse;
-    }
-
-    @Override
-    protected GroupShardsIterator shards(ClusterState state, RecoveryRequest request, String[] concreteIndices) {
-        return state.routingTable().allAssignedShardsGrouped(concreteIndices, true, true);
+    protected ShardsIterator shards(ClusterState state, RecoveryRequest request, String[] concreteIndices) {
return state.routingTable().allShardsIncludingRelocationTargets(concreteIndices);
|
||||
}
|
||||
|
||||
@Override
|
||||
@ -150,14 +118,4 @@ public class TransportRecoveryAction extends TransportBroadcastAction<RecoveryRe
|
||||
protected ClusterBlockException checkRequestBlock(ClusterState state, RecoveryRequest request, String[] concreteIndices) {
|
||||
return state.blocks().indicesBlockedException(ClusterBlockLevel.READ, concreteIndices);
|
||||
}
|
||||
|
||||
static class ShardRecoveryRequest extends BroadcastShardRequest {
|
||||
|
||||
ShardRecoveryRequest() {
|
||||
}
|
||||
|
||||
ShardRecoveryRequest(ShardId shardId, RecoveryRequest request) {
|
||||
super(shardId, request);
|
||||
}
|
||||
}
|
||||
}
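
Every file below repeats the same migration: the per-shard wrapper request and its hand-rolled serialization are deleted, and the action instead overrides the small set of hooks that TransportBroadcastByNodeAction defines. A minimal sketch of that override set, assuming only the signatures visible in this diff; the Example* types are hypothetical, and the constructor and cluster-block checks are omitted:

    // Hypothetical action: ExampleRequest/ExampleResponse/ExampleShardResult are
    // placeholders, not part of this commit. Only the hook signatures are real.
    public class TransportExampleAction
            extends TransportBroadcastByNodeAction<ExampleRequest, ExampleResponse, ExampleShardResult> {

        @Override
        protected ExampleShardResult shardOperation(ExampleRequest request, ShardRouting shardRouting) {
            // Runs per shard on the node that hosts it; no per-shard request object exists any more.
            return new ExampleShardResult(shardRouting.shardId());
        }

        @Override
        protected ExampleShardResult readShardResult(StreamInput in) throws IOException {
            return ExampleShardResult.readFrom(in); // deserialize one per-shard result
        }

        @Override
        protected ExampleResponse newResponse(ExampleRequest request, int totalShards, int successfulShards,
                                              int failedShards, List<ExampleShardResult> results,
                                              List<ShardOperationFailedException> shardFailures) {
            // Reduce per-shard results into the API-level response; success/failure counting
            // is done by the base class, unlike the hand-rolled loops being deleted here.
            return new ExampleResponse(totalShards, successfulShards, failedShards, results, shardFailures);
        }

        @Override
        protected ExampleRequest readRequestFrom(StreamInput in) throws IOException {
            ExampleRequest request = new ExampleRequest();
            request.readFrom(in);
            return request;
        }

        @Override
        protected ShardsIterator shards(ClusterState clusterState, ExampleRequest request, String[] concreteIndices) {
            return clusterState.routingTable().allShards(concreteIndices);
        }
    }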
@ -38,11 +38,10 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
 * Refresh action.
 */
@ -71,7 +70,7 @@ public class TransportRefreshAction extends TransportBroadcastAction<RefreshRequ
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                    shardFailures = new ArrayList<>();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {

@ -19,9 +19,9 @@

package org.elasticsearch.action.admin.indices.segments;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@ -39,7 +39,7 @@ public class IndexSegments implements Iterable<IndexShardSegments> {
        for (ShardSegments shard : shards) {
            List<ShardSegments> lst = tmpIndexShards.get(shard.getShardRouting().id());
            if (lst == null) {
                lst = Lists.newArrayList();
                lst = new ArrayList<>();
                tmpIndexShards.put(shard.getShardRouting().id(), lst);
            }
            lst.add(shard);

@ -19,13 +19,11 @@

package org.elasticsearch.action.admin.indices.segments;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.apache.lucene.util.Accountable;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.unit.ByteSizeValue;
@ -35,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.engine.Segment;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.Map;
@ -50,7 +49,7 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont

    }

    IndicesSegmentResponse(ShardSegments[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
    IndicesSegmentResponse(ShardSegments[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
        super(totalShards, successfulShards, failedShards, shardFailures);
        this.shards = shards;
    }
@ -63,11 +62,11 @@ public class IndicesSegmentResponse extends BroadcastResponse implements ToXCont

        Set<String> indices = Sets.newHashSet();
        for (ShardSegments shard : shards) {
            indices.add(shard.getIndex());
            indices.add(shard.getShardRouting().getIndex());
        }

        for (String index : indices) {
            List<ShardSegments> shards = Lists.newArrayList();
            List<ShardSegments> shards = new ArrayList<>();
            for (ShardSegments shard : this.shards) {
                if (shard.getShardRouting().index().equals(index)) {
                    shards.add(shard);

@ -20,10 +20,10 @@
package org.elasticsearch.action.admin.indices.segments;

import com.google.common.collect.ImmutableList;
import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.index.engine.Segment;

import java.io.IOException;
@ -33,7 +33,7 @@ import java.util.List;

import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEntry;

public class ShardSegments extends BroadcastShardResponse implements Iterable<Segment> {
public class ShardSegments implements Streamable, Iterable<Segment> {

    private ShardRouting shardRouting;

@ -43,7 +43,6 @@ public class ShardSegments extends BroadcastShardResponse implements Iterable<Se
    }

    ShardSegments(ShardRouting shardRouting, List<Segment> segments) {
        super(shardRouting.shardId());
        this.shardRouting = shardRouting;
        this.segments = segments;
    }
@ -89,7 +88,6 @@ public class ShardSegments extends BroadcastShardResponse implements Iterable<Se

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardRouting = readShardRoutingEntry(in);
        int size = in.readVInt();
        if (size == 0) {
@ -104,7 +102,6 @@ public class ShardSegments extends BroadcastShardResponse implements Iterable<Se

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        shardRouting.writeTo(out);
        out.writeVInt(segments.size());
        for (Segment segment : segments) {

@ -21,38 +21,30 @@ package org.elasticsearch.action.admin.indices.segments;

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
 *
 */
public class TransportIndicesSegmentsAction extends TransportBroadcastAction<IndicesSegmentsRequest, IndicesSegmentResponse, TransportIndicesSegmentsAction.IndexShardSegmentRequest, ShardSegments> {
public class TransportIndicesSegmentsAction extends TransportBroadcastByNodeAction<IndicesSegmentsRequest, IndicesSegmentResponse, ShardSegments> {

    private final IndicesService indicesService;

@ -60,7 +52,7 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastAction<Ind
    public TransportIndicesSegmentsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
                                          IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, IndicesSegmentsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                IndicesSegmentsRequest.class, TransportIndicesSegmentsAction.IndexShardSegmentRequest.class, ThreadPool.Names.MANAGEMENT);
                IndicesSegmentsRequest.class, ThreadPool.Names.MANAGEMENT);
        this.indicesService = indicesService;
    }

@ -68,8 +60,8 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastAction<Ind
     * Segments goes across *all* active shards.
     */
    @Override
    protected GroupShardsIterator shards(ClusterState clusterState, IndicesSegmentsRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
    protected ShardsIterator shards(ClusterState clusterState, IndicesSegmentsRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allShards(concreteIndices);
    }

    @Override
@ -83,68 +75,26 @@ public class TransportIndicesSegmentsAction extends TransportBroadcastAction<Ind
    }

    @Override
    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
        int successfulShards = 0;
        int failedShards = 0;
        List<ShardOperationFailedException> shardFailures = null;
        final List<ShardSegments> shards = newArrayList();
        for (int i = 0; i < shardsResponses.length(); i++) {
            Object shardResponse = shardsResponses.get(i);
            if (shardResponse == null) {
                // simply ignore non active shards
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {
                shards.add((ShardSegments) shardResponse);
                successfulShards++;
            }
        }
        return new IndicesSegmentResponse(shards.toArray(new ShardSegments[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures);
    protected ShardSegments readShardResult(StreamInput in) throws IOException {
        return ShardSegments.readShardSegments(in);
    }

    @Override
    protected IndexShardSegmentRequest newShardRequest(int numShards, ShardRouting shard, IndicesSegmentsRequest request) {
        return new IndexShardSegmentRequest(shard.shardId(), request);
    protected IndicesSegmentResponse newResponse(IndicesSegmentsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardSegments> results, List<ShardOperationFailedException> shardFailures) {
        return new IndicesSegmentResponse(results.toArray(new ShardSegments[results.size()]), totalShards, successfulShards, failedShards, shardFailures);
    }

    @Override
    protected ShardSegments newShardResponse() {
        return new ShardSegments();
    protected IndicesSegmentsRequest readRequestFrom(StreamInput in) throws IOException {
        final IndicesSegmentsRequest request = new IndicesSegmentsRequest();
        request.readFrom(in);
        return request;
    }

    @Override
    protected ShardSegments shardOperation(IndexShardSegmentRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
        return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose));
    }

    static class IndexShardSegmentRequest extends BroadcastShardRequest {
        boolean verbose;

        IndexShardSegmentRequest() {
            verbose = false;
        }

        IndexShardSegmentRequest(ShardId shardId, IndicesSegmentsRequest request) {
            super(shardId, request);
            verbose = request.verbose();
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            out.writeBoolean(verbose);
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            verbose = in.readBoolean();
        }
    protected ShardSegments shardOperation(IndicesSegmentsRequest request, ShardRouting shardRouting) {
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.getIndex());
        IndexShard indexShard = indexService.shardSafe(shardRouting.id());
        return new ShardSegments(indexShard.routingEntry(), indexShard.engine().segments(request.verbose()));
    }
}

@ -19,9 +19,9 @@

package org.elasticsearch.action.admin.indices.stats;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@ -57,7 +57,7 @@ public class IndexStats implements Iterable<IndexShardStats> {
        for (ShardStats shard : shards) {
            List<ShardStats> lst = tmpIndexShards.get(shard.getShardRouting().id());
            if (lst == null) {
                lst = Lists.newArrayList();
                lst = new ArrayList<>();
                tmpIndexShards.put(shard.getShardRouting().id(), lst);
            }
            lst.add(shard);

@ -20,12 +20,10 @@
package org.elasticsearch.action.admin.indices.stats;

import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -35,6 +33,7 @@ import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.common.xcontent.XContentFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@ -51,7 +50,7 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten

    }

    IndicesStatsResponse(ShardStats[] shards, ClusterState clusterState, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
    IndicesStatsResponse(ShardStats[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
        super(totalShards, successfulShards, failedShards, shardFailures);
        this.shards = shards;
    }
@ -90,11 +89,11 @@ public class IndicesStatsResponse extends BroadcastResponse implements ToXConten

        Set<String> indices = Sets.newHashSet();
        for (ShardStats shard : shards) {
            indices.add(shard.getIndex());
            indices.add(shard.getShardRouting().getIndex());
        }

        for (String index : indices) {
            List<ShardStats> shards = Lists.newArrayList();
            List<ShardStats> shards = new ArrayList<>();
            for (ShardStats shard : this.shards) {
                if (shard.getShardRouting().index().equals(index)) {
                    shards.add(shard);

@ -19,11 +19,11 @@

package org.elasticsearch.action.admin.indices.stats;

import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
@ -36,21 +36,23 @@ import static org.elasticsearch.cluster.routing.ShardRouting.readShardRoutingEnt

/**
 */
public class ShardStats extends BroadcastShardResponse implements ToXContent {

public class ShardStats implements Streamable, ToXContent {
    private ShardRouting shardRouting;

    CommonStats commonStats;

    private CommonStats commonStats;
    @Nullable
    CommitStats commitStats;
    private CommitStats commitStats;
    private String dataPath;
    private String statePath;
    private boolean isCustomDataPath;

    ShardStats() {
    }

    public ShardStats(IndexShard indexShard, ShardRouting shardRouting, CommonStatsFlags flags) {
        super(indexShard.shardId());
        this.shardRouting = shardRouting;
    public ShardStats(IndexShard indexShard, CommonStatsFlags flags) {
        this.shardRouting = indexShard.routingEntry();
        this.dataPath = indexShard.shardPath().getRootDataPath().toString();
        this.statePath = indexShard.shardPath().getRootStatePath().toString();
        this.isCustomDataPath = indexShard.shardPath().isCustomDataPath();
        this.commonStats = new CommonStats(indexShard, flags);
        this.commitStats = indexShard.commitStats();
    }
@ -70,6 +72,18 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent {
        return this.commitStats;
    }

    public String getDataPath() {
        return dataPath;
    }

    public String getStatePath() {
        return statePath;
    }

    public boolean isCustomDataPath() {
        return isCustomDataPath;
    }

    public static ShardStats readShardStats(StreamInput in) throws IOException {
        ShardStats stats = new ShardStats();
        stats.readFrom(in);
@ -78,18 +92,22 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent {

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        shardRouting = readShardRoutingEntry(in);
        commonStats = CommonStats.readCommonStats(in);
        commitStats = CommitStats.readOptionalCommitStatsFrom(in);
        statePath = in.readString();
        dataPath = in.readString();
        isCustomDataPath = in.readBoolean();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        super.writeTo(out);
        shardRouting.writeTo(out);
        commonStats.writeTo(out);
        out.writeOptionalStreamable(commitStats);
        out.writeString(statePath);
        out.writeString(dataPath);
        out.writeBoolean(isCustomDataPath);
    }

    @Override
@ -105,15 +123,23 @@ public class ShardStats extends BroadcastShardResponse implements ToXContent {
        if (commitStats != null) {
            commitStats.toXContent(builder, params);
        }
        builder.startObject(Fields.SHARD_PATH);
        builder.field(Fields.STATE_PATH, statePath);
        builder.field(Fields.DATA_PATH, dataPath);
        builder.field(Fields.IS_CUSTOM_DATA_PATH, isCustomDataPath);
        builder.endObject();
        return builder;
    }

    static final class Fields {
        static final XContentBuilderString ROUTING = new XContentBuilderString("routing");
        static final XContentBuilderString STATE = new XContentBuilderString("state");
        static final XContentBuilderString STATE_PATH = new XContentBuilderString("state_path");
        static final XContentBuilderString DATA_PATH = new XContentBuilderString("data_path");
        static final XContentBuilderString IS_CUSTOM_DATA_PATH = new XContentBuilderString("is_custom_data_path");
        static final XContentBuilderString SHARD_PATH = new XContentBuilderString("shard_path");
        static final XContentBuilderString PRIMARY = new XContentBuilderString("primary");
        static final XContentBuilderString NODE = new XContentBuilderString("node");
        static final XContentBuilderString RELOCATING_NODE = new XContentBuilderString("relocating_node");
    }

}
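
The new dataPath, statePath, and isCustomDataPath fields above only round-trip correctly because readFrom and writeTo stay exact mirror images. A minimal sketch of that Streamable invariant, using hypothetical field names rather than the real ShardStats members:

    // Hypothetical Streamable: what matters is that reads and writes happen in the
    // same order with the same types, exactly as in the ShardStats change above.
    class ExampleShardPaths implements Streamable {
        private String statePath;
        private String dataPath;
        private boolean custom;

        @Override
        public void readFrom(StreamInput in) throws IOException {
            statePath = in.readString();  // 1st read must match the 1st write below
            dataPath = in.readString();   // 2nd
            custom = in.readBoolean();    // 3rd
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            out.writeString(statePath);   // 1st
            out.writeString(dataPath);    // 2nd
            out.writeBoolean(custom);     // 3rd
        }
    }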
@ -19,27 +19,21 @@

package org.elasticsearch.action.admin.indices.stats;

import com.google.common.collect.Lists;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
@ -47,13 +41,10 @@ import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
 */
public class TransportIndicesStatsAction extends TransportBroadcastAction<IndicesStatsRequest, IndicesStatsResponse, TransportIndicesStatsAction.IndexShardStatsRequest, ShardStats> {
public class TransportIndicesStatsAction extends TransportBroadcastByNodeAction<IndicesStatsRequest, IndicesStatsResponse, ShardStats> {

    private final IndicesService indicesService;

@ -62,7 +53,7 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction<Indice
                                      TransportService transportService, IndicesService indicesService,
                                      ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, IndicesStatsAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                IndicesStatsRequest.class, IndexShardStatsRequest.class, ThreadPool.Names.MANAGEMENT);
                IndicesStatsRequest.class, ThreadPool.Names.MANAGEMENT);
        this.indicesService = indicesService;
    }

@ -70,8 +61,8 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction<Indice
     * Status goes across *all* shards.
     */
    @Override
    protected GroupShardsIterator shards(ClusterState clusterState, IndicesStatsRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allAssignedShardsGrouped(concreteIndices, true);
    protected ShardsIterator shards(ClusterState clusterState, IndicesStatsRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allShards(concreteIndices);
    }

    @Override
@ -84,45 +75,27 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction<Indice
        return state.blocks().indicesBlockedException(ClusterBlockLevel.METADATA_READ, concreteIndices);
    }

    @Override
    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
        int successfulShards = 0;
        int failedShards = 0;
        List<ShardOperationFailedException> shardFailures = null;
        final List<ShardStats> shards = Lists.newArrayList();
        for (int i = 0; i < shardsResponses.length(); i++) {
            Object shardResponse = shardsResponses.get(i);
            if (shardResponse == null) {
                // simply ignore non active shards
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {
                shards.add((ShardStats) shardResponse);
                successfulShards++;
            }
        }
        return new IndicesStatsResponse(shards.toArray(new ShardStats[shards.size()]), clusterState, shardsResponses.length(), successfulShards, failedShards, shardFailures);
    protected ShardStats readShardResult(StreamInput in) throws IOException {
        return ShardStats.readShardStats(in);
    }

    @Override
    protected IndexShardStatsRequest newShardRequest(int numShards, ShardRouting shard, IndicesStatsRequest request) {
        return new IndexShardStatsRequest(shard.shardId(), request);
    protected IndicesStatsResponse newResponse(IndicesStatsRequest request, int totalShards, int successfulShards, int failedShards, List<ShardStats> responses, List<ShardOperationFailedException> shardFailures) {
        return new IndicesStatsResponse(responses.toArray(new ShardStats[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
    }

    @Override
    protected ShardStats newShardResponse() {
        return new ShardStats();
    protected IndicesStatsRequest readRequestFrom(StreamInput in) throws IOException {
        IndicesStatsRequest request = new IndicesStatsRequest();
        request.readFrom(in);
        return request;
    }

    @Override
    protected ShardStats shardOperation(IndexShardStatsRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
    protected ShardStats shardOperation(IndicesStatsRequest request, ShardRouting shardRouting) {
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
        // if we don't have the routing entry yet, we need it stats wise, we treat it as if the shard is not ready yet
        if (indexShard.routingEntry() == null) {
            throw new ShardNotFoundException(indexShard.shardId());
@ -130,92 +103,65 @@ public class TransportIndicesStatsAction extends TransportBroadcastAction<Indice

        CommonStatsFlags flags = new CommonStatsFlags().clear();

        if (request.request.docs()) {
        if (request.docs()) {
            flags.set(CommonStatsFlags.Flag.Docs);
        }
        if (request.request.store()) {
        if (request.store()) {
            flags.set(CommonStatsFlags.Flag.Store);
        }
        if (request.request.indexing()) {
        if (request.indexing()) {
            flags.set(CommonStatsFlags.Flag.Indexing);
            flags.types(request.request.types());
            flags.types(request.types());
        }
        if (request.request.get()) {
        if (request.get()) {
            flags.set(CommonStatsFlags.Flag.Get);
        }
        if (request.request.search()) {
        if (request.search()) {
            flags.set(CommonStatsFlags.Flag.Search);
            flags.groups(request.request.groups());
            flags.groups(request.groups());
        }
        if (request.request.merge()) {
        if (request.merge()) {
            flags.set(CommonStatsFlags.Flag.Merge);
        }
        if (request.request.refresh()) {
        if (request.refresh()) {
            flags.set(CommonStatsFlags.Flag.Refresh);
        }
        if (request.request.flush()) {
        if (request.flush()) {
            flags.set(CommonStatsFlags.Flag.Flush);
        }
        if (request.request.warmer()) {
        if (request.warmer()) {
            flags.set(CommonStatsFlags.Flag.Warmer);
        }
        if (request.request.queryCache()) {
        if (request.queryCache()) {
            flags.set(CommonStatsFlags.Flag.QueryCache);
        }
        if (request.request.fieldData()) {
        if (request.fieldData()) {
            flags.set(CommonStatsFlags.Flag.FieldData);
            flags.fieldDataFields(request.request.fieldDataFields());
            flags.fieldDataFields(request.fieldDataFields());
        }
        if (request.request.percolate()) {
        if (request.percolate()) {
            flags.set(CommonStatsFlags.Flag.Percolate);
        }
        if (request.request.segments()) {
        if (request.segments()) {
            flags.set(CommonStatsFlags.Flag.Segments);
        }
        if (request.request.completion()) {
        if (request.completion()) {
            flags.set(CommonStatsFlags.Flag.Completion);
            flags.completionDataFields(request.request.completionFields());
            flags.completionDataFields(request.completionFields());
        }
        if (request.request.translog()) {
        if (request.translog()) {
            flags.set(CommonStatsFlags.Flag.Translog);
        }
        if (request.request.suggest()) {
        if (request.suggest()) {
            flags.set(CommonStatsFlags.Flag.Suggest);
        }
        if (request.request.requestCache()) {
        if (request.requestCache()) {
            flags.set(CommonStatsFlags.Flag.RequestCache);
        }
        if (request.request.recovery()) {
        if (request.recovery()) {
            flags.set(CommonStatsFlags.Flag.Recovery);
        }

        return new ShardStats(indexShard, indexShard.routingEntry(), flags);
    }

    static class IndexShardStatsRequest extends BroadcastShardRequest {

        // TODO if there are many indices, the request might hold a large indices array..., we don't really need to serialize it
        IndicesStatsRequest request;

        IndexShardStatsRequest() {
        }

        IndexShardStatsRequest(ShardId shardId, IndicesStatsRequest request) {
            super(shardId, request);
            this.request = request;
        }

        @Override
        public void readFrom(StreamInput in) throws IOException {
            super.readFrom(in);
            request = new IndicesStatsRequest();
            request.readFrom(in);
        }

        @Override
        public void writeTo(StreamOutput out) throws IOException {
            super.writeTo(out);
            request.writeTo(out);
        }
        return new ShardStats(indexShard, flags);
    }
}
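
On the caller side the unwrapping is invisible: the same top-level request drives which CommonStatsFlags get set in shardOperation above. A hedged sketch of a typical caller, assuming the request-builder methods of this era (prepareStats, clear, setDocs, and setStore are assumptions, not shown in this diff):

    // Hypothetical caller; builder method names are assumptions, not part of this commit.
    IndicesStatsResponse response = client.admin().indices().prepareStats("my_index")
            .clear()          // start from no flags, like new CommonStatsFlags().clear()
            .setDocs(true)    // ends up as request.docs() -> Flag.Docs in shardOperation
            .setStore(true)   // ends up as request.store() -> Flag.Store
            .get();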
@ -18,13 +18,13 @@
 */
package org.elasticsearch.action.admin.indices.template.get;

import com.google.common.collect.Lists;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.cluster.metadata.IndexTemplateMetaData;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

/**
@ -49,7 +49,7 @@ public class GetIndexTemplatesResponse extends ActionResponse {
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
        int size = in.readVInt();
        indexTemplates = Lists.newArrayListWithExpectedSize(size);
        indexTemplates = new ArrayList<>(size);
        for (int i = 0 ; i < size ; i++) {
            indexTemplates.add(0, IndexTemplateMetaData.Builder.readFrom(in));
        }

@ -19,7 +19,6 @@
package org.elasticsearch.action.admin.indices.template.get;

import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.collect.Lists;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeReadAction;
@ -35,6 +34,8 @@ import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
@ -69,9 +70,9 @@ public class TransportGetIndexTemplatesAction extends TransportMasterNodeReadAct

        // If we did not ask for a specific name, then we return all templates
        if (request.names().length == 0) {
            results = Lists.newArrayList(state.metaData().templates().values().toArray(IndexTemplateMetaData.class));
            results = Arrays.asList(state.metaData().templates().values().toArray(IndexTemplateMetaData.class));
        } else {
            results = Lists.newArrayList();
            results = new ArrayList<>();
        }

        for (String name : request.names()) {

@ -19,9 +19,9 @@

package org.elasticsearch.action.admin.indices.upgrade.get;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
@ -39,7 +39,7 @@ public class IndexUpgradeStatus implements Iterable<IndexShardUpgradeStatus> {
        for (ShardUpgradeStatus shard : shards) {
            List<ShardUpgradeStatus> lst = tmpIndexShards.get(shard.getShardRouting().id());
            if (lst == null) {
                lst = Lists.newArrayList();
                lst = new ArrayList<>();
                tmpIndexShards.put(shard.getShardRouting().id(), lst);
            }
            lst.add(shard);

@ -22,36 +22,31 @@ package org.elasticsearch.action.admin.indices.upgrade.get;
import org.elasticsearch.Version;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.action.support.broadcast.BroadcastShardRequest;
import org.elasticsearch.action.support.broadcast.TransportBroadcastAction;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.engine.Segment;
import org.elasticsearch.index.shard.IndexShard;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
 *
 */
public class TransportUpgradeStatusAction extends TransportBroadcastAction<UpgradeStatusRequest, UpgradeStatusResponse, TransportUpgradeStatusAction.IndexShardUpgradeStatusRequest, ShardUpgradeStatus> {
public class TransportUpgradeStatusAction extends TransportBroadcastByNodeAction<UpgradeStatusRequest, UpgradeStatusResponse, ShardUpgradeStatus> {

    private final IndicesService indicesService;

@ -59,7 +54,7 @@ public class TransportUpgradeStatusAction extends TransportBroadcastAction<Upgra
    public TransportUpgradeStatusAction(Settings settings, ThreadPool threadPool, ClusterService clusterService, TransportService transportService,
                                        IndicesService indicesService, ActionFilters actionFilters, IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, UpgradeStatusAction.NAME, threadPool, clusterService, transportService, actionFilters, indexNameExpressionResolver,
                UpgradeStatusRequest.class, IndexShardUpgradeStatusRequest.class, ThreadPool.Names.MANAGEMENT);
                UpgradeStatusRequest.class, ThreadPool.Names.MANAGEMENT);
        this.indicesService = indicesService;
    }

@ -67,8 +62,8 @@ public class TransportUpgradeStatusAction extends TransportBroadcastAction<Upgra
     * Getting upgrade stats from *all* active shards.
     */
    @Override
    protected GroupShardsIterator shards(ClusterState clusterState, UpgradeStatusRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allActiveShardsGrouped(concreteIndices, true);
    protected ShardsIterator shards(ClusterState clusterState, UpgradeStatusRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allShards(concreteIndices);
    }

    @Override
@ -82,43 +77,26 @@ public class TransportUpgradeStatusAction extends TransportBroadcastAction<Upgra
    }

    @Override
    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, AtomicReferenceArray shardsResponses, ClusterState clusterState) {
        int successfulShards = 0;
        int failedShards = 0;
        List<ShardOperationFailedException> shardFailures = null;
        final List<ShardUpgradeStatus> shards = newArrayList();
        for (int i = 0; i < shardsResponses.length(); i++) {
            Object shardResponse = shardsResponses.get(i);
            if (shardResponse == null) {
                // simply ignore non active shards
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {
                shards.add((ShardUpgradeStatus) shardResponse);
                successfulShards++;
            }
        }
        return new UpgradeStatusResponse(shards.toArray(new ShardUpgradeStatus[shards.size()]), shardsResponses.length(), successfulShards, failedShards, shardFailures);
    protected ShardUpgradeStatus readShardResult(StreamInput in) throws IOException {
        return ShardUpgradeStatus.readShardUpgradeStatus(in);
    }

    @Override
    protected IndexShardUpgradeStatusRequest newShardRequest(int numShards, ShardRouting shard, UpgradeStatusRequest request) {
        return new IndexShardUpgradeStatusRequest(shard.shardId(), request);
    protected UpgradeStatusResponse newResponse(UpgradeStatusRequest request, int totalShards, int successfulShards, int failedShards, List<ShardUpgradeStatus> responses, List<ShardOperationFailedException> shardFailures) {
        return new UpgradeStatusResponse(responses.toArray(new ShardUpgradeStatus[responses.size()]), totalShards, successfulShards, failedShards, shardFailures);
    }

    @Override
    protected ShardUpgradeStatus newShardResponse() {
        return new ShardUpgradeStatus();
    protected UpgradeStatusRequest readRequestFrom(StreamInput in) throws IOException {
        UpgradeStatusRequest request = new UpgradeStatusRequest();
        request.readFrom(in);
        return request;
    }

    @Override
    protected ShardUpgradeStatus shardOperation(IndexShardUpgradeStatusRequest request) {
        IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(request.shardId().id());
    protected ShardUpgradeStatus shardOperation(UpgradeStatusRequest request, ShardRouting shardRouting) {
        IndexService indexService = indicesService.indexServiceSafe(shardRouting.shardId().getIndex());
        IndexShard indexShard = indexService.shardSafe(shardRouting.shardId().id());
        List<Segment> segments = indexShard.engine().segments(false);
        long total_bytes = 0;
        long to_upgrade_bytes = 0;
@ -137,16 +115,4 @@ public class TransportUpgradeStatusAction extends TransportBroadcastAction<Upgra

        return new ShardUpgradeStatus(indexShard.routingEntry(), total_bytes, to_upgrade_bytes, to_upgrade_bytes_ancient);
    }

    static class IndexShardUpgradeStatusRequest extends BroadcastShardRequest {

        IndexShardUpgradeStatusRequest() {

        }

        IndexShardUpgradeStatusRequest(ShardId shardId, UpgradeStatusRequest request) {
            super(shardId, request);
        }

    }
}

@ -21,10 +21,6 @@ package org.elasticsearch.action.admin.indices.upgrade.get;

import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;

public class UpgradeStatusRequest extends BroadcastRequest<UpgradeStatusRequest> {


@ -19,7 +19,6 @@

package org.elasticsearch.action.admin.indices.upgrade.get;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.elasticsearch.action.ShardOperationFailedException;
@ -31,19 +30,17 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

public class UpgradeStatusResponse extends BroadcastResponse implements ToXContent {

    private ShardUpgradeStatus[] shards;

    private Map<String, IndexUpgradeStatus> indicesUpgradeStatus;

    UpgradeStatusResponse() {

    }

    UpgradeStatusResponse(ShardUpgradeStatus[] shards, int totalShards, int successfulShards, int failedShards, List<ShardOperationFailedException> shardFailures) {
@ -63,7 +60,7 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
        }

        for (String index : indices) {
            List<ShardUpgradeStatus> shards = Lists.newArrayList();
            List<ShardUpgradeStatus> shards = new ArrayList<>();
            for (ShardUpgradeStatus shard : this.shards) {
                if (shard.getShardRouting().index().equals(index)) {
                    shards.add(shard);
@ -75,7 +72,6 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
        return indicesUpgradeStats;
    }

    @Override
    public void readFrom(StreamInput in) throws IOException {
        super.readFrom(in);
@ -120,8 +116,6 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte

    @Override
    public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {

        builder.byteSizeField(Fields.SIZE_IN_BYTES, Fields.SIZE, getTotalBytes());
        builder.byteSizeField(Fields.SIZE_TO_UPGRADE_IN_BYTES, Fields.SIZE_TO_UPGRADE, getToUpgradeBytes());
        builder.byteSizeField(Fields.SIZE_TO_UPGRADE_ANCIENT_IN_BYTES, Fields.SIZE_TO_UPGRADE_ANCIENT, getToUpgradeBytesAncient());
@ -163,10 +157,8 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
            }
            builder.endObject();
        }

            builder.endObject();
        }

            builder.endObject();
        }
        return builder;
@ -186,6 +178,5 @@ public class UpgradeStatusResponse extends BroadcastResponse implements ToXConte
        static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT = new XContentBuilderString("size_to_upgrade_ancient");
        static final XContentBuilderString SIZE_TO_UPGRADE_IN_BYTES = new XContentBuilderString("size_to_upgrade_in_bytes");
        static final XContentBuilderString SIZE_TO_UPGRADE_ANCIENT_IN_BYTES = new XContentBuilderString("size_to_upgrade_ancient_in_bytes");

    }
}
}
@ -33,7 +33,10 @@ import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.GroupShardsIterator;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -42,12 +45,12 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Maps.newHashMap;
import static com.google.common.collect.Sets.newHashSet;

@ -84,7 +87,7 @@ public class TransportUpgradeAction extends TransportBroadcastAction<UpgradeRequ
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                    shardFailures = new ArrayList<>();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {

@ -54,14 +54,13 @@ import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
 *
 */
@ -136,7 +135,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
            } else if (shardResponse instanceof BroadcastShardOperationFailedException) {
                failedShards++;
                if (shardFailures == null) {
                    shardFailures = newArrayList();
                    shardFailures = new ArrayList<>();
                }
                shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
            } else {
@ -144,7 +143,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
                valid = valid && validateQueryResponse.isValid();
                if (request.explain() || request.rewrite()) {
                    if (queryExplanations == null) {
                        queryExplanations = newArrayList();
                        queryExplanations = new ArrayList<>();
                    }
                    queryExplanations.add(new QueryExplanation(
                            validateQueryResponse.getIndex(),
@ -197,7 +196,7 @@ public class TransportValidateQueryAction extends TransportBroadcastAction<Valid
            valid = false;
            error = e.getMessage();
        } finally {
            SearchContext.current().close();
            searchContext.close();
            SearchContext.removeCurrent();
        }


@ -18,7 +18,6 @@
 */
package org.elasticsearch.action.admin.indices.warmer.delete;

import com.google.common.collect.Lists;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.master.TransportMasterNodeAction;
@ -39,6 +38,7 @@ import org.elasticsearch.search.warmer.IndexWarmersMetaData;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@ -92,6 +92,14 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeAction<Delet
                MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());

                boolean globalFoundAtLeastOne = false;
                boolean deleteAll = false;
                for (int i=0; i<request.names().length; i++){
                    if (request.names()[i].equals(MetaData.ALL)) {
                        deleteAll = true;
                        break;
                    }
                }

                for (String index : concreteIndices) {
                    IndexMetaData indexMetaData = currentState.metaData().index(index);
                    if (indexMetaData == null) {
@ -99,11 +107,11 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeAction<Delet
                    }
                    IndexWarmersMetaData warmers = indexMetaData.custom(IndexWarmersMetaData.TYPE);
                    if (warmers != null) {
                        List<IndexWarmersMetaData.Entry> entries = Lists.newArrayList();
                        List<IndexWarmersMetaData.Entry> entries = new ArrayList<>();
                        for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
                            boolean keepWarmer = true;
                            for (String warmer : request.names()) {
                                if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals("_all")) {
                                if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
                                    globalFoundAtLeastOne = true;
                                    keepWarmer = false;
                                    // don't add it...
@ -123,7 +131,7 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeAction<Delet
                    }
                }

                if (!globalFoundAtLeastOne) {
                if (globalFoundAtLeastOne == false && deleteAll == false) {
                    throw new IndexWarmerMissingException(request.names());
                }

@ -137,11 +145,13 @@ public class TransportDeleteWarmerAction extends TransportMasterNodeAction<Delet
                    if (warmers != null) {
                        for (IndexWarmersMetaData.Entry entry : warmers.entries()) {
                            for (String warmer : request.names()) {
                                if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals("_all")) {
                                if (Regex.simpleMatch(warmer, entry.name()) || warmer.equals(MetaData.ALL)) {
                                    logger.info("[{}] delete warmer [{}]", index, entry.name());
                                }
                            }
                        }
                    } else if(deleteAll){
                        logger.debug("no warmers to delete on index [{}]", index);
                    }
                }
            }
||||
|
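The substantive fix in this file is the new deleteAll flag: a delete-warmer request for _all no longer throws IndexWarmerMissingException when an index simply has no warmers, while a request naming concrete warmers that match nothing still fails. A condensed, illustrative sketch of the guard (simplified names; IllegalArgumentException stands in for the real IndexWarmerMissingException):

import java.util.Arrays;

class DeleteWarmerGuardSketch {
    static final String ALL = "_all";

    static void checkFound(String[] requestedNames, boolean foundAtLeastOne) {
        boolean deleteAll = Arrays.asList(requestedNames).contains(ALL);
        // Only explicit names that matched nothing are an error; "_all" on an
        // index without warmers is treated as a no-op.
        if (!foundAtLeastOne && !deleteAll) {
            throw new IllegalArgumentException("warmers " + Arrays.toString(requestedNames) + " not found");
        }
    }
}
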
@ -19,8 +19,6 @@

package org.elasticsearch.action.bulk;

import com.google.common.collect.Lists;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
@ -57,7 +55,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite

private static final int REQUEST_OVERHEAD = 50;

final List<ActionRequest> requests = Lists.newArrayList();
final List<ActionRequest> requests = new ArrayList<>();
List<Object> payloads = null;

protected TimeValue timeout = BulkShardRequest.DEFAULT_TIMEOUT;
@ -186,7 +184,7 @@ public class BulkRequest extends ActionRequest<BulkRequest> implements Composite
@Override
@SuppressWarnings("unchecked")
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> indicesRequests = Lists.newArrayList();
List<IndicesRequest> indicesRequests = new ArrayList<>();
for (ActionRequest request : requests) {
assert request instanceof IndicesRequest;
indicesRequests.add((IndicesRequest) request);

@ -19,7 +19,6 @@

package org.elasticsearch.action.bulk;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.ElasticsearchParseException;
@ -57,7 +56,13 @@ import org.elasticsearch.indices.IndexClosedException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.*;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

/**
@ -251,7 +256,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, indexRequest.type(), indexRequest.id(), indexRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
list = Lists.newArrayList();
list = new ArrayList<>();
requestsByShard.put(shardId, list);
}
list.add(new BulkItemRequest(i, request));
@ -265,7 +270,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
for (ShardIterator shardIt : groupShards) {
List<BulkItemRequest> list = requestsByShard.get(shardIt.shardId());
if (list == null) {
list = Lists.newArrayList();
list = new ArrayList<>();
requestsByShard.put(shardIt.shardId(), list);
}
list.add(new BulkItemRequest(i, new DeleteRequest(deleteRequest)));
@ -274,7 +279,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
ShardId shardId = clusterService.operationRouting().deleteShards(clusterState, concreteIndex, deleteRequest.type(), deleteRequest.id(), deleteRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
list = Lists.newArrayList();
list = new ArrayList<>();
requestsByShard.put(shardId, list);
}
list.add(new BulkItemRequest(i, request));
@ -292,7 +297,7 @@ public class TransportBulkAction extends HandledTransportAction<BulkRequest, Bul
ShardId shardId = clusterService.operationRouting().indexShards(clusterState, concreteIndex, updateRequest.type(), updateRequest.id(), updateRequest.routing()).shardId();
List<BulkItemRequest> list = requestsByShard.get(shardId);
if (list == null) {
list = Lists.newArrayList();
list = new ArrayList<>();
requestsByShard.put(shardId, list);
}
list.add(new BulkItemRequest(i, request));

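The four hunks above repeat one idiom: look up the per-shard bucket, create it on first use, append the item. A self-contained sketch of the pattern, with String and Integer standing in for the real ShardId and BulkItemRequest types; on Java 8+ the body collapses to computeIfAbsent, noted in the comment:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class GroupByShardSketch {
    static void add(Map<String, List<Integer>> requestsByShard, String shardId, int itemSlot) {
        List<Integer> list = requestsByShard.get(shardId);
        if (list == null) {
            list = new ArrayList<>();           // first item routed to this shard
            requestsByShard.put(shardId, list);
        }
        list.add(itemSlot);
        // Java 8+ equivalent:
        // requestsByShard.computeIfAbsent(shardId, k -> new ArrayList<>()).add(itemSlot);
    }

    public static void main(String[] args) {
        Map<String, List<Integer>> byShard = new HashMap<>();
        add(byShard, "index-a[0]", 0);
        add(byShard, "index-a[0]", 3);
        System.out.println(byShard); // prints {index-a[0]=[0, 3]}
    }
}
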
@ -54,13 +54,13 @@ import org.elasticsearch.search.query.QueryPhaseExecutionException;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;
import static org.elasticsearch.action.exists.ExistsRequest.DEFAULT_MIN_SCORE;

public class TransportExistsAction extends TransportBroadcastAction<ExistsRequest, ExistsResponse, ShardExistsRequest, ShardExistsResponse> {
@ -131,7 +131,7 @@ public class TransportExistsAction extends TransportBroadcastAction<ExistsReques
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
@ -174,9 +174,8 @@ public class TransportExistsAction extends TransportBroadcastAction<ExistsReques
}
context.preProcess();
try {
Lucene.EarlyTerminatingCollector existsCollector = Lucene.createExistsCollector();
Lucene.exists(context.searcher(), context.query(), existsCollector);
return new ShardExistsResponse(request.shardId(), existsCollector.exists());
boolean exists = Lucene.exists(context, context.query(), Lucene.createExistsCollector());
return new ShardExistsResponse(request.shardId(), exists);
} catch (Exception e) {
throw new QueryPhaseExecutionException(context, "failed to execute exists", e);
}

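The second hunk folds the collector handling into a single Lucene.exists(...) call that returns the boolean directly. The idea behind the exists collector is early termination: stop collecting the moment one document matches. A hedged stand-in written against the Lucene 5.x collector API that this branch builds on (this is not the Elasticsearch helper itself, just the technique it relies on):

import java.io.IOException;

import org.apache.lucene.search.CollectionTerminatedException;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.SimpleCollector;

final class ExistsCollectorSketch extends SimpleCollector {
    private boolean exists = false;

    @Override
    public void collect(int doc) {
        exists = true;
        // IndexSearcher treats this exception as "done with this segment",
        // so at most one hit is visited per segment.
        throw new CollectionTerminatedException();
    }

    @Override
    public boolean needsScores() {
        return false; // matching is enough; no scoring work needed
    }

    static boolean exists(IndexSearcher searcher, Query query) throws IOException {
        ExistsCollectorSketch collector = new ExistsCollectorSketch();
        searcher.search(query, collector);
        return collector.exists;
    }
}
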
@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.percolate;

import com.google.common.collect.Lists;
import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
@ -35,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@ -50,7 +50,7 @@ public class MultiPercolateRequest extends ActionRequest<MultiPercolateRequest>
private String[] indices;
private String documentType;
private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();
private List<PercolateRequest> requests = Lists.newArrayList();
private List<PercolateRequest> requests = new ArrayList<>();

/**
* Embeds a percolate request to this multi percolate request
@ -163,7 +163,7 @@ public class MultiPercolateRequest extends ActionRequest<MultiPercolateRequest>

@Override
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> indicesRequests = Lists.newArrayList();
List<IndicesRequest> indicesRequests = new ArrayList<>();
for (PercolateRequest percolateRequest : this.requests) {
indicesRequests.addAll(percolateRequest.subRequests());
}

@ -18,7 +18,6 @@
*/
package org.elasticsearch.action.percolate;

import com.google.common.collect.Lists;
import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
@ -35,6 +34,7 @@ import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentType;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@ -79,7 +79,7 @@ public class PercolateRequest extends BroadcastRequest<PercolateRequest> impleme

@Override
public List<? extends IndicesRequest> subRequests() {
List<IndicesRequest> requests = Lists.newArrayList();
List<IndicesRequest> requests = new ArrayList<>();
requests.add(this);
if (getRequest != null) {
requests.add(getRequest);

@ -19,8 +19,6 @@

package org.elasticsearch.action.percolate;

import com.google.common.collect.Lists;

import org.elasticsearch.ElasticsearchGenerationException;
import org.elasticsearch.action.support.ToXContentToBytes;
import org.elasticsearch.client.Requests;
@ -38,6 +36,7 @@ import org.elasticsearch.search.sort.ScoreSortBuilder;
import org.elasticsearch.search.sort.SortBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@ -99,7 +98,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
*/
public PercolateSourceBuilder addSort(SortBuilder sort) {
if (sorts == null) {
sorts = Lists.newArrayList();
sorts = new ArrayList<>();
}
sorts.add(sort);
return this;
@ -127,7 +126,7 @@ public class PercolateSourceBuilder extends ToXContentToBytes {
*/
public PercolateSourceBuilder addAggregation(AbstractAggregationBuilder aggregationBuilder) {
if (aggregations == null) {
aggregations = Lists.newArrayList();
aggregations = new ArrayList<>();
}
aggregations.add(aggregationBuilder);
return this;

@ -43,13 +43,12 @@ import org.elasticsearch.percolator.PercolatorService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
*
*/
@ -126,7 +125,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
@ -135,7 +134,7 @@ public class TransportPercolateAction extends TransportBroadcastAction<Percolate
if (!percolateShardResponse.isEmpty()) {
if (shardResults == null) {
percolatorTypeId = percolateShardResponse.percolatorTypeId();
shardResults = newArrayList();
shardResults = new ArrayList<>();
}
shardResults.add(percolateShardResponse);
}

@ -25,10 +25,10 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import static com.google.common.collect.Lists.newArrayList;
import static org.elasticsearch.action.ValidateActions.addValidationError;

/**
@ -47,7 +47,7 @@ public class ClearScrollRequest extends ActionRequest<ClearScrollRequest> {

public void addScrollId(String scrollId) {
if (scrollIds == null) {
scrollIds = newArrayList();
scrollIds = new ArrayList<>();
}
scrollIds.add(scrollId);
}

@ -19,8 +19,6 @@

package org.elasticsearch.action.search;

import com.google.common.collect.Lists;

import org.elasticsearch.action.ActionRequest;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.CompositeIndicesRequest;
@ -41,16 +39,14 @@ import java.util.List;
import java.util.Map;

import static org.elasticsearch.action.ValidateActions.addValidationError;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeBooleanValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringArrayValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.nodeStringValue;
import static org.elasticsearch.common.xcontent.support.XContentMapValues.*;

/**
* A multi search API request.
*/
public class MultiSearchRequest extends ActionRequest<MultiSearchRequest> implements CompositeIndicesRequest {

private List<SearchRequest> requests = Lists.newArrayList();
private List<SearchRequest> requests = new ArrayList<>();

private IndicesOptions indicesOptions = IndicesOptions.strictExpandOpenAndForbidClosed();

@ -20,6 +20,7 @@
package org.elasticsearch.action.search;

import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.search.type.ScrollIdForNode;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.cluster.ClusterService;
@ -27,7 +28,6 @@ import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.util.concurrent.CountDown;
@ -69,7 +69,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
final DiscoveryNodes nodes;
final CountDown expectedOps;
final ClearScrollRequest request;
final List<Tuple<String, Long>[]> contexts = new ArrayList<>();
final List<ScrollIdForNode[]> contexts = new ArrayList<>();
final ActionListener<ClearScrollResponse> listener;
final AtomicReference<Throwable> expHolder;
final AtomicInteger numberOfFreedSearchContexts = new AtomicInteger(0);
@ -81,7 +81,7 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
expectedOps = nodes.size();
} else {
for (String parsedScrollId : request.getScrollIds()) {
Tuple<String, Long>[] context = parseScrollId(parsedScrollId).getContext();
ScrollIdForNode[] context = parseScrollId(parsedScrollId).getContext();
expectedOps += context.length;
this.contexts.add(context);
}
@ -114,15 +114,15 @@ public class TransportClearScrollAction extends HandledTransportAction<ClearScro
});
}
} else {
for (Tuple<String, Long>[] context : contexts) {
for (Tuple<String, Long> target : context) {
final DiscoveryNode node = nodes.get(target.v1());
for (ScrollIdForNode[] context : contexts) {
for (ScrollIdForNode target : context) {
final DiscoveryNode node = nodes.get(target.getNode());
if (node == null) {
onFreedContext(false);
continue;
}

searchServiceTransportAction.sendFreeContext(node, target.v2(), request, new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
searchServiceTransportAction.sendFreeContext(node, target.getScrollId(), request, new ActionListener<SearchServiceTransportAction.SearchFreeContextResponse>() {
@Override
public void onResponse(SearchServiceTransportAction.SearchFreeContextResponse freed) {
onFreedContext(freed.isFreed());

@ -19,9 +19,6 @@

package org.elasticsearch.action.search.type;

import org.elasticsearch.Version;
import org.elasticsearch.common.collect.Tuple;

import java.util.Map;

/**
@ -39,11 +36,11 @@ public class ParsedScrollId {

private final String type;

private final Tuple<String, Long>[] context;
private final ScrollIdForNode[] context;

private final Map<String, String> attributes;

public ParsedScrollId(String source, String type, Tuple<String, Long>[] context, Map<String, String> attributes) {
public ParsedScrollId(String source, String type, ScrollIdForNode[] context, Map<String, String> attributes) {
this.source = source;
this.type = type;
this.context = context;
@ -58,7 +55,7 @@ public class ParsedScrollId {
return type;
}

public Tuple<String, Long>[] getContext() {
public ScrollIdForNode[] getContext() {
return context;
}

@ -17,20 +17,22 @@
* under the License.
*/

package org.elasticsearch.action.admin.indices.optimize;
package org.elasticsearch.action.search.type;

import org.elasticsearch.action.support.broadcast.BroadcastShardResponse;
import org.elasticsearch.index.shard.ShardId;
public class ScrollIdForNode {
private final String node;
private final long scrollId;

/**
*
*/
class ShardOptimizeResponse extends BroadcastShardResponse {

ShardOptimizeResponse() {
public ScrollIdForNode(String node, long scrollId) {
this.node = node;
this.scrollId = scrollId;
}

ShardOptimizeResponse(ShardId shardId) {
super(shardId);
public String getNode() {
return node;
}
}

public long getScrollId() {
return scrollId;
}
}

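Call sites now read target.getNode() and target.getScrollId() instead of the anonymous v1()/v2() of a Tuple<String, Long>, which is the whole point of the new holder. A sketch of decoding one scroll-id element into it, mirroring the TransportSearchHelper hunk further down (the "<contextId>:<nodeId>" element layout is taken from that hunk):

class ScrollIdElementParserSketch {
    // Each element of a composite scroll id encodes "<contextId>:<nodeId>".
    static ScrollIdForNode parse(String element) {
        int sep = element.indexOf(':');
        if (sep == -1) {
            throw new IllegalArgumentException("Malformed scrollId element [" + element + "]");
        }
        String node = element.substring(sep + 1);
        long contextId = Long.parseLong(element.substring(0, sep));
        return new ScrollIdForNode(node, contextId);
    }
}
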
@ -30,7 +30,6 @@ import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.util.concurrent.AtomicArray;
import org.elasticsearch.search.SearchPhaseResult;
import org.elasticsearch.search.internal.InternalScrollSearchRequest;
@ -103,14 +102,14 @@ public abstract class TransportSearchHelper {
throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
}

@SuppressWarnings({"unchecked"}) Tuple<String, Long>[] context = new Tuple[contextSize];
ScrollIdForNode[] context = new ScrollIdForNode[contextSize];
for (int i = 0; i < contextSize; i++) {
String element = elements[index++];
int sep = element.indexOf(':');
if (sep == -1) {
throw new IllegalArgumentException("Malformed scrollId [" + scrollId + "]");
}
context[i] = new Tuple<>(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
context[i] = new ScrollIdForNode(element.substring(sep + 1), Long.parseLong(element.substring(0, sep)));
}
Map<String, String> attributes;
int attributesSize = Integer.parseInt(elements[index++]);

@ -25,7 +25,6 @@ import org.elasticsearch.action.search.*;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -115,15 +114,15 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
return;
}

Tuple<String, Long>[] context = scrollId.getContext();
ScrollIdForNode[] context = scrollId.getContext();
for (int i = 0; i < context.length; i++) {
Tuple<String, Long> target = context[i];
DiscoveryNode node = nodes.get(target.v1());
ScrollIdForNode target = context[i];
DiscoveryNode node = nodes.get(target.getNode());
if (node != null) {
executePhase(i, node, target.v2());
executePhase(i, node, target.getScrollId());
} else {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
@ -132,11 +131,11 @@ public class TransportSearchScrollQueryAndFetchAction extends AbstractComponent
}
}

for (Tuple<String, Long> target : scrollId.getContext()) {
DiscoveryNode node = nodes.get(target.v1());
for (ScrollIdForNode target : scrollId.getContext()) {
DiscoveryNode node = nodes.get(target.getNode());
if (node == null) {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {

@ -26,7 +26,6 @@ import org.elasticsearch.action.search.*;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -124,15 +123,15 @@ public class TransportSearchScrollQueryThenFetchAction extends AbstractComponent
}
final AtomicInteger counter = new AtomicInteger(scrollId.getContext().length);

Tuple<String, Long>[] context = scrollId.getContext();
ScrollIdForNode[] context = scrollId.getContext();
for (int i = 0; i < context.length; i++) {
Tuple<String, Long> target = context[i];
DiscoveryNode node = nodes.get(target.v1());
ScrollIdForNode target = context[i];
DiscoveryNode node = nodes.get(target.getNode());
if (node != null) {
executeQueryPhase(i, counter, node, target.v2());
executeQueryPhase(i, counter, node, target.getScrollId());
} else {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {

@ -28,7 +28,6 @@ import org.elasticsearch.action.search.ShardSearchFailure;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.common.collect.Tuple;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
@ -125,15 +124,15 @@ public class TransportSearchScrollScanAction extends AbstractComponent {
return;
}

Tuple<String, Long>[] context = scrollId.getContext();
ScrollIdForNode[] context = scrollId.getContext();
for (int i = 0; i < context.length; i++) {
Tuple<String, Long> target = context[i];
DiscoveryNode node = nodes.get(target.v1());
ScrollIdForNode target = context[i];
DiscoveryNode node = nodes.get(target.getNode());
if (node != null) {
executePhase(i, node, target.v2());
executePhase(i, node, target.getScrollId());
} else {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
@ -142,11 +141,11 @@ public class TransportSearchScrollScanAction extends AbstractComponent {
}
}

for (Tuple<String, Long> target : scrollId.getContext()) {
DiscoveryNode node = nodes.get(target.v1());
for (ScrollIdForNode target : scrollId.getContext()) {
DiscoveryNode node = nodes.get(target.getNode());
if (node == null) {
if (logger.isDebugEnabled()) {
logger.debug("Node [" + target.v1() + "] not available for scroll request [" + scrollId.getSource() + "]");
logger.debug("Node [" + target.getNode() + "] not available for scroll request [" + scrollId.getSource() + "]");
}
successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {

@ -48,14 +48,13 @@ import org.elasticsearch.search.suggest.SuggestionSearchContext;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
* Defines the transport of a suggestion request across the cluster
*/
@ -115,7 +114,7 @@ public class TransportSuggestAction extends TransportBroadcastAction<SuggestRequ
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {

@ -19,13 +19,13 @@

package org.elasticsearch.action.support;

import com.google.common.collect.Lists;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ListenableActionFuture;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.threadpool.ThreadPool;

import java.util.ArrayList;
import java.util.List;

/**
@ -66,7 +66,7 @@ public abstract class AbstractListenableActionFuture<T, L> extends AdapterAction
((List) this.listeners).add(listener);
} else {
Object orig = listeners;
listeners = Lists.newArrayListWithCapacity(2);
listeners = new ArrayList<>(2);
((List) listeners).add(orig);
((List) listeners).add(listener);
}

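Context for this hunk: listeners is a plain Object field holding either a single listener or, once a second one registers, a list of them, a small-footprint trick for the common single-listener case. A self-contained sketch of the idiom, with Runnable standing in for the listener type:

import java.util.ArrayList;
import java.util.List;

class ListenerHolderSketch {
    // Either null, a single Runnable, or a List<Runnable>.
    private Object listeners;

    @SuppressWarnings("unchecked")
    synchronized void addListener(Runnable listener) {
        if (listeners == null) {
            listeners = listener;                   // common case: one listener, no list
        } else if (listeners instanceof List) {
            ((List<Runnable>) listeners).add(listener);
        } else {
            Object orig = listeners;                // second listener arrives: upgrade to a list
            List<Runnable> list = new ArrayList<>(2);
            list.add((Runnable) orig);
            list.add(listener);
            listeners = list;
        }
    }
}
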
@ -47,7 +47,6 @@ import java.util.concurrent.atomic.AtomicReferenceArray;
public abstract class TransportBroadcastAction<Request extends BroadcastRequest, Response extends BroadcastResponse, ShardRequest extends BroadcastShardRequest, ShardResponse extends BroadcastShardResponse>
extends HandledTransportAction<Request, Response> {

protected final ThreadPool threadPool;
protected final ClusterService clusterService;
protected final TransportService transportService;

@ -59,7 +58,6 @@ public abstract class TransportBroadcastAction<Request extends BroadcastRequest,
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);
this.clusterService = clusterService;
this.transportService = transportService;
this.threadPool = threadPool;
this.transportShardAction = actionName + "[s]";

transportService.registerRequestHandler(transportShardAction, shardRequest, shardExecutor, new ShardTransportHandler());

@ -0,0 +1,559 @@
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/

package org.elasticsearch.action.support.broadcast.node;

import com.google.common.collect.Maps;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.FailedNodeException;
import org.elasticsearch.action.IndicesRequest;
import org.elasticsearch.action.NoShardAvailableActionException;
import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.DefaultShardOperationFailedException;
import org.elasticsearch.action.support.HandledTransportAction;
import org.elasticsearch.action.support.IndicesOptions;
import org.elasticsearch.action.support.TransportActions;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.broadcast.BroadcastShardOperationFailedException;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.node.DiscoveryNodes;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.NodeShouldNotConnectException;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.Callable;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReferenceArray;

/**
* Abstraction for transporting aggregated shard-level operations in a single request (NodeRequest) per node
* and executing the shard-level operations serially on the receiving node. Each shard-level operation can produce a
* result (ShardOperationResult); these per-node shard-level results are aggregated into a single result
* (BroadcastByNodeResponse) to the coordinating node. These per-node results are aggregated into a single result (Result)
* to the client.
*
* @param <Request> the underlying client request
* @param <Response> the response to the client request
* @param <ShardOperationResult> per-shard operation results
*/
public abstract class TransportBroadcastByNodeAction<Request extends BroadcastRequest,
Response extends BroadcastResponse,
ShardOperationResult extends Streamable> extends HandledTransportAction<Request, Response> {

private final ClusterService clusterService;
private final TransportService transportService;

final String transportNodeBroadcastAction;

public TransportBroadcastByNodeAction(
Settings settings,
String actionName,
ThreadPool threadPool,
ClusterService clusterService,
TransportService transportService,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
Class<Request> request,
String executor) {
super(settings, actionName, threadPool, transportService, actionFilters, indexNameExpressionResolver, request);

this.clusterService = clusterService;
this.transportService = transportService;

transportNodeBroadcastAction = actionName + "[n]";

transportService.registerRequestHandler(transportNodeBroadcastAction, new Callable<NodeRequest>() {
@Override
public NodeRequest call() throws Exception {
return new NodeRequest();
}
}, executor, new BroadcastByNodeTransportRequestHandler());
}

private final Response newResponse(Request request, AtomicReferenceArray responses, List<NoShardAvailableActionException> unavailableShardExceptions, Map<String, List<ShardRouting>> nodes) {
int totalShards = 0;
int successfulShards = 0;
List<ShardOperationResult> broadcastByNodeResponses = new ArrayList<>();
List<ShardOperationFailedException> exceptions = new ArrayList<>();
for (int i = 0; i < responses.length(); i++) {
if (responses.get(i) instanceof FailedNodeException) {
FailedNodeException exception = (FailedNodeException) responses.get(i);
totalShards += nodes.get(exception.nodeId()).size();
for (ShardRouting shard : nodes.get(exception.nodeId())) {
exceptions.add(new DefaultShardOperationFailedException(shard.getIndex(), shard.getId(), exception));
}
} else {
NodeResponse response = (NodeResponse) responses.get(i);
broadcastByNodeResponses.addAll(response.results);
totalShards += response.getTotalShards();
successfulShards += response.getSuccessfulShards();
for (BroadcastShardOperationFailedException throwable : response.getExceptions()) {
if (!TransportActions.isShardNotAvailableException(throwable)) {
exceptions.add(new DefaultShardOperationFailedException(throwable.getIndex(), throwable.getShardId().getId(), throwable));
}
}
}
}
totalShards += unavailableShardExceptions.size();
int failedShards = exceptions.size();
return newResponse(request, totalShards, successfulShards, failedShards, broadcastByNodeResponses, exceptions);
}

/**
* Deserialize a shard-level result from an input stream
*
* @param in input stream
* @return a deserialized shard-level result
* @throws IOException
*/
protected abstract ShardOperationResult readShardResult(StreamInput in) throws IOException;

/**
* Creates a new response to the underlying request.
*
* @param request the underlying request
* @param totalShards the total number of shards considered for execution of the operation
* @param successfulShards the total number of shards for which execution of the operation was successful
* @param failedShards the total number of shards for which execution of the operation failed
* @param results the per-node aggregated shard-level results
* @param shardFailures the exceptions corresponding to shard operation failures
* @return the response
*/
protected abstract Response newResponse(Request request, int totalShards, int successfulShards, int failedShards, List<ShardOperationResult> results, List<ShardOperationFailedException> shardFailures);

/**
* Deserialize a request from an input stream
*
* @param in input stream
* @return a de-serialized request
* @throws IOException
*/
protected abstract Request readRequestFrom(StreamInput in) throws IOException;

/**
* Executes the shard-level operation. This method is called once per shard serially on the receiving node.
*
* @param request the node-level request
* @param shardRouting the shard on which to execute the operation
* @return the result of the shard-level operation for the shard
*/
protected abstract ShardOperationResult shardOperation(Request request, ShardRouting shardRouting);

/**
* Determines the shards this operation will be executed on. The operation is executed once per shard.
*
* @param clusterState the cluster state
* @param request the underlying request
* @param concreteIndices the concrete indices on which to execute the operation
* @return the shards on which to execute the operation
*/
protected abstract ShardsIterator shards(ClusterState clusterState, Request request, String[] concreteIndices);

/**
* Executes a global block check before polling the cluster state.
*
* @param state the cluster state
* @param request the underlying request
* @return a non-null exception if the operation is blocked
*/
protected abstract ClusterBlockException checkGlobalBlock(ClusterState state, Request request);

/**
* Executes a global request-level check before polling the cluster state.
*
* @param state the cluster state
* @param request the underlying request
* @param concreteIndices the concrete indices on which to execute the operation
* @return a non-null exception if the operation is blocked
*/
protected abstract ClusterBlockException checkRequestBlock(ClusterState state, Request request, String[] concreteIndices);

@Override
protected void doExecute(Request request, ActionListener<Response> listener) {
new AsyncAction(request, listener).start();
}

protected class AsyncAction {
private final Request request;
private final ActionListener<Response> listener;
private final ClusterState clusterState;
private final DiscoveryNodes nodes;
private final Map<String, List<ShardRouting>> nodeIds;
private final AtomicReferenceArray<Object> responses;
private final AtomicInteger counter = new AtomicInteger();
private List<NoShardAvailableActionException> unavailableShardExceptions = new ArrayList<>();

protected AsyncAction(Request request, ActionListener<Response> listener) {
this.request = request;
this.listener = listener;

clusterState = clusterService.state();
nodes = clusterState.nodes();

ClusterBlockException globalBlockException = checkGlobalBlock(clusterState, request);
if (globalBlockException != null) {
throw globalBlockException;
}

String[] concreteIndices = indexNameExpressionResolver.concreteIndices(clusterState, request);
ClusterBlockException requestBlockException = checkRequestBlock(clusterState, request, concreteIndices);
if (requestBlockException != null) {
throw requestBlockException;
}

logger.trace("resolving shards for [{}] based on cluster state version [{}]", actionName, clusterState.version());
ShardsIterator shardIt = shards(clusterState, request, concreteIndices);
nodeIds = Maps.newHashMap();

for (ShardRouting shard : shardIt.asUnordered()) {
if (shard.assignedToNode()) {
String nodeId = shard.currentNodeId();
if (!nodeIds.containsKey(nodeId)) {
nodeIds.put(nodeId, new ArrayList<ShardRouting>());
}
nodeIds.get(nodeId).add(shard);
} else {
unavailableShardExceptions.add(
new NoShardAvailableActionException(
shard.shardId(),
" no shards available for shard " + shard.toString() + " while executing " + actionName
)
);
}
}

responses = new AtomicReferenceArray<>(nodeIds.size());
}

public void start() {
if (nodeIds.size() == 0) {
try {
onCompletion();
} catch (Throwable e) {
listener.onFailure(e);
}
} else {
int nodeIndex = -1;
for (Map.Entry<String, List<ShardRouting>> entry : nodeIds.entrySet()) {
nodeIndex++;
DiscoveryNode node = nodes.get(entry.getKey());
sendNodeRequest(node, entry.getValue(), nodeIndex);
}
}
}

private void sendNodeRequest(final DiscoveryNode node, List<ShardRouting> shards, final int nodeIndex) {
try {
NodeRequest nodeRequest = new NodeRequest(node.getId(), request, shards);
transportService.sendRequest(node, transportNodeBroadcastAction, nodeRequest, new BaseTransportResponseHandler<NodeResponse>() {
@Override
public NodeResponse newInstance() {
return new NodeResponse();
}

@Override
public void handleResponse(NodeResponse response) {
onNodeResponse(node, nodeIndex, response);
}

@Override
public void handleException(TransportException exp) {
onNodeFailure(node, nodeIndex, exp);
}

@Override
public String executor() {
return ThreadPool.Names.SAME;
}
});
} catch (Throwable e) {
onNodeFailure(node, nodeIndex, e);
}
}

protected void onNodeResponse(DiscoveryNode node, int nodeIndex, NodeResponse response) {
logger.trace("received response for [{}] from node [{}]", actionName, node.id());

// this is defensive to protect against the possibility of double invocation
// the current implementation of TransportService#sendRequest guards against this
// but concurrency is hard, safety is important, and the small performance loss here does not matter
if (responses.compareAndSet(nodeIndex, null, response)) {
if (counter.incrementAndGet() == responses.length()) {
onCompletion();
}
}
}

protected void onNodeFailure(DiscoveryNode node, int nodeIndex, Throwable t) {
String nodeId = node.id();
if (logger.isDebugEnabled() && !(t instanceof NodeShouldNotConnectException)) {
logger.debug("failed to execute [{}] on node [{}]", t, actionName, nodeId);
}

// this is defensive to protect against the possibility of double invocation
// the current implementation of TransportService#sendRequest guards against this
// but concurrency is hard, safety is important, and the small performance loss here does not matter
if (responses.compareAndSet(nodeIndex, null, new FailedNodeException(nodeId, "Failed node [" + nodeId + "]", t))) {
if (counter.incrementAndGet() == responses.length()) {
onCompletion();
}
}
}

protected void onCompletion() {
Response response = null;
try {
response = newResponse(request, responses, unavailableShardExceptions, nodeIds);
} catch (Throwable t) {
logger.debug("failed to combine responses from nodes", t);
listener.onFailure(t);
}
if (response != null) {
try {
listener.onResponse(response);
} catch (Throwable t) {
listener.onFailure(t);
}
}
}
}

class BroadcastByNodeTransportRequestHandler implements TransportRequestHandler<NodeRequest> {
@Override
public void messageReceived(final NodeRequest request, TransportChannel channel) throws Exception {
List<ShardRouting> shards = request.getShards();
final int totalShards = shards.size();
logger.trace("[{}] executing operation on [{}] shards", actionName, totalShards);
final Object[] shardResultOrExceptions = new Object[totalShards];

int shardIndex = -1;
for (final ShardRouting shardRouting : shards) {
shardIndex++;
onShardOperation(request, shardResultOrExceptions, shardIndex, shardRouting);
}

List<BroadcastShardOperationFailedException> accumulatedExceptions = new ArrayList<>();
List<ShardOperationResult> results = new ArrayList<>();
for (int i = 0; i < totalShards; i++) {
if (shardResultOrExceptions[i] instanceof BroadcastShardOperationFailedException) {
accumulatedExceptions.add((BroadcastShardOperationFailedException) shardResultOrExceptions[i]);
} else {
results.add((ShardOperationResult) shardResultOrExceptions[i]);
}
}

channel.sendResponse(new NodeResponse(request.getNodeId(), totalShards, results, accumulatedExceptions));
}

private void onShardOperation(final NodeRequest request, final Object[] shardResults, final int shardIndex, final ShardRouting shardRouting) {
try {
logger.trace("[{}] executing operation for shard [{}]", actionName, shardRouting.shortSummary());
ShardOperationResult result = shardOperation(request.indicesLevelRequest, shardRouting);
shardResults[shardIndex] = result;
logger.trace("[{}] completed operation for shard [{}]", actionName, shardRouting.shortSummary());
} catch (Throwable t) {
BroadcastShardOperationFailedException e = new BroadcastShardOperationFailedException(shardRouting.shardId(), "operation " + actionName + " failed", t);
e.setIndex(shardRouting.getIndex());
e.setShard(shardRouting.shardId());
shardResults[shardIndex] = e;
logger.debug("[{}] failed to execute operation for shard [{}]", e, actionName, shardRouting.shortSummary());
}
}
}

protected class NodeRequest extends TransportRequest implements IndicesRequest {
private String nodeId;

private List<ShardRouting> shards;

protected Request indicesLevelRequest;

protected NodeRequest() {
}

public NodeRequest(String nodeId, Request request, List<ShardRouting> shards) {
super(request);
this.indicesLevelRequest = request;
this.shards = shards;
this.nodeId = nodeId;
}

public List<ShardRouting> getShards() {
return shards;
}

public String getNodeId() {
return nodeId;
}

public String[] indices() {
return indicesLevelRequest.indices();
}

public IndicesOptions indicesOptions() {
return indicesLevelRequest.indicesOptions();
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
indicesLevelRequest = readRequestFrom(in);
int size = in.readVInt();
shards = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
shards.add(ShardRouting.readShardRoutingEntry(in));
}
nodeId = in.readString();
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
indicesLevelRequest.writeTo(out);
int size = shards.size();
out.writeVInt(size);
for (int i = 0; i < size; i++) {
shards.get(i).writeTo(out);
}
out.writeString(nodeId);
}
}

class NodeResponse extends TransportResponse {
protected String nodeId;
protected int totalShards;
protected List<BroadcastShardOperationFailedException> exceptions;
protected List<ShardOperationResult> results;

public NodeResponse() {
}

public NodeResponse(String nodeId,
int totalShards,
List<ShardOperationResult> results,
List<BroadcastShardOperationFailedException> exceptions) {
this.nodeId = nodeId;
this.totalShards = totalShards;
this.results = results;
this.exceptions = exceptions;
}

public String getNodeId() {
return nodeId;
}

public int getTotalShards() {
return totalShards;
}

public int getSuccessfulShards() {
return results.size();
}

public List<BroadcastShardOperationFailedException> getExceptions() {
return exceptions;
}

@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
nodeId = in.readString();
totalShards = in.readVInt();
int resultsSize = in.readVInt();
results = new ArrayList<>(resultsSize);
for (; resultsSize > 0; resultsSize--) {
final ShardOperationResult result = in.readBoolean() ? readShardResult(in) : null;
results.add(result);
}
if (in.readBoolean()) {
int failureShards = in.readVInt();
exceptions = new ArrayList<>(failureShards);
for (int i = 0; i < failureShards; i++) {
exceptions.add(new BroadcastShardOperationFailedException(in));
}
} else {
exceptions = null;
}
}

@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(nodeId);
out.writeVInt(totalShards);
out.writeVInt(results.size());
for (ShardOperationResult result : results) {
out.writeOptionalStreamable(result);
}
out.writeBoolean(exceptions != null);
if (exceptions != null) {
int failureShards = exceptions.size();
out.writeVInt(failureShards);
for (int i = 0; i < failureShards; i++) {
exceptions.get(i).writeTo(out);
}
}
}
}

/**
* Can be used for implementations of {@link #shardOperation(BroadcastRequest, ShardRouting) shardOperation} for
* which there is no shard-level return value.
*/
public final static class EmptyResult implements Streamable {
public static EmptyResult INSTANCE = new EmptyResult();

private EmptyResult() {
}

@Override
public void readFrom(StreamInput in) throws IOException {
}

@Override
public void writeTo(StreamOutput out) throws IOException {
}

public static EmptyResult readEmptyResultFrom(StreamInput in) {
return INSTANCE;
}
}
}

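For orientation, a hedged sketch of what a minimal concrete subclass supplies: a hypothetical TransportTouchShardsAction with no per-shard return value, leaning on the EmptyResult helper above. The TouchRequest/TouchResponse stubs and the action name are invented for illustration; only the override signatures come from the class as committed:

import java.io.IOException;
import java.util.List;

import org.elasticsearch.action.ShardOperationFailedException;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.broadcast.BroadcastRequest;
import org.elasticsearch.action.support.broadcast.BroadcastResponse;
import org.elasticsearch.action.support.broadcast.node.TransportBroadcastByNodeAction;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.ShardsIterator;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

// Hypothetical request/response types, just enough to satisfy the type bounds.
class TouchRequest extends BroadcastRequest<TouchRequest> {}

class TouchResponse extends BroadcastResponse {
    TouchResponse() {}
    TouchResponse(int totalShards, int successfulShards, int failedShards,
                  List<ShardOperationFailedException> shardFailures) {
        super(totalShards, successfulShards, failedShards, shardFailures);
    }
}

public class TransportTouchShardsAction
        extends TransportBroadcastByNodeAction<TouchRequest, TouchResponse, TransportBroadcastByNodeAction.EmptyResult> {

    @Inject
    public TransportTouchShardsAction(Settings settings, ThreadPool threadPool, ClusterService clusterService,
                                      TransportService transportService, ActionFilters actionFilters,
                                      IndexNameExpressionResolver indexNameExpressionResolver) {
        super(settings, "indices:admin/touch", threadPool, clusterService, transportService,
                actionFilters, indexNameExpressionResolver, TouchRequest.class, ThreadPool.Names.MANAGEMENT);
    }

    @Override
    protected EmptyResult readShardResult(StreamInput in) throws IOException {
        return EmptyResult.readEmptyResultFrom(in);
    }

    @Override
    protected TouchResponse newResponse(TouchRequest request, int totalShards, int successfulShards,
                                        int failedShards, List<EmptyResult> results,
                                        List<ShardOperationFailedException> shardFailures) {
        return new TouchResponse(totalShards, successfulShards, failedShards, shardFailures);
    }

    @Override
    protected TouchRequest readRequestFrom(StreamInput in) throws IOException {
        TouchRequest request = new TouchRequest();
        request.readFrom(in);
        return request;
    }

    @Override
    protected EmptyResult shardOperation(TouchRequest request, ShardRouting shardRouting) {
        // the actual per-shard work goes here; it runs serially on the node that owns the shard
        return EmptyResult.INSTANCE;
    }

    @Override
    protected ShardsIterator shards(ClusterState clusterState, TouchRequest request, String[] concreteIndices) {
        return clusterState.routingTable().allShards(concreteIndices);
    }

    @Override
    protected ClusterBlockException checkGlobalBlock(ClusterState state, TouchRequest request) {
        return null; // this hypothetical action adds no global block checks
    }

    @Override
    protected ClusterBlockException checkRequestBlock(ClusterState state, TouchRequest request, String[] concreteIndices) {
        return null; // nor index-level ones
    }
}
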
@ -97,7 +97,7 @@ public abstract class TransportMasterNodeAction<Request extends MasterNodeReques
listener.onFailure(blockException);
return;
}
logger.trace("can't execute due to a cluster block: [{}], retrying", blockException);
logger.trace("can't execute due to a cluster block, retrying", blockException);
observer.waitForNextChange(
new ClusterStateObserver.Listener() {
@Override

@ -46,7 +46,6 @@ public abstract class ReplicationRequest<T extends ReplicationRequest> extends A
protected TimeValue timeout = DEFAULT_TIMEOUT;
protected String index;

private boolean threadedOperation = true;
private WriteConsistencyLevel consistencyLevel = WriteConsistencyLevel.DEFAULT;
private volatile boolean canHaveDuplicates = false;

@ -76,7 +75,6 @@ public abstract class ReplicationRequest<T extends ReplicationRequest> extends A
super(originalRequest);
this.timeout = request.timeout();
this.index = request.index();
this.threadedOperation = request.operationThreaded();
this.consistencyLevel = request.consistencyLevel();
}

@ -91,23 +89,6 @@ public abstract class ReplicationRequest<T extends ReplicationRequest> extends A
return canHaveDuplicates;
}

/**
* Controls if the operation will be executed on a separate thread when executed locally.
*/
public final boolean operationThreaded() {
return threadedOperation;
}

/**
* Controls if the operation will be executed on a separate thread when executed locally. Defaults
* to <tt>true</tt> when running in embedded mode.
*/
@SuppressWarnings("unchecked")
public final T operationThreaded(boolean threadedOperation) {
this.threadedOperation = threadedOperation;
return (T) this;
}

/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/

@ -35,16 +35,6 @@ public abstract class ReplicationRequestBuilder<Request extends ReplicationReque
super(client, action, request);
}

/**
* Controls if the operation will be executed on a separate thread when executed locally. Defaults
* to <tt>true</tt> when running in embedded mode.
*/
@SuppressWarnings("unchecked")
public final RequestBuilder setOperationThreaded(boolean threadedOperation) {
request.operationThreaded(threadedOperation);
return (RequestBuilder) this;
}

/**
* A timeout to wait if the index operation can't be performed immediately. Defaults to <tt>1m</tt>.
*/

@ -211,8 +211,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
class OperationTransportHandler implements TransportRequestHandler<Request> {
@Override
public void messageReceived(final Request request, final TransportChannel channel) throws Exception {
// if we have a local operation, execute it on a thread since we don't spawn
request.operationThreaded(true);
execute(request, new ActionListener<Response>() {
@Override
public void onResponse(Response result) {
@ -440,21 +438,17 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
protected void routeRequestOrPerformLocally(final ShardRouting primary, final ShardIterator shardsIt) {
if (primary.currentNodeId().equals(observer.observedState().nodes().localNodeId())) {
try {
if (internalRequest.request().operationThreaded()) {
threadPool.executor(executor).execute(new AbstractRunnable() {
@Override
public void onFailure(Throwable t) {
finishAsFailed(t);
}
threadPool.executor(executor).execute(new AbstractRunnable() {
@Override
public void onFailure(Throwable t) {
finishAsFailed(t);
}

@Override
protected void doRun() throws Exception {
performOnPrimary(primary, shardsIt);
}
});
} else {
performOnPrimary(primary, shardsIt);
}
@Override
protected void doRun() throws Exception {
performOnPrimary(primary, shardsIt);
}
});
} catch (Throwable t) {
finishAsFailed(t);
}
@ -506,9 +500,6 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ
finishAsFailed(failure);
return;
}
// make it a threaded operation so we fork on the discovery listener thread
internalRequest.request().operationThreaded(true);

observer.waitForNextChange(new ClusterStateObserver.Listener() {
@Override
public void onNewClusterState(ClusterState state) {
@ -904,43 +895,33 @@ public abstract class TransportReplicationAction<Request extends ReplicationRequ

});
} else {
if (replicaRequest.operationThreaded()) {
try {
threadPool.executor(executor).execute(new AbstractRunnable() {
@Override
protected void doRun() {
try {
shardOperationOnReplica(shard.shardId(), replicaRequest);
onReplicaSuccess();
} catch (Throwable e) {
onReplicaFailure(nodeId, e);
failReplicaIfNeeded(shard.index(), shard.id(), e);
}
try {
threadPool.executor(executor).execute(new AbstractRunnable() {
@Override
protected void doRun() {
try {
shardOperationOnReplica(shard.shardId(), replicaRequest);
onReplicaSuccess();
} catch (Throwable e) {
onReplicaFailure(nodeId, e);
failReplicaIfNeeded(shard.index(), shard.id(), e);
}
}

// we must never reject because of thread pool capacity on replicas
@Override
public boolean isForceExecution() {
return true;
}
// we must never reject because of thread pool capacity on replicas
@Override
public boolean isForceExecution() {
return true;
}

@Override
public void onFailure(Throwable t) {
onReplicaFailure(nodeId, t);
}
});
} catch (Throwable e) {
failReplicaIfNeeded(shard.index(), shard.id(), e);
onReplicaFailure(nodeId, e);
}
} else {
try {
shardOperationOnReplica(shard.shardId(), replicaRequest);
onReplicaSuccess();
} catch (Throwable e) {
failReplicaIfNeeded(shard.index(), shard.id(), e);
onReplicaFailure(nodeId, e);
}
@Override
public void onFailure(Throwable t) {
onReplicaFailure(nodeId, t);
}
});
} catch (Throwable e) {
failReplicaIfNeeded(shard.index(), shard.id(), e);
onReplicaFailure(nodeId, e);
}
}
}
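
The isForceExecution() override retained above matters because a replica write rejected for queue capacity would leave the replica out of sync; the (Elasticsearch-specific) AbstractRunnable asks the pool to run it regardless. For contrast, a plain JDK bounded pool simply rejects once saturated, which is exactly the failure mode being avoided; the sketch below uses invented pool sizes:

import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.RejectedExecutionException;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class RejectionSketch {
    public static void main(String[] args) {
        // One worker, a queue of one: trivially saturated.
        ExecutorService pool = new ThreadPoolExecutor(1, 1, 0, TimeUnit.SECONDS, new ArrayBlockingQueue<Runnable>(1));
        pool.execute(RejectionSketch::sleepBriefly); // occupies the worker
        pool.execute(() -> {});                      // fills the queue
        try {
            pool.execute(() -> {});                  // rejected - fatal if this were a replica write
        } catch (RejectedExecutionException e) {
            System.out.println("rejected: " + e.getMessage());
        }
        pool.shutdown();
    }

    static void sleepBriefly() {
        try { Thread.sleep(200); } catch (InterruptedException e) { Thread.currentThread().interrupt(); }
    }
}
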
@ -42,13 +42,12 @@ import org.elasticsearch.search.dfs.DfsSearchResult;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.atomic.AtomicReferenceArray;

import static com.google.common.collect.Lists.newArrayList;

/**
* Get the dfs only with no fetch phase. This is for internal use only.
*/
@ -115,7 +114,7 @@ public class TransportDfsOnlyAction extends TransportBroadcastAction<DfsOnlyRequ
} else if (shardResponse instanceof BroadcastShardOperationFailedException) {
failedShards++;
if (shardFailures == null) {
shardFailures = newArrayList();
shardFailures = new ArrayList<>();
}
shardFailures.add(new DefaultShardOperationFailedException((BroadcastShardOperationFailedException) shardResponse));
} else {
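
This hunk also shows the commit-wide cleanup pattern: Guava's Lists.newArrayList() was only ever a pre-Java-7 workaround for missing generic inference, and the diamond operator makes the helper redundant. A trivial before/after, runnable on any JDK 7+:

import java.util.ArrayList;
import java.util.List;

public class DiamondSketch {
    public static void main(String[] args) {
        // Before: List<String> shardFailures = Lists.newArrayList();  (Guava helper)
        // After: the compiler infers the type argument from the left-hand side.
        List<String> shardFailures = new ArrayList<>();
        shardFailures.add("shard [0] failed");
        System.out.println(shardFailures);
    }
}
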
@ -131,7 +131,6 @@ public class UpdateHelper extends AbstractComponent {
.routing(request.routing())
.parent(request.parent())
.consistencyLevel(request.consistencyLevel());
indexRequest.operationThreaded(false);
if (request.versionType() != VersionType.INTERNAL) {
// in all but the internal versioning mode, we want to create the new document using the given version.
indexRequest.version(request.version()).versionType(request.versionType());
@ -227,13 +226,11 @@ public class UpdateHelper extends AbstractComponent {
.consistencyLevel(request.consistencyLevel())
.timestamp(timestamp).ttl(ttl)
.refresh(request.refresh());
indexRequest.operationThreaded(false);
return new Result(indexRequest, Operation.INDEX, updatedSourceAsMap, updateSourceContentType);
} else if ("delete".equals(operation)) {
DeleteRequest deleteRequest = Requests.deleteRequest(request.index()).type(request.type()).id(request.id()).routing(routing).parent(parent)
.version(updateVersion).versionType(request.versionType())
.consistencyLevel(request.consistencyLevel());
deleteRequest.operationThreaded(false);
return new Result(deleteRequest, Operation.DELETE, updatedSourceAsMap, updateSourceContentType);
} else if ("none".equals(operation)) {
UpdateResponse update = new UpdateResponse(getResult.getIndex(), getResult.getType(), getResult.getId(), getResult.getVersion(), false);
@ -78,7 +78,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>

private boolean scriptedUpsert = false;
private boolean docAsUpsert = false;
private boolean detectNoop = false;
private boolean detectNoop = true;

@Nullable
private IndexRequest doc;
@ -243,7 +243,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
* The script to execute. Note, make sure not to send a different script each
* time; instead, use script params if possible with the same
* (automatically compiled) script.
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -256,7 +256,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
* The script to execute. Note, make sure not to send a different script each
* time; instead, use script params if possible with the same
* (automatically compiled) script.
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -267,7 +267,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>

/**
* The language of the script to execute.
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -286,7 +286,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>

/**
* Add a script parameter.
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -311,7 +311,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>

/**
* Sets the script parameters to use with the script.
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -338,7 +338,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
* The script to execute. Note, make sure not to send a different script each
* time; instead, use script params if possible with the same
* (automatically compiled) script.
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -360,7 +360,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
* The script type
* @param scriptParams
* The script parameters
*
*
* @deprecated Use {@link #script(Script)} instead
*/
@Deprecated
@ -623,7 +623,7 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
}

/**
* Should this update attempt to detect if it is a noop?
* Should this update attempt to detect if it is a noop? Defaults to true.
* @return this for chaining
*/
public UpdateRequest detectNoop(boolean detectNoop) {
@ -631,6 +631,9 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
return this;
}

/**
* Should this update attempt to detect if it is a noop? Defaults to true.
*/
public boolean detectNoop() {
return detectNoop;
}
@ -699,16 +702,15 @@ public class UpdateRequest extends InstanceShardOperationRequest<UpdateRequest>
this.docAsUpsert = shouldUpsertDoc;
return this;
}

public boolean scriptedUpsert() {
return this.scriptedUpsert;
}

public UpdateRequest scriptedUpsert(boolean scriptedUpsert) {
this.scriptedUpsert = scriptedUpsert;
return this;
}

@Override
public void readFrom(StreamInput in) throws IOException {
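
With detectNoop now defaulting to true, an update whose merged source equals the existing source is acknowledged without indexing a new version. The sketch below is a loose re-derivation of that idea using a flat map merge; the real implementation merges and compares structured XContent, so treat this as illustrative only:

import java.util.HashMap;
import java.util.Map;

public class DetectNoopSketch {
    // Illustrative: the update is a noop when applying the partial doc changes nothing.
    static boolean isNoop(Map<String, Object> existingSource, Map<String, Object> partialDoc) {
        Map<String, Object> merged = new HashMap<>(existingSource);
        merged.putAll(partialDoc);
        return merged.equals(existingSource);
    }

    public static void main(String[] args) {
        Map<String, Object> source = new HashMap<>();
        source.put("views", 42);

        Map<String, Object> sameValue = new HashMap<>();
        sameValue.put("views", 42);
        System.out.println(isNoop(source, sameValue)); // true -> skip the write

        Map<String, Object> newValue = new HashMap<>();
        newValue.put("views", 43);
        System.out.println(isNoop(source, newValue));  // false -> index a new version
    }
}
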
@ -308,6 +308,7 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U

/**
* Sets whether to perform extra effort to detect noop updates via docAsUpsert.
* Defaults to true.
*/
public UpdateRequestBuilder setDetectNoop(boolean detectNoop) {
request.detectNoop(detectNoop);
@ -322,4 +323,14 @@ public class UpdateRequestBuilder extends InstanceShardOperationRequestBuilder<U
request.scriptedUpsert(scriptedUpsert);
return this;
}

/**
* Set the new ttl of the document. Note that if detectNoop is true (the default)
* and the source of the document isn't changed then the ttl update won't take
* effect.
*/
public UpdateRequestBuilder setTtl(Long ttl) {
request.doc().ttl(ttl);
return this;
}
}
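
A possible usage of the new setTtl(Long) builder method, following the caveat in its Javadoc: a pure ttl refresh leaves the source unchanged, so detectNoop has to be disabled for the ttl to be applied. The index, type, id and doc below are made up, and client must be an already connected org.elasticsearch.client.Client:

import org.elasticsearch.client.Client;

public class TtlUpdateSketch {
    // Hypothetical usage sketch; not taken from the commit itself.
    static void refreshTtl(Client client) {
        client.prepareUpdate("logs", "event", "1")
                .setDoc("{\"level\":\"warn\"}")
                .setDetectNoop(false) // otherwise an unchanged source makes the ttl update a noop
                .setTtl(60000L)       // new ttl in milliseconds
                .get();
    }
}
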
@ -21,7 +21,6 @@ package org.elasticsearch.client.transport;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
import org.elasticsearch.ExceptionsHelper;
import org.elasticsearch.Version;
@ -43,9 +42,20 @@ import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.ConcurrentCollections;
import org.elasticsearch.common.util.concurrent.FutureUtils;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.*;
import org.elasticsearch.transport.BaseTransportResponseHandler;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.FutureTransportResponseHandler;
import org.elasticsearch.transport.TransportException;
import org.elasticsearch.transport.TransportRequestOptions;
import org.elasticsearch.transport.TransportService;

import java.util.*;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ScheduledFuture;
@ -143,7 +153,7 @@ public class TransportClientNodesService extends AbstractComponent {
if (closed) {
throw new IllegalStateException("transport client is closed, can't add an address");
}
List<TransportAddress> filtered = Lists.newArrayListWithExpectedSize(transportAddresses.length);
List<TransportAddress> filtered = new ArrayList<>(transportAddresses.length);
for (TransportAddress transportAddress : transportAddresses) {
boolean found = false;
for (DiscoveryNode otherNode : listedNodes) {
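
One subtlety in this replacement: both Lists.newArrayListWithExpectedSize(n) and new ArrayList<>(n) only pre-size the backing array; neither pre-fills elements, so the list still starts empty. A quick check:

import java.util.ArrayList;
import java.util.List;

public class CapacitySketch {
    public static void main(String[] args) {
        List<String> filtered = new ArrayList<>(8); // capacity hint only
        System.out.println(filtered.size());        // prints 0
        filtered.add("127.0.0.1:9300");
        System.out.println(filtered.size());        // prints 1
    }
}
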
@ -21,11 +21,11 @@ package org.elasticsearch.cluster;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNodes;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

@ -93,7 +93,7 @@ public class ClusterChangedEvent {
String index = cursor.value;
if (!previousState.metaData().hasIndex(index)) {
if (created == null) {
created = Lists.newArrayList();
created = new ArrayList<>();
}
created.add(index);
}
@ -126,7 +126,7 @@ public class ClusterChangedEvent {
String index = cursor.value;
if (!state.metaData().hasIndex(index)) {
if (deleted == null) {
deleted = Lists.newArrayList();
deleted = new ArrayList<>();
}
deleted.add(index);
}
@ -32,28 +32,53 @@ import java.util.Map;
*/
public class ClusterInfo {

private final Map<String, DiskUsage> usages;
private final Map<String, DiskUsage> leastAvailableSpaceUsage;
private final Map<String, DiskUsage> mostAvailableSpaceUsage;
final Map<String, Long> shardSizes;
public static final ClusterInfo EMPTY = new ClusterInfo();

private ClusterInfo() {
this.usages = Collections.emptyMap();
this.shardSizes = Collections.emptyMap();
protected ClusterInfo() {
this(Collections.EMPTY_MAP, Collections.EMPTY_MAP, Collections.EMPTY_MAP);
}

public ClusterInfo(Map<String, DiskUsage> usages, Map<String, Long> shardSizes) {
this.usages = usages;
/**
* Creates a new ClusterInfo instance.
*
* @param leastAvailableSpaceUsage a node id to disk usage mapping for the path that has the least available space on the node.
* @param mostAvailableSpaceUsage a node id to disk usage mapping for the path that has the most available space on the node.
* @param shardSizes a shard key to size in bytes mapping per shard.
* @see #shardIdentifierFromRouting
*/
public ClusterInfo(final Map<String, DiskUsage> leastAvailableSpaceUsage, final Map<String, DiskUsage> mostAvailableSpaceUsage, final Map<String, Long> shardSizes) {
this.leastAvailableSpaceUsage = leastAvailableSpaceUsage;
this.shardSizes = shardSizes;
this.mostAvailableSpaceUsage = mostAvailableSpaceUsage;
}

public Map<String, DiskUsage> getNodeDiskUsages() {
return this.usages;
/**
* Returns a node id to disk usage mapping for the path that has the least available space on the node.
*/
public Map<String, DiskUsage> getNodeLeastAvailableDiskUsages() {
return this.leastAvailableSpaceUsage;
}

/**
* Returns a node id to disk usage mapping for the path that has the most available space on the node.
*/
public Map<String, DiskUsage> getNodeMostAvailableDiskUsages() {
return this.mostAvailableSpaceUsage;
}

/**
* Returns the shard size for the given shard routing or <code>null</code> if that metric is not available.
*/
public Long getShardSize(ShardRouting shardRouting) {
return shardSizes.get(shardIdentifierFromRouting(shardRouting));
}

/**
* Returns the shard size for the given shard routing or <code>defaultValue</code> if that metric is not available.
*/
public long getShardSize(ShardRouting shardRouting, long defaultValue) {
Long shardSize = getShardSize(shardRouting);
return shardSize == null ? defaultValue : shardSize;
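
A hedged sketch of how the reworked ClusterInfo is meant to be consumed, per the constructor Javadoc above: for each node, one DiskUsage for the fullest data path and one for the emptiest. The node ids, paths and byte counts below are invented; the constructors match the diffed signatures, assuming both classes live in org.elasticsearch.cluster:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

import org.elasticsearch.cluster.ClusterInfo;
import org.elasticsearch.cluster.DiskUsage;

public class ClusterInfoSketch {
    public static void main(String[] args) {
        Map<String, DiskUsage> least = new HashMap<>();
        Map<String, DiskUsage> most = new HashMap<>();
        least.put("n1", new DiskUsage("n1", "node-1", "/data/a", 100L << 30, 5L << 30));  // fullest path
        most.put("n1", new DiskUsage("n1", "node-1", "/data/b", 100L << 30, 60L << 30)); // emptiest path

        ClusterInfo info = new ClusterInfo(least, most, Collections.<String, Long>emptyMap());
        // Allocation deciders consult the pessimistic (least available) view before placing a shard on n1.
        System.out.println(info.getNodeLeastAvailableDiskUsages().get("n1"));
    }
}
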
@ -27,11 +27,11 @@ import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;

import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Maps.newHashMap;

public final class DiffableUtils {
@ -232,13 +232,13 @@ public final class DiffableUtils {
protected final Map<String, T> adds;

protected MapDiff() {
deletes = newArrayList();
deletes = new ArrayList<>();
diffs = newHashMap();
adds = newHashMap();
}

protected MapDiff(StreamInput in, KeyedReader<T> reader) throws IOException {
deletes = newArrayList();
deletes = new ArrayList<>();
diffs = newHashMap();
adds = newHashMap();
int deletesCount = in.readVInt();
@ -28,6 +28,7 @@ import org.elasticsearch.common.unit.ByteSizeValue;
public class DiskUsage {
final String nodeId;
final String nodeName;
final String path;
final long totalBytes;
final long freeBytes;

@ -35,11 +36,12 @@ public class DiskUsage {
* Create a new DiskUsage; if {@code totalBytes} is 0, {@link #getFreeDiskAsPercentage()}
* will always return 100.0% free
*/
public DiskUsage(String nodeId, String nodeName, long totalBytes, long freeBytes) {
public DiskUsage(String nodeId, String nodeName, String path, long totalBytes, long freeBytes) {
this.nodeId = nodeId;
this.nodeName = nodeName;
this.freeBytes = freeBytes;
this.totalBytes = totalBytes;
this.path = path;
}

public String getNodeId() {
@ -50,6 +52,10 @@ public class DiskUsage {
return nodeName;
}

public String getPath() {
return path;
}

public double getFreeDiskAsPercentage() {
// We return 100.0% in order to fail "open", in that if we have invalid
// numbers for the total bytes, it's as if we don't know disk usage.
@ -77,7 +83,7 @@ public class DiskUsage {

@Override
public String toString() {
return "[" + nodeId + "][" + nodeName + "] free: " + new ByteSizeValue(getFreeBytes()) +
return "[" + nodeId + "][" + nodeName + "][" + path + "] free: " + new ByteSizeValue(getFreeBytes()) +
"[" + Strings.format1Decimals(getFreeDiskAsPercentage(), "%") + "]";
}
}
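
The "fail open" comment above means an unknown or zero total is reported as fully free, so a node with missing disk stats is not excluded from allocation by accident. The guard re-derived in isolation:

public class FreePercentSketch {
    // Mirrors the fail-open guard described in the comment: unknown totals read as 100% free.
    static double freeDiskAsPercentage(long totalBytes, long freeBytes) {
        if (totalBytes == 0) {
            return 100.0;
        }
        return 100.0 * freeBytes / totalBytes;
    }

    public static void main(String[] args) {
        System.out.println(freeDiskAsPercentage(0, 0));                  // 100.0 - fail open
        System.out.println(freeDiskAsPercentage(100L << 30, 5L << 30));  // 5.0
    }
}
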
@ -19,7 +19,6 @@

package org.elasticsearch.cluster;

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.LatchedActionListener;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
@ -35,9 +34,9 @@ import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.allocation.decider.DiskThresholdDecider;
import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.concurrent.AbstractRunnable;
import org.elasticsearch.common.util.concurrent.EsRejectedExecutionException;
import org.elasticsearch.monitor.fs.FsInfo;
import org.elasticsearch.node.settings.NodeSettingsService;
@ -47,7 +46,6 @@ import org.elasticsearch.transport.ReceiveTimeoutTransportException;
import java.util.*;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicReference;

/**
* InternalClusterInfoService provides the ClusterInfoService interface,
@ -67,7 +65,8 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu

private volatile TimeValue updateFrequency;

private volatile Map<String, DiskUsage> usages;
private volatile Map<String, DiskUsage> leastAvailableSpaceUsages;
private volatile Map<String, DiskUsage> mostAvailableSpaceUsages;
private volatile Map<String, Long> shardSizes;
private volatile boolean isMaster = false;
private volatile boolean enabled;
@ -84,7 +83,8 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
TransportIndicesStatsAction transportIndicesStatsAction, ClusterService clusterService,
ThreadPool threadPool) {
super(settings);
this.usages = Collections.emptyMap();
this.leastAvailableSpaceUsages = Collections.emptyMap();
this.mostAvailableSpaceUsages = Collections.emptyMap();
this.shardSizes = Collections.emptyMap();
this.transportNodesStatsAction = transportNodesStatsAction;
this.transportIndicesStatsAction = transportIndicesStatsAction;
@ -200,9 +200,16 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
if (logger.isTraceEnabled()) {
logger.trace("Removing node from cluster info: {}", removedNode.getId());
}
Map<String, DiskUsage> newUsages = new HashMap<>(usages);
newUsages.remove(removedNode.getId());
usages = Collections.unmodifiableMap(newUsages);
if (leastAvailableSpaceUsages.containsKey(removedNode.getId())) {
Map<String, DiskUsage> newMaxUsages = new HashMap<>(leastAvailableSpaceUsages);
newMaxUsages.remove(removedNode.getId());
leastAvailableSpaceUsages = Collections.unmodifiableMap(newMaxUsages);
}
if (mostAvailableSpaceUsages.containsKey(removedNode.getId())) {
Map<String, DiskUsage> newMinUsages = new HashMap<>(mostAvailableSpaceUsages);
newMinUsages.remove(removedNode.getId());
mostAvailableSpaceUsages = Collections.unmodifiableMap(newMinUsages);
}
}
}
}
@ -210,7 +217,7 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu

@Override
public ClusterInfo getClusterInfo() {
return new ClusterInfo(usages, shardSizes);
return new ClusterInfo(leastAvailableSpaceUsages, mostAvailableSpaceUsages, shardSizes);
}

@Override
@ -313,27 +320,11 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
CountDownLatch nodeLatch = updateNodeStats(new ActionListener<NodesStatsResponse>() {
@Override
public void onResponse(NodesStatsResponse nodeStatses) {
Map<String, DiskUsage> newUsages = new HashMap<>();
for (NodeStats nodeStats : nodeStatses.getNodes()) {
if (nodeStats.getFs() == null) {
logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name());
} else {
long available = 0;
long total = 0;

for (FsInfo.Path info : nodeStats.getFs()) {
available += info.getAvailable().bytes();
total += info.getTotal().bytes();
}
String nodeId = nodeStats.getNode().id();
String nodeName = nodeStats.getNode().getName();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], total disk: {}, available disk: {}", nodeId, total, available);
}
newUsages.put(nodeId, new DiskUsage(nodeId, nodeName, total, available));
}
}
usages = Collections.unmodifiableMap(newUsages);
Map<String, DiskUsage> newLeastAvailableUsages = new HashMap<>();
Map<String, DiskUsage> newMostAvailableUsages = new HashMap<>();
fillDiskUsagePerNode(logger, nodeStatses.getNodes(), newLeastAvailableUsages, newMostAvailableUsages);
leastAvailableSpaceUsages = Collections.unmodifiableMap(newLeastAvailableUsages);
mostAvailableSpaceUsages = Collections.unmodifiableMap(newMostAvailableUsages);
}

@Override
@ -349,7 +340,8 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
logger.warn("Failed to execute NodeStatsAction for ClusterInfoUpdateJob", e);
}
// we empty the usages list, to be safe - we don't know what's going on.
usages = Collections.emptyMap();
leastAvailableSpaceUsages = Collections.emptyMap();
mostAvailableSpaceUsages = Collections.emptyMap();
}
}
});
@ -412,5 +404,34 @@ public class InternalClusterInfoService extends AbstractComponent implements Clu
}
}

static void fillDiskUsagePerNode(ESLogger logger, NodeStats[] nodeStatsArray, Map<String, DiskUsage> newLeastAvailableUsages, Map<String, DiskUsage> newMostAvailableUsages) {
for (NodeStats nodeStats : nodeStatsArray) {
if (nodeStats.getFs() == null) {
logger.warn("Unable to retrieve node FS stats for {}", nodeStats.getNode().name());
} else {
FsInfo.Path leastAvailablePath = null;
FsInfo.Path mostAvailablePath = null;
for (FsInfo.Path info : nodeStats.getFs()) {
if (leastAvailablePath == null) {
assert mostAvailablePath == null;
mostAvailablePath = leastAvailablePath = info;
} else if (leastAvailablePath.getAvailable().bytes() > info.getAvailable().bytes()) {
leastAvailablePath = info;
} else if (mostAvailablePath.getAvailable().bytes() < info.getAvailable().bytes()) {
mostAvailablePath = info;
}
}
String nodeId = nodeStats.getNode().id();
String nodeName = nodeStats.getNode().getName();
if (logger.isTraceEnabled()) {
logger.trace("node: [{}], most available: total disk: {}, available disk: {} / least available: total disk: {}, available disk: {}", nodeId, mostAvailablePath.getTotal(), mostAvailablePath.getAvailable(), leastAvailablePath.getTotal(), leastAvailablePath.getAvailable());
}
newLeastAvailableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, leastAvailablePath.getPath(), leastAvailablePath.getTotal().bytes(), leastAvailablePath.getAvailable().bytes()));
newMostAvailableUsages.put(nodeId, new DiskUsage(nodeId, nodeName, mostAvailablePath.getPath(), mostAvailablePath.getTotal().bytes(), mostAvailablePath.getAvailable().bytes()));

}
}
}

}
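
fillDiskUsagePerNode above is a single-pass min/max scan over a node's data paths. The same selection reduced to plain longs (sizes invented), to make the seeding of both extremes from the first path explicit:

public class MinMaxPathSketch {
    public static void main(String[] args) {
        long[] availableBytesPerPath = {5L << 30, 60L << 30, 20L << 30}; // invented data paths
        long least = availableBytesPerPath[0]; // both extremes seeded from the first path
        long most = availableBytesPerPath[0];
        for (long available : availableBytesPerPath) {
            if (available < least) {
                least = available; // fullest path - the pessimistic number the deciders care about
            } else if (available > most) {
                most = available;  // emptiest path
            }
        }
        System.out.println("least available: " + least + ", most available: " + most);
    }
}
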
@ -41,10 +41,17 @@ import org.joda.time.DateTimeZone;
import org.joda.time.format.DateTimeFormat;
import org.joda.time.format.DateTimeFormatter;

import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Callable;

import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Maps.filterEntries;
import static com.google.common.collect.Maps.newHashMap;

@ -268,7 +275,7 @@ public class IndexNameExpressionResolver extends AbstractComponent {
if (filteringRequired) {
// If filtering required - add it to the list of filters
if (filteringAliases == null) {
filteringAliases = newArrayList();
filteringAliases = new ArrayList<>();
}
filteringAliases.add(alias);
} else {
@ -23,7 +23,10 @@ import com.carrotsearch.hppc.ObjectHashSet;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.base.Predicate;
import com.google.common.collect.*;
import com.google.common.collect.Collections2;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.UnmodifiableIterator;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.Diff;
import org.elasticsearch.cluster.Diffable;
@ -45,7 +48,12 @@ import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.settings.loader.SettingsLoader;
import org.elasticsearch.common.xcontent.*;
import org.elasticsearch.common.xcontent.FromXContentBuilder;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;
import org.elasticsearch.common.xcontent.XContentParser;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.discovery.DiscoverySettings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.indices.recovery.RecoverySettings;
@ -55,7 +63,18 @@ import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.search.warmer.IndexWarmersMetaData;

import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.SortedMap;
import java.util.TreeMap;

import static org.elasticsearch.common.settings.Settings.*;

@ -246,7 +265,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
for (String index : intersection) {
IndexMetaData indexMetaData = indices.get(index);
List<AliasMetaData> filteredValues = Lists.newArrayList();
List<AliasMetaData> filteredValues = new ArrayList<>();
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
AliasMetaData value = cursor.value;
if (matchAllAliases || Regex.simpleMatch(aliases, value.alias())) {
@ -295,7 +314,7 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
Iterable<String> intersection = HppcMaps.intersection(ObjectHashSet.from(concreteIndices), indices.keys());
for (String index : intersection) {
IndexMetaData indexMetaData = indices.get(index);
List<AliasMetaData> filteredValues = Lists.newArrayList();
List<AliasMetaData> filteredValues = new ArrayList<>();
for (ObjectCursor<AliasMetaData> cursor : indexMetaData.getAliases().values()) {
AliasMetaData value = cursor.value;
if (Regex.simpleMatch(aliases, value.alias())) {
@ -978,14 +997,14 @@ public class MetaData implements Iterable<IndexMetaData>, Diffable<MetaData>, Fr
// TODO: I think we can remove these arrays. it isn't worth the effort, for operations on all indices.
// When doing an operation across all indices, most of the time is spent on actually going to all shards and
// do the required operations, the bottleneck isn't resolving expressions into concrete indices.
List<String> allIndicesLst = Lists.newArrayList();
List<String> allIndicesLst = new ArrayList<>();
for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
allIndicesLst.add(cursor.value.index());
}
String[] allIndices = allIndicesLst.toArray(new String[allIndicesLst.size()]);

List<String> allOpenIndicesLst = Lists.newArrayList();
List<String> allClosedIndicesLst = Lists.newArrayList();
List<String> allOpenIndicesLst = new ArrayList<>();
List<String> allClosedIndicesLst = new ArrayList<>();
for (ObjectCursor<IndexMetaData> cursor : indices.values()) {
IndexMetaData indexMetaData = cursor.value;
if (indexMetaData.state() == IndexMetaData.State.OPEN) {
@ -22,7 +22,6 @@ package org.elasticsearch.cluster.metadata;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.ElasticsearchException;
@ -80,7 +79,12 @@ import java.io.UnsupportedEncodingException;
import java.nio.file.DirectoryStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.*;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.Semaphore;
import java.util.concurrent.TimeUnit;

@ -248,7 +252,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {

Map<String, AliasMetaData> templatesAliases = Maps.newHashMap();

List<String> templateNames = Lists.newArrayList();
List<String> templateNames = new ArrayList<>();

for (Map.Entry<String, String> entry : request.mappings().entrySet()) {
mappings.put(entry.getKey(), parseMapping(entry.getValue()));
@ -494,7 +498,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {
}

private List<IndexTemplateMetaData> findTemplates(CreateIndexClusterStateUpdateRequest request, ClusterState state, IndexTemplateFilter indexTemplateFilter) throws IOException {
List<IndexTemplateMetaData> templates = Lists.newArrayList();
List<IndexTemplateMetaData> templates = new ArrayList<>();
for (ObjectCursor<IndexTemplateMetaData> cursor : state.metaData().templates().values()) {
IndexTemplateMetaData template = cursor.value;
if (indexTemplateFilter.apply(request, template)) {
@ -527,7 +531,7 @@ public class MetaDataCreateIndexService extends AbstractComponent {

List<String> getIndexSettingsValidationErrors(Settings settings) {
String customPath = settings.get(IndexMetaData.SETTING_DATA_PATH, null);
List<String> validationErrors = Lists.newArrayList();
List<String> validationErrors = new ArrayList<>();
if (customPath != null && env.sharedDataFile() == null) {
validationErrors.add("path.shared_data must be set in order to use custom data paths");
} else if (customPath != null) {
@ -20,7 +20,6 @@

package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesClusterStateUpdateRequest;
@ -34,10 +33,11 @@ import org.elasticsearch.common.component.AbstractComponent;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.index.IndexService;
import org.elasticsearch.index.mapper.MapperService;
import org.elasticsearch.indices.IndicesService;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@ -69,7 +69,7 @@ public class MetaDataIndexAliasesService extends AbstractComponent {

@Override
public ClusterState execute(final ClusterState currentState) {
List<String> indicesToClose = Lists.newArrayList();
List<String> indicesToClose = new ArrayList<>();
Map<String, IndexService> indices = Maps.newHashMap();
try {
for (AliasAction aliasAction : request.actions()) {
@ -19,7 +19,6 @@

package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.elasticsearch.action.admin.indices.alias.Alias;
@ -39,6 +38,7 @@ import org.elasticsearch.indices.IndexTemplateAlreadyExistsException;
import org.elasticsearch.indices.IndexTemplateMissingException;
import org.elasticsearch.indices.InvalidIndexTemplateException;

import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
@ -179,7 +179,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
}

private void validate(PutRequest request) {
List<String> validationErrors = Lists.newArrayList();
List<String> validationErrors = new ArrayList<>();
if (request.name.contains(" ")) {
validationErrors.add("name must not contain a space");
}
@ -240,7 +240,7 @@ public class MetaDataIndexTemplateService extends AbstractComponent {
String template;
Settings settings = Settings.Builder.EMPTY_SETTINGS;
Map<String, String> mappings = Maps.newHashMap();
List<Alias> aliases = Lists.newArrayList();
List<Alias> aliases = new ArrayList<>();
Map<String, IndexMetaData.Custom> customs = Maps.newHashMap();

TimeValue masterTimeout = MasterNodeRequest.DEFAULT_MASTER_NODE_TIMEOUT;
@ -20,7 +20,6 @@

package org.elasticsearch.cluster.metadata;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import com.google.common.collect.Sets;
import org.elasticsearch.Version;
@ -47,7 +46,12 @@ import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.indices.InvalidTypeNameException;
import org.elasticsearch.percolator.PercolatorService;

import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.google.common.collect.Maps.newHashMap;
/**
@ -226,7 +230,7 @@ public class MetaDataMappingService extends AbstractComponent {
if (task instanceof RefreshTask) {
RefreshTask refreshTask = (RefreshTask) task;
try {
List<String> updatedTypes = Lists.newArrayList();
List<String> updatedTypes = new ArrayList<>();
for (String type : refreshTask.types) {
if (processedRefreshes.contains(type)) {
continue;
@ -342,7 +346,7 @@ public class MetaDataMappingService extends AbstractComponent {

@Override
public ClusterState execute(final ClusterState currentState) throws Exception {
List<String> indicesToClose = Lists.newArrayList();
List<String> indicesToClose = new ArrayList<>();
try {
for (String index : request.indices()) {
if (!currentState.metaData().hasIndex(index)) {
@ -35,12 +35,11 @@ import org.elasticsearch.common.regex.Regex;
import org.elasticsearch.common.transport.TransportAddress;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.google.common.collect.Lists.newArrayList;

/**
* This class holds all {@link DiscoveryNode}s in the cluster and provides convenience methods to
* access, modify, merge and diff discovery nodes.
@ -414,8 +413,8 @@ public class DiscoveryNodes extends AbstractDiffable<DiscoveryNodes> implements
* Returns the changes comparing these nodes to the provided nodes.
*/
public Delta delta(DiscoveryNodes other) {
List<DiscoveryNode> removed = newArrayList();
List<DiscoveryNode> added = newArrayList();
List<DiscoveryNode> removed = new ArrayList<>();
List<DiscoveryNode> added = new ArrayList<>();
for (DiscoveryNode node : other) {
if (!this.nodeExists(node.id())) {
removed.add(node);
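
The delta(...) above is essentially two set-difference passes over node ids. The same idea over plain strings (node ids invented), detached from the DiscoveryNodes types:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class NodesDeltaSketch {
    public static void main(String[] args) {
        Set<String> current = new HashSet<>(Arrays.asList("n1", "n2"));
        Set<String> previous = new HashSet<>(Arrays.asList("n2", "n3"));

        List<String> added = new ArrayList<>();
        List<String> removed = new ArrayList<>();
        for (String id : current) {
            if (!previous.contains(id)) {
                added.add(id);   // present now, absent before
            }
        }
        for (String id : previous) {
            if (!current.contains(id)) {
                removed.add(id); // present before, absent now
            }
        }
        System.out.println("added=" + added + " removed=" + removed);
    }
}
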
@ -25,7 +25,6 @@ import com.carrotsearch.hppc.cursors.IntObjectCursor;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Sets;
import com.google.common.collect.UnmodifiableIterator;

import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.AbstractDiffable;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -42,8 +41,6 @@ import java.util.List;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

import static com.google.common.collect.Lists.*;

/**
* The {@link IndexRoutingTable} represents routing information for a single
* index. The routing table maintains a list of all shards in the index. A
@ -265,7 +262,7 @@ public class IndexRoutingTable extends AbstractDiffable<IndexRoutingTable> imple
* @return a {@link List} of shards that match one of the given {@link ShardRoutingState states}
*/
public List<ShardRouting> shardsWithState(ShardRoutingState state) {
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (IndexShardRoutingTable shardRoutingTable : this) {
shards.addAll(shardRoutingTable.shardsWithState(state));
}
@ -30,11 +30,15 @@ import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;

import static com.google.common.collect.Lists.newArrayList;

/**
* {@link IndexShardRoutingTable} encapsulates all instances of a single shard.
* Each Elasticsearch index consists of multiple shards, each shard encapsulates
@ -554,7 +558,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
}

public List<ShardRouting> replicaShardsWithState(ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (ShardRouting shardEntry : replicas) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
@ -569,7 +573,7 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {
if (state == ShardRoutingState.INITIALIZING) {
return allInitializingShards;
}
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (ShardRouting shardEntry : this) {
if (shardEntry.state() == state) {
shards.add(shardEntry);
@ -585,12 +589,12 @@ public class IndexShardRoutingTable implements Iterable<ShardRouting> {

public Builder(IndexShardRoutingTable indexShard) {
this.shardId = indexShard.shardId;
this.shards = newArrayList(indexShard.shards);
this.shards = new ArrayList<>(indexShard.shards);
}

public Builder(ShardId shardId) {
this.shardId = shardId;
this.shards = newArrayList();
this.shards = new ArrayList<>();
}

public Builder addShard(ShardRouting shardEntry) {
@ -19,7 +19,6 @@

package org.elasticsearch.cluster.routing;

import com.google.common.collect.Lists;
import org.elasticsearch.Version;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
@ -36,6 +35,7 @@ import org.elasticsearch.index.IndexNotFoundException;
import org.elasticsearch.index.shard.ShardId;
import org.elasticsearch.index.shard.ShardNotFoundException;

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashSet;
import java.util.Map;
@ -90,7 +90,7 @@ public class OperationRouting extends AbstractComponent {
set.add(iterator);
}
}
return new GroupShardsIterator(Lists.newArrayList(set));
return new GroupShardsIterator(new ArrayList<>(set));
}

private static final Map<String, Set<String>> EMPTY_ROUTING = Collections.emptyMap();
@ -27,8 +27,6 @@ import java.util.Collection;
import java.util.Iterator;
import java.util.List;

import static com.google.common.collect.Lists.newArrayList;

/**
* A {@link RoutingNode} represents a cluster node associated with a single {@link DiscoveryNode} including all shards
* that are hosted on that node. Each {@link RoutingNode} has a unique node id that can be used to identify the node.
@ -118,7 +116,7 @@ public class RoutingNode implements Iterable<ShardRouting> {
* @return List of shards
*/
public List<ShardRouting> shardsWithState(ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (ShardRouting shardEntry : this) {
for (ShardRoutingState state : states) {
if (shardEntry.state() == state) {
@ -136,7 +134,7 @@ public class RoutingNode implements Iterable<ShardRouting> {
* @return a list of shards
*/
public List<ShardRouting> shardsWithState(String index, ShardRoutingState... states) {
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();

for (ShardRouting shardEntry : this) {
if (!shardEntry.index().equals(index)) {
@ -22,19 +22,25 @@ package org.elasticsearch.cluster.routing;
import com.carrotsearch.hppc.ObjectIntHashMap;
import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.base.Predicate;
import com.google.common.collect.*;
import com.google.common.collect.Iterables;
import com.google.common.collect.Iterators;
import org.apache.lucene.util.CollectionUtil;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.metadata.MetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.collect.ImmutableOpenMap;
import org.elasticsearch.index.shard.ShardId;

import java.util.*;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Maps.newHashMap;
import static com.google.common.collect.Sets.newHashSet;

@ -97,7 +103,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
if (shard.assignedToNode()) {
List<ShardRouting> entries = nodesToShards.get(shard.currentNodeId());
if (entries == null) {
entries = newArrayList();
entries = new ArrayList<>();
nodesToShards.put(shard.currentNodeId(), entries);
}
final ShardRouting sr = getRouting(shard, readOnly);
@ -107,7 +113,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
entries = nodesToShards.get(shard.relocatingNodeId());
relocatingShards++;
if (entries == null) {
entries = newArrayList();
entries = new ArrayList<>();
nodesToShards.put(shard.relocatingNodeId(), entries);
}
// add the counterpart shard with relocatingNodeId reflecting the source from which
@ -285,7 +291,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}

public List<ShardRouting> shards(Predicate<ShardRouting> predicate) {
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (RoutingNode routingNode : this) {
for (ShardRouting shardRouting : routingNode) {
if (predicate.apply(shardRouting)) {
@ -298,7 +304,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

public List<ShardRouting> shardsWithState(ShardRoutingState... state) {
// TODO these are used on tests only - move into utils class
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (RoutingNode routingNode : this) {
shards.addAll(routingNode.shardsWithState(state));
}
@ -313,7 +319,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {

public List<ShardRouting> shardsWithState(String index, ShardRoutingState... state) {
// TODO these are used on tests only - move into utils class
List<ShardRouting> shards = newArrayList();
List<ShardRouting> shards = new ArrayList<>();
for (RoutingNode routingNode : this) {
shards.addAll(routingNode.shardsWithState(index, state));
}
@ -452,7 +458,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
List<ShardRouting> shards = assignedShards.get(shard.shardId());
if (shards == null) {
shards = Lists.newArrayList();
shards = new ArrayList<>();
assignedShards.put(shard.shardId(), shards);
}
assert assertInstanceNotInList(shard, shards);
@ -724,7 +730,7 @@ public class RoutingNodes implements Iterable<RoutingNode> {
}
// Assert that the active shard routings are identical.
Set<Map.Entry<String, Integer>> entries = indicesAndShards.entrySet();
final List<ShardRouting> shards = newArrayList();
final List<ShardRouting> shards = new ArrayList<>();
for (Map.Entry<String, Integer> e : entries) {
String index = e.getKey();
for (int i = 0; i < e.getValue(); i++) {
@ -20,14 +20,19 @@
|
||||
package org.elasticsearch.cluster.routing;
|
||||
|
||||
import com.carrotsearch.hppc.IntSet;
|
||||
import com.google.common.collect.*;
|
||||
import org.elasticsearch.cluster.*;
|
||||
import com.google.common.base.Predicate;
|
||||
import com.google.common.base.Predicates;
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import com.google.common.collect.Iterables;
|
||||
import com.google.common.collect.UnmodifiableIterator;
|
||||
import org.elasticsearch.cluster.Diff;
|
||||
import org.elasticsearch.cluster.Diffable;
|
||||
import org.elasticsearch.cluster.DiffableUtils;
|
||||
import org.elasticsearch.cluster.metadata.IndexMetaData;
|
||||
import org.elasticsearch.cluster.metadata.MetaData;
|
||||
import org.elasticsearch.common.io.stream.StreamInput;
|
||||
import org.elasticsearch.common.io.stream.StreamOutput;
|
||||
import org.elasticsearch.index.IndexNotFoundException;
|
||||
import org.elasticsearch.index.shard.ShardId;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.ArrayList;
|
||||
@ -35,7 +40,6 @@ import java.util.Collections;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
import static com.google.common.collect.Lists.newArrayList;
|
||||
import static com.google.common.collect.Maps.newHashMap;
|
||||
|
||||
/**
|
||||
@ -107,7 +111,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
||||
}
|
||||
|
||||
public List<ShardRouting> shardsWithState(ShardRoutingState state) {
|
||||
List<ShardRouting> shards = newArrayList();
|
||||
List<ShardRouting> shards = new ArrayList<>();
|
||||
for (IndexRoutingTable indexRoutingTable : this) {
|
||||
shards.addAll(indexRoutingTable.shardsWithState(state));
|
||||
}
|
||||
@ -120,7 +124,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
|
||||
* @return All the shards
|
||||
*/
|
||||
public List<ShardRouting> allShards() {
List<ShardRouting> shards = Lists.newArrayList();
List<ShardRouting> shards = new ArrayList<>();
String[] indices = indicesRouting.keySet().toArray(new String[indicesRouting.keySet().size()]);
for (String index : indices) {
List<ShardRouting> allShardsIndex = allShards(index);
@ -137,7 +141,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* @throws IndexNotFoundException If the index passed does not exists
*/
public List<ShardRouting> allShards(String index) {
List<ShardRouting> shards = Lists.newArrayList();
List<ShardRouting> shards = new ArrayList<>();
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
throw new IndexNotFoundException(index);
@ -162,28 +166,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* iterator contains a single ShardRouting pointing at the relocating target
*/
public GroupShardsIterator allActiveShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
continue;
// we simply ignore indices that don't exists (make sense for operations that use it currently)
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.active()) {
set.add(shardRouting.shardsIt());
if (includeRelocationTargets && shardRouting.relocating()) {
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.buildTargetRelocatingShard())));
}
} else if (includeEmpty) { // we need this for counting properly, just make it an empty one
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.<ShardRouting>emptyList()));
}
}
}
}
return new GroupShardsIterator(set);
return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ACTIVE_PREDICATE);
}

public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty) {
@ -198,6 +181,25 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
* iterator contains a single ShardRouting pointing at the relocating target
*/
public GroupShardsIterator allAssignedShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets) {
return allSatisfyingPredicateShardsGrouped(indices, includeEmpty, includeRelocationTargets, ASSIGNED_PREDICATE);
}

private static Predicate<ShardRouting> ACTIVE_PREDICATE = new Predicate<ShardRouting>() {
@Override
public boolean apply(ShardRouting shardRouting) {
return shardRouting.active();
}
};

private static Predicate<ShardRouting> ASSIGNED_PREDICATE = new Predicate<ShardRouting>() {
@Override
public boolean apply(ShardRouting shardRouting) {
return shardRouting.assignedToNode();
}
};

// TODO: replace with JDK 8 native java.util.function.Predicate
private GroupShardsIterator allSatisfyingPredicateShardsGrouped(String[] indices, boolean includeEmpty, boolean includeRelocationTargets, Predicate<ShardRouting> predicate) {
// use list here since we need to maintain identity across shards
ArrayList<ShardIterator> set = new ArrayList<>();
for (String index : indices) {
@ -208,7 +210,7 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (shardRouting.assignedToNode()) {
if (predicate.apply(shardRouting)) {
set.add(shardRouting.shardsIt());
if (includeRelocationTargets && shardRouting.relocating()) {
set.add(new PlainShardIterator(shardRouting.shardId(), Collections.singletonList(shardRouting.buildTargetRelocatingShard())));
@ -222,6 +224,38 @@ public class RoutingTable implements Iterable<IndexRoutingTable>, Diffable<Routi
return new GroupShardsIterator(set);
}

public ShardsIterator allShards(String[] indices) {
return allShardsSatisfyingPredicate(indices, Predicates.<ShardRouting>alwaysTrue(), false);
}

public ShardsIterator allShardsIncludingRelocationTargets(String[] indices) {
return allShardsSatisfyingPredicate(indices, Predicates.<ShardRouting>alwaysTrue(), true);
}

// TODO: replace with JDK 8 native java.util.function.Predicate
private ShardsIterator allShardsSatisfyingPredicate(String[] indices, Predicate<ShardRouting> predicate, boolean includeRelocationTargets) {
// use list here since we need to maintain identity across shards
List<ShardRouting> shards = new ArrayList<>();
for (String index : indices) {
IndexRoutingTable indexRoutingTable = index(index);
if (indexRoutingTable == null) {
continue;
// we simply ignore indices that don't exists (make sense for operations that use it currently)
}
for (IndexShardRoutingTable indexShardRoutingTable : indexRoutingTable) {
for (ShardRouting shardRouting : indexShardRoutingTable) {
if (predicate.apply(shardRouting)) {
shards.add(shardRouting);
if (includeRelocationTargets && shardRouting.relocating()) {
shards.add(shardRouting.buildTargetRelocatingShard());
}
}
}
}
}
return new PlainShardsIterator(shards);
}

/**
* All the *active* primary shards for the provided indices grouped (each group is a single element, consisting
* of the primary shard). This is handy for components that expect to get group iterators, but still want in some
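
The TODO above marks the Guava Predicate as a stopgap until the code can move to JDK 8. A minimal sketch of what the two constants could become with java.util.function.Predicate (an assumed migration, not part of this commit; call sites would switch from predicate.apply(...) to predicate.test(...)):

    // Hypothetical JDK 8 form of the predicates defined in this hunk:
    private static final java.util.function.Predicate<ShardRouting> ACTIVE_PREDICATE = ShardRouting::active;
    private static final java.util.function.Predicate<ShardRouting> ASSIGNED_PREDICATE = ShardRouting::assignedToNode;
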
@ -21,17 +21,15 @@ package org.elasticsearch.cluster.routing;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.newArrayList;
import static com.google.common.collect.Lists.newArrayListWithCapacity;
import static com.google.common.collect.Maps.newHashMap;

/**
@ -57,7 +55,7 @@ public class RoutingTableValidation implements Streamable {
if (failures().isEmpty() && indicesFailures().isEmpty()) {
return ImmutableList.of();
}
List<String> allFailures = newArrayList(failures());
List<String> allFailures = new ArrayList<>(failures());
for (Map.Entry<String, List<String>> entry : indicesFailures().entrySet()) {
for (String failure : entry.getValue()) {
allFailures.add("Index [" + entry.getKey() + "]: " + failure);
@ -94,7 +92,7 @@ public class RoutingTableValidation implements Streamable {
public void addFailure(String failure) {
valid = false;
if (failures == null) {
failures = newArrayList();
failures = new ArrayList<>();
}
failures.add(failure);
}
@ -106,7 +104,7 @@ public class RoutingTableValidation implements Streamable {
}
List<String> indexFailures = indicesFailures.get(index);
if (indexFailures == null) {
indexFailures = Lists.newArrayList();
indexFailures = new ArrayList<>();
indicesFailures.put(index, indexFailures);
}
indexFailures.add(failure);
@ -124,7 +122,7 @@ public class RoutingTableValidation implements Streamable {
if (size == 0) {
failures = ImmutableList.of();
} else {
failures = Lists.newArrayListWithCapacity(size);
failures = new ArrayList<>(size);
for (int i = 0; i < size; i++) {
failures.add(in.readString());
}
@ -137,7 +135,7 @@ public class RoutingTableValidation implements Streamable {
for (int i = 0; i < size; i++) {
String index = in.readString();
int size2 = in.readVInt();
List<String> indexFailures = newArrayListWithCapacity(size2);
List<String> indexFailures = new ArrayList<>(size2);
for (int j = 0; j < size2; j++) {
indexFailures.add(in.readString());
}
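
The pattern repeated across the files in this commit: Guava's Lists factory methods, useful before Java 7's diamond operator, are swapped for plain ArrayList constructors. The correspondence as a compilable sketch (the capacity value is arbitrary):

    import java.util.ArrayList;
    import java.util.List;

    class ListMigrationSketch {
        static void examples() {
            List<String> empty  = new ArrayList<>();       // was Lists.newArrayList()
            List<String> sized  = new ArrayList<>(32);     // was Lists.newArrayListWithCapacity(32)
            List<String> copied = new ArrayList<>(empty);  // was Lists.newArrayList(empty)
        }
    }
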
@ -19,7 +19,6 @@

package org.elasticsearch.cluster.routing.allocation;

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
@ -28,6 +27,7 @@ import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.index.shard.ShardId;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;

@ -88,7 +88,7 @@ public class AllocationExplanation implements Streamable {
public AllocationExplanation add(ShardId shardId, NodeExplanation nodeExplanation) {
List<NodeExplanation> list = explanations.get(shardId);
if (list == null) {
list = Lists.newArrayList();
list = new ArrayList<>();
explanations.put(shardId, list);
}
list.add(nodeExplanation);
@ -121,7 +121,7 @@ public class AllocationExplanation implements Streamable {
for (int i = 0; i < size; i++) {
ShardId shardId = ShardId.readShardId(in);
int size2 = in.readVInt();
List<NodeExplanation> ne = Lists.newArrayListWithCapacity(size2);
List<NodeExplanation> ne = new ArrayList<>(size2);
for (int j = 0; j < size2; j++) {
DiscoveryNode node = null;
if (in.readBoolean()) {

@ -21,12 +21,16 @@ package org.elasticsearch.cluster.routing.allocation;

import com.carrotsearch.hppc.cursors.ObjectCursor;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.elasticsearch.cluster.ClusterInfoService;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.metadata.IndexMetaData;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.routing.*;
import org.elasticsearch.cluster.routing.IndexRoutingTable;
import org.elasticsearch.cluster.routing.RoutingNode;
import org.elasticsearch.cluster.routing.RoutingNodes;
import org.elasticsearch.cluster.routing.RoutingTable;
import org.elasticsearch.cluster.routing.ShardRouting;
import org.elasticsearch.cluster.routing.UnassignedInfo;
import org.elasticsearch.cluster.routing.allocation.allocator.ShardsAllocators;
import org.elasticsearch.cluster.routing.allocation.command.AllocationCommands;
import org.elasticsearch.cluster.routing.allocation.decider.AllocationDeciders;
@ -36,7 +40,6 @@ import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;

import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

@ -235,7 +238,7 @@ public class AllocationService extends AbstractComponent {
}

// go over and remove dangling replicas that are initializing for primary shards
List<ShardRouting> shardsToFail = Lists.newArrayList();
List<ShardRouting> shardsToFail = new ArrayList<>();
for (ShardRouting shardEntry : routingNodes.unassigned()) {
if (shardEntry.primary()) {
for (ShardRouting routing : routingNodes.assignedShards(shardEntry)) {

@ -25,10 +25,9 @@ import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import static com.google.common.collect.Lists.newArrayList;

/**
* Class used to encapsulate a number of {@link RerouteExplanation}
* explanations.
@ -37,7 +36,7 @@ public class RoutingExplanations implements ToXContent {
private final List<RerouteExplanation> explanations;

public RoutingExplanations() {
this.explanations = newArrayList();
this.explanations = new ArrayList<>();
}

public RoutingExplanations add(RerouteExplanation explanation) {

@ -20,8 +20,8 @@
package org.elasticsearch.cluster.routing.allocation.command;

import org.elasticsearch.ElasticsearchParseException;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.cluster.routing.allocation.RoutingAllocation;
import org.elasticsearch.cluster.routing.allocation.RoutingExplanations;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
@ -30,13 +30,12 @@ import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentParser;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import static com.google.common.collect.Lists.newArrayList;

/**
* A simple {@link AllocationCommand} composite managing several
* {@link AllocationCommand} implementations
@ -73,7 +72,7 @@ public class AllocationCommands {
registerFactory(MoveAllocationCommand.NAME, new MoveAllocationCommand.Factory());
}

private final List<AllocationCommand> commands = newArrayList();
private final List<AllocationCommand> commands = new ArrayList<>();

/**
* Creates a new set of {@link AllocationCommands}

@ -19,13 +19,13 @@

package org.elasticsearch.cluster.routing.allocation.decider;

import com.google.common.collect.Lists;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Locale;

@ -226,7 +226,7 @@ public abstract class Decision implements ToXContent {
*/
public static class Multi extends Decision {

private final List<Decision> decisions = Lists.newArrayList();
private final List<Decision> decisions = new ArrayList<>();

/**
* Add a decision to this {@link Multi}decision instance

@ -164,7 +164,7 @@ public class DiskThresholdDecider extends AllocationDecider {

@Override
public void onNewInfo(ClusterInfo info) {
Map<String, DiskUsage> usages = info.getNodeDiskUsages();
Map<String, DiskUsage> usages = info.getNodeLeastAvailableDiskUsages();
if (usages != null) {
boolean reroute = false;
String explanation = "";
@ -339,7 +339,9 @@ public class DiskThresholdDecider extends AllocationDecider {

final double usedDiskThresholdLow = 100.0 - DiskThresholdDecider.this.freeDiskThresholdLow;
final double usedDiskThresholdHigh = 100.0 - DiskThresholdDecider.this.freeDiskThresholdHigh;
DiskUsage usage = getDiskUsage(node, allocation);
ClusterInfo clusterInfo = allocation.clusterInfo();
Map<String, DiskUsage> usages = clusterInfo.getNodeMostAvailableDiskUsages();
DiskUsage usage = getDiskUsage(node, allocation, usages);
// First, check that the node currently over the low watermark
double freeDiskPercentage = usage.getFreeDiskAsPercentage();
// Cache the used disk percentage for displaying disk percentages consistent with documentation
@ -441,11 +443,16 @@ public class DiskThresholdDecider extends AllocationDecider {

@Override
public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAllocation allocation) {
if (shardRouting.currentNodeId().equals(node.nodeId()) == false) {
throw new IllegalArgumentException("Shard [" + shardRouting + "] is not allocated on node: [" + node.nodeId() + "]");
}
final Decision decision = earlyTerminate(allocation);
if (decision != null) {
return decision;
}
DiskUsage usage = getDiskUsage(node, allocation);
ClusterInfo clusterInfo = allocation.clusterInfo();
Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
DiskUsage usage = getDiskUsage(node, allocation, usages);
// If this node is already above the high threshold, the shard cannot remain (get it off!)
double freeDiskPercentage = usage.getFreeDiskAsPercentage();
long freeBytes = usage.getFreeBytes();
@ -472,9 +479,8 @@ public class DiskThresholdDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "enough disk for shard to remain on node, free: [%s]", new ByteSizeValue(freeBytes));
}

private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation) {
private DiskUsage getDiskUsage(RoutingNode node, RoutingAllocation allocation, Map<String, DiskUsage> usages) {
ClusterInfo clusterInfo = allocation.clusterInfo();
Map<String, DiskUsage> usages = clusterInfo.getNodeDiskUsages();
DiskUsage usage = usages.get(node.nodeId());
if (usage == null) {
// If there is no usage, and we have other nodes in the cluster,
@ -488,7 +494,7 @@ public class DiskThresholdDecider extends AllocationDecider {

if (includeRelocations) {
long relocatingShardsSize = sizeOfRelocatingShards(node, clusterInfo, true);
DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(),
DiskUsage usageIncludingRelocations = new DiskUsage(node.nodeId(), node.node().name(), "_na_",
usage.getTotalBytes(), usage.getFreeBytes() - relocatingShardsSize);
if (logger.isTraceEnabled()) {
logger.trace("usage without relocations: {}", usage);
@ -508,7 +514,7 @@ public class DiskThresholdDecider extends AllocationDecider {
*/
public DiskUsage averageUsage(RoutingNode node, Map<String, DiskUsage> usages) {
if (usages.size() == 0) {
return new DiskUsage(node.nodeId(), node.node().name(), 0, 0);
return new DiskUsage(node.nodeId(), node.node().name(), "_na_", 0, 0);
}
long totalBytes = 0;
long freeBytes = 0;
@ -516,7 +522,7 @@ public class DiskThresholdDecider extends AllocationDecider {
totalBytes += du.getTotalBytes();
freeBytes += du.getFreeBytes();
}
return new DiskUsage(node.nodeId(), node.node().name(), totalBytes / usages.size(), freeBytes / usages.size());
return new DiskUsage(node.nodeId(), node.node().name(), "_na_", totalBytes / usages.size(), freeBytes / usages.size());
}

/**
@ -528,8 +534,8 @@ public class DiskThresholdDecider extends AllocationDecider {
*/
public double freeDiskPercentageAfterShardAssigned(DiskUsage usage, Long shardSize) {
shardSize = (shardSize == null) ? 0 : shardSize;
DiskUsage newUsage = new DiskUsage(usage.getNodeId(), usage.getNodeName(),
usage.getTotalBytes(), usage.getFreeBytes() - shardSize);
DiskUsage newUsage = new DiskUsage(usage.getNodeId(), usage.getNodeName(), usage.getPath(),
usage.getTotalBytes(), usage.getFreeBytes() - shardSize);
return newUsage.getFreeDiskAsPercentage();
}

@ -600,7 +606,7 @@ public class DiskThresholdDecider extends AllocationDecider {
return allocation.decision(Decision.YES, NAME, "cluster info unavailable");
}

final Map<String, DiskUsage> usages = clusterInfo.getNodeDiskUsages();
final Map<String, DiskUsage> usages = clusterInfo.getNodeLeastAvailableDiskUsages();
// Fail open if there are no disk usages available
if (usages.isEmpty()) {
if (logger.isTraceEnabled()) {
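
The substantive change in this file: disk usage is now tracked per data path, so the decider reads two views of it. New-shard decisions judge a node by its most-available path, while canRemain and the reroute listener judge it by its least-available path. A toy illustration of that asymmetry, with made-up types rather than the Elasticsearch API:

    import java.util.Map;

    class WatermarkSketch {
        static class Usage {
            final long totalBytes, freeBytes;
            Usage(long totalBytes, long freeBytes) { this.totalBytes = totalBytes; this.freeBytes = freeBytes; }
            double freePercent() { return 100.0 * freeBytes / totalBytes; }
        }

        // Placing a new shard: optimistic, judge the node by its MOST available data path.
        static boolean canAllocate(Map<String, Usage> mostAvailable, String nodeId, double lowWatermarkFreePct) {
            Usage u = mostAvailable.get(nodeId);
            return u != null && u.freePercent() > lowWatermarkFreePct;
        }

        // Keeping an existing shard: pessimistic, judge the node by its LEAST available data path.
        static boolean canRemain(Map<String, Usage> leastAvailable, String nodeId, double highWatermarkFreePct) {
            Usage u = leastAvailable.get(nodeId);
            return u == null || u.freePercent() > highWatermarkFreePct;
        }
    }
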
@ -943,7 +943,8 @@ public class Strings {
boolean changed = false;
for (int i = 0; i < value.length(); i++) {
char c = value.charAt(i);
if (c == '_') {
//e.g. _name stays as-is, _first_name becomes _firstName
if (c == '_' && i > 0) {
if (!changed) {
if (sb != null) {
sb.setLength(0);
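
The added `&& i > 0` guard preserves a leading underscore, exactly as the new comment describes. A compact re-implementation of just that behavior (the real Strings.toCamelCase also tracks a `changed` flag and can reuse a caller-supplied StringBuilder):

    class CamelCaseSketch {
        static String toCamelCase(String value) {
            StringBuilder sb = new StringBuilder(value.length());
            for (int i = 0; i < value.length(); i++) {
                char c = value.charAt(i);
                if (c == '_' && i > 0 && i + 1 < value.length()) {
                    sb.append(Character.toUpperCase(value.charAt(++i)));
                } else {
                    sb.append(c); // a leading '_' (i == 0) falls through unchanged
                }
            }
            return sb.toString();
        }
        // toCamelCase("_first_name") -> "_firstName"; toCamelCase("_name") -> "_name"
    }
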
@ -36,7 +36,7 @@ public class ValidationException extends IllegalArgumentException {
* Add a new validation error to the accumulating validation errors
* @param error the error to add
*/
public void addValidationError(String error) {
public final void addValidationError(String error) {
validationErrors.add(error);
}

@ -44,7 +44,7 @@ public class ValidationException extends IllegalArgumentException {
* Add a sequence of validation errors to the accumulating validation errors
* @param errors the errors to add
*/
public void addValidationErrors(Iterable<String> errors) {
public final void addValidationErrors(Iterable<String> errors) {
for (String error : errors) {
validationErrors.add(error);
}
@ -52,14 +52,13 @@ public class ValidationException extends IllegalArgumentException {

/**
* Returns the validation errors accumulated
* @return
*/
public List<String> validationErrors() {
public final List<String> validationErrors() {
return validationErrors;
}

@Override
public String getMessage() {
public final String getMessage() {
StringBuilder sb = new StringBuilder();
sb.append("Validation Failed: ");
int index = 0;
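
Making the accumulator methods final pins down the contract that subclasses rely on. A sketch of the intended usage (the error strings are invented; the import of ValidationException is omitted because its package is not shown in this hunk):

    class ValidationSketch {
        static void validate(String name, int size) {
            ValidationException e = new ValidationException();
            if (name == null) {
                e.addValidationError("missing required field [name]");
            }
            if (size < 0) {
                e.addValidationError("size must be positive");
            }
            if (!e.validationErrors().isEmpty()) {
                // getMessage() renders the entries numbered after the "Validation Failed: " prefix
                throw e;
            }
        }
    }
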
@ -46,8 +46,7 @@ public class LZFCompressor implements Compressor {

public LZFCompressor() {
this.decoder = ChunkDecoderFactory.safeInstance();
Loggers.getLogger(LZFCompressor.class).debug("using encoder [{}] and decoder[{}] ",
this.decoder.getClass().getSimpleName());
Loggers.getLogger(LZFCompressor.class).debug("using decoder[{}] ", this.decoder.getClass().getSimpleName());
}

@Override
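
The bug fixed here: the old debug call had two {} placeholders but supplied only one argument, so the "encoder" slot could never be filled (the class holds only a decoder). A self-contained sketch of why placeholder and argument counts must match, using a stand-in for the logger's substitution:

    class PlaceholderSketch {
        public static void main(String[] args) {
            Object name = "VanillaChunkDecoder"; // stand-in value
            System.out.println(format("using decoder[{}] ", name));                   // matched: one placeholder, one argument
            System.out.println(format("using encoder [{}] and decoder[{}] ", name));  // the removed call: two placeholders, one argument
        }

        // Minimal stand-in for "{}" substitution; an unfilled placeholder survives verbatim.
        static String format(String msg, Object... args) {
            StringBuilder sb = new StringBuilder();
            int argIndex = 0, from = 0, at;
            while ((at = msg.indexOf("{}", from)) >= 0) {
                sb.append(msg, from, at);
                sb.append(argIndex < args.length ? String.valueOf(args[argIndex++]) : "{}");
                from = at + 2;
            }
            return sb.append(msg.substring(from)).toString();
        }
    }
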
@ -27,7 +27,6 @@ import org.elasticsearch.*;
import org.elasticsearch.common.Base64;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.util.ByteArray;

import java.io.*;
import java.net.HttpURLConnection;
@ -37,9 +36,7 @@ import java.nio.file.Files;
import java.nio.file.NoSuchFileException;
import java.nio.file.Path;
import java.nio.file.attribute.FileTime;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.Callable;

/**
*
@ -152,12 +149,6 @@ public class HttpDownloadHelper {
} catch (FileNotFoundException | NoSuchFileException e) {
// checksum file doesn't exist
return false;
} catch (IOException e) {
if (ExceptionsHelper.unwrapCause(e) instanceof FileNotFoundException) {
// checksum file didn't exist
return false;
}
throw e;
} finally {
IOUtils.deleteFilesIgnoringExceptions(checksumFile);
}
@ -378,9 +369,6 @@ public class HttpDownloadHelper {
responseCode == HttpURLConnection.HTTP_MOVED_TEMP ||
responseCode == HttpURLConnection.HTTP_SEE_OTHER) {
String newLocation = httpConnection.getHeaderField("Location");
String message = aSource
+ (responseCode == HttpURLConnection.HTTP_MOVED_PERM ? " permanently"
: "") + " moved to " + newLocation;
URL newURL = new URL(newLocation);
if (!redirectionAllowed(aSource, newURL)) {
return null;
@ -426,7 +414,7 @@ public class HttpDownloadHelper {
}
}
if (is == null) {
throw new IOException("Can't get " + source + " to " + dest, lastEx);
throw lastEx;
}

os = Files.newOutputStream(dest);
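
The checksum hunk above replaces cause-unwrapping with a Java 7 multi-catch. NIO file APIs report a missing file as NoSuchFileException while legacy java.io streams use FileNotFoundException, so catching both covers the "file absent" case directly (the path handling below is illustrative):

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.NoSuchFileException;
    import java.nio.file.Paths;

    class MissingFileSketch {
        static boolean checksumExists(String path) throws IOException {
            try {
                Files.readAllBytes(Paths.get(path));
                return true; // readable, so it exists
            } catch (FileNotFoundException | NoSuchFileException e) {
                return false; // both exception types mean the file is absent
            }
        }
    }
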
@ -17,10 +17,33 @@
package org.elasticsearch.common.inject;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import org.elasticsearch.common.inject.internal.*;
import org.elasticsearch.common.inject.spi.*;
import org.elasticsearch.common.inject.internal.Annotations;
import org.elasticsearch.common.inject.internal.BindingImpl;
import org.elasticsearch.common.inject.internal.Errors;
import org.elasticsearch.common.inject.internal.ErrorsException;
import org.elasticsearch.common.inject.internal.ExposedBindingImpl;
import org.elasticsearch.common.inject.internal.InstanceBindingImpl;
import org.elasticsearch.common.inject.internal.InternalFactory;
import org.elasticsearch.common.inject.internal.LinkedBindingImpl;
import org.elasticsearch.common.inject.internal.LinkedProviderBindingImpl;
import org.elasticsearch.common.inject.internal.ProviderInstanceBindingImpl;
import org.elasticsearch.common.inject.internal.ProviderMethod;
import org.elasticsearch.common.inject.internal.Scoping;
import org.elasticsearch.common.inject.internal.UntargettedBindingImpl;
import org.elasticsearch.common.inject.spi.BindingTargetVisitor;
import org.elasticsearch.common.inject.spi.ConstructorBinding;
import org.elasticsearch.common.inject.spi.ConvertedConstantBinding;
import org.elasticsearch.common.inject.spi.ExposedBinding;
import org.elasticsearch.common.inject.spi.InjectionPoint;
import org.elasticsearch.common.inject.spi.InstanceBinding;
import org.elasticsearch.common.inject.spi.LinkedKeyBinding;
import org.elasticsearch.common.inject.spi.PrivateElements;
import org.elasticsearch.common.inject.spi.ProviderBinding;
import org.elasticsearch.common.inject.spi.ProviderInstanceBinding;
import org.elasticsearch.common.inject.spi.ProviderKeyBinding;
import org.elasticsearch.common.inject.spi.UntargettedBinding;

import java.util.ArrayList;
import java.util.List;
import java.util.Set;

@ -32,9 +55,9 @@ import java.util.Set;
*/
class BindingProcessor extends AbstractProcessor {

private final List<CreationListener> creationListeners = Lists.newArrayList();
private final List<CreationListener> creationListeners = new ArrayList<>();
private final Initializer initializer;
private final List<Runnable> uninitializedBindings = Lists.newArrayList();
private final List<Runnable> uninitializedBindings = new ArrayList<>();

BindingProcessor(Errors errors, Initializer initializer) {
super(errors);

@ -16,12 +16,12 @@

package org.elasticsearch.common.inject;

import com.google.common.collect.Lists;
import org.elasticsearch.common.inject.internal.Errors;
import org.elasticsearch.common.inject.spi.Element;
import org.elasticsearch.common.inject.spi.MembersInjectorLookup;
import org.elasticsearch.common.inject.spi.ProviderLookup;

import java.util.ArrayList;
import java.util.List;

/**
@ -32,7 +32,7 @@ import java.util.List;
*/
class DeferredLookups implements Lookups {
private final InjectorImpl injector;
private final List<Element> lookups = Lists.newArrayList();
private final List<Element> lookups = new ArrayList<>();

public DeferredLookups(InjectorImpl injector) {
this.injector = injector;

@ -17,12 +17,12 @@
package org.elasticsearch.common.inject;

import com.google.common.collect.ImmutableList;
import com.google.common.collect.Lists;
import org.elasticsearch.common.inject.internal.Errors;
import org.elasticsearch.common.inject.spi.InjectionListener;
import org.elasticsearch.common.inject.spi.Message;
import org.elasticsearch.common.inject.spi.TypeEncounter;

import java.util.ArrayList;
import java.util.List;

import static com.google.common.base.Preconditions.checkState;
@ -64,7 +64,7 @@ final class EncounterImpl<T> implements TypeEncounter<T> {
checkState(valid, "Encounters may not be used after hear() returns.");

if (membersInjectors == null) {
membersInjectors = Lists.newArrayList();
membersInjectors = new ArrayList<>();
}

membersInjectors.add(membersInjector);
@ -75,7 +75,7 @@ final class EncounterImpl<T> implements TypeEncounter<T> {
checkState(valid, "Encounters may not be used after hear() returns.");

if (injectionListeners == null) {
injectionListeners = Lists.newArrayList();
injectionListeners = new ArrayList<>();
}

injectionListeners.add(injectionListener);

Some files were not shown because too many files have changed in this diff.