Added Cluster Stats API

Closes #4460
Boaz Leskes 2013-12-10 11:15:57 +01:00
parent 75b6415b1a
commit 2b6214cff7
22 changed files with 2129 additions and 16 deletions

View File

@ -33,6 +33,8 @@ include::cluster/health.asciidoc[]
include::cluster/state.asciidoc[]
include::cluster/stats.asciidoc[]
include::cluster/pending.asciidoc[]
include::cluster/reroute.asciidoc[]

View File

@ -0,0 +1,165 @@
[[cluster-stats]]
== Cluster Stats
added[0.90.8]
The Cluster Stats API allows you to retrieve statistics from a cluster-wide perspective.
The API returns basic index metrics (shard numbers, store size, memory usage) and
information about the current nodes that form the cluster (number, roles, os, jvm
versions, memory usage, cpu and installed plugins).
[source,js]
--------------------------------------------------
curl -XGET 'http://localhost:9200/_cluster/stats?human'
--------------------------------------------------
Will return, for example:
[source,js]
--------------------------------------------------
{
"cluster_name": "elasticsearch",
"indices": {
"count": 3,
"shards": {
"total": 35,
"primaries": 15,
"replication": 1.333333333333333,
"index": {
"shards": {
"min": 10,
"max": 15,
"avg": 11.66666666666666
},
"primaries": {
"min": 5,
"max": 5,
"avg": 5
},
"replication": {
"min": 1,
"max": 2,
"avg": 1.3333333333333333
}
}
},
"docs": {
"count": 2,
"deleted": 0
},
"store": {
"size": "5.6kb",
"size_in_bytes": 5770,
"throttle_time": "0s",
"throttle_time_in_millis": 0
},
"fielddata": {
"memory_size": "0b",
"memory_size_in_bytes": 0,
"evictions": 0
},
"filter_cache": {
"memory_size": "0b",
"memory_size_in_bytes": 0,
"evictions": 0
},
"id_cache": {
"memory_size": "0b",
"memory_size_in_bytes": 0
},
"completion": {
"size": "0b",
"size_in_bytes": 0
},
"segments": {
"count": 2
}
},
"nodes": {
"count": {
"total": 2,
"master_only": 0,
"data_only": 0,
"master_data": 2,
"client": 0
},
"versions": [
"0.90.8"
],
"os": {
"available_processors": 4,
"mem": {
"total": "8gb",
"total_in_bytes": 8589934592
},
"cpu": [
{
"vendor": "Intel",
"model": "MacBookAir5,2",
"mhz": 2000,
"total_cores": 4,
"total_sockets": 4,
"cores_per_socket": 16,
"cache_size": "256b",
"cache_size_in_bytes": 256,
"count": 1
}
]
},
"process": {
"cpu": {
"percent": 3
},
"avg_open_file_descriptors": 218
},
"jvm": {
"max_uptime": "24s",
"max_uptime_in_millis": 24054,
"version": [
{
"version": "1.6.0_45",
"vm_name": "Java HotSpot(TM) 64-Bit Server VM",
"vm_version": "20.45-b01-451",
"vm_vendor": "Apple Inc.",
"count": 2
}
],
"mem": {
"heap_used": "38.3mb",
"heap_used_in_bytes": 40237120,
"heap_max": "1.9gb",
"heap_max_in_bytes": 2130051072
},
"threads": 89
},
"fs":
{
"total": "232.9gb",
"total_in_bytes": 250140434432,
"free": "31.3gb",
"free_in_bytes": 33705881600,
"available": "31.1gb",
"available_in_bytes": 33443737600,
"disk_reads": 21202753,
"disk_writes": 27028840,
"disk_io_op": 48231593,
"disk_read_size": "528gb",
"disk_read_size_in_bytes": 566980806656,
"disk_write_size": "617.9gb",
"disk_write_size_in_bytes": 663525366784,
"disk_io_size": "1145.9gb",
"disk_io_size_in_bytes": 1230506173440
},
"plugins": [
// all plugins installed on nodes
{
"name": "inquisitor",
"description": "",
"url": "/_plugin/inquisitor/",
"jvm": false,
"site": true
}
]
}
}
--------------------------------------------------
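The same information can be retrieved through the Java API. A minimal sketch, assuming an already connected `Client` instance named `client` (the variable name is illustrative):
[source,java]
--------------------------------------------------
// aggregated cluster statistics via the Java client (hypothetical `client` instance)
ClusterStatsResponse response = client.admin().cluster()
        .prepareClusterStats()
        .execute().actionGet();

// a few of the aggregated figures shown in the JSON example above
int indices = response.getIndicesStats().getIndexCount();
int totalShards = response.getIndicesStats().getShards().getTotal();
int totalNodes = response.getNodesStats().getCounts().getTotal();
--------------------------------------------------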

View File

@ -54,6 +54,8 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotA
import org.elasticsearch.action.admin.cluster.snapshots.restore.TransportRestoreSnapshotAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.TransportClusterStateAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.TransportClusterStatsAction;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.tasks.TransportPendingClusterTasksAction;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesAction;
@ -199,6 +201,7 @@ public class ActionModule extends AbstractModule {
registerAction(NodesRestartAction.INSTANCE, TransportNodesRestartAction.class);
registerAction(NodesHotThreadsAction.INSTANCE, TransportNodesHotThreadsAction.class);
registerAction(ClusterStatsAction.INSTANCE, TransportClusterStatsAction.class);
registerAction(ClusterStateAction.INSTANCE, TransportClusterStateAction.class);
registerAction(ClusterHealthAction.INSTANCE, TransportClusterHealthAction.class);
registerAction(ClusterUpdateSettingsAction.INSTANCE, TransportClusterUpdateSettingsAction.class);

View File

@ -29,10 +29,11 @@ public class PluginInfo implements Streamable, Serializable, ToXContent {
/**
* Information about plugins
*
* @param name Its name
* @param description Its description
* @param site true if it's a site plugin
* @param jvm true if it's a jvm plugin
*/
public PluginInfo(String name, String description, boolean site, boolean jvm) {
this.name = name;
@ -71,6 +72,7 @@ public class PluginInfo implements Streamable, Serializable, ToXContent {
/**
* We compute the URL for sites: "/_plugin/" + name + "/"
*
* @return
*/
public String getUrl() {
@ -117,4 +119,24 @@ public class PluginInfo implements Streamable, Serializable, ToXContent {
return builder;
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
PluginInfo p = (PluginInfo) o;
return name.equals(p.getName());
}
@Override
public int hashCode() {
return name.hashCode();
}
}

View File

@ -0,0 +1,45 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.admin.cluster.ClusterAction;
import org.elasticsearch.client.ClusterAdminClient;
/**
*/
public class ClusterStatsAction extends ClusterAction<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
public static final ClusterStatsAction INSTANCE = new ClusterStatsAction();
public static final String NAME = "cluster/stats";
private ClusterStatsAction() {
super(NAME);
}
@Override
public ClusterStatsResponse newResponse() {
return new ClusterStatsResponse();
}
@Override
public ClusterStatsRequestBuilder newRequestBuilder(ClusterAdminClient client) {
return new ClusterStatsRequestBuilder(client);
}
}

View File

@ -0,0 +1,423 @@
package org.elasticsearch.action.admin.cluster.stats;
/*
* Licensed to ElasticSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.carrotsearch.hppc.ObjectObjectOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectObjectCursor;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.indices.stats.CommonStats;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.index.cache.filter.FilterCacheStats;
import org.elasticsearch.index.cache.id.IdCacheStats;
import org.elasticsearch.index.engine.SegmentsStats;
import org.elasticsearch.index.fielddata.FieldDataStats;
import org.elasticsearch.index.percolator.stats.PercolateStats;
import org.elasticsearch.index.shard.DocsStats;
import org.elasticsearch.index.store.StoreStats;
import org.elasticsearch.search.suggest.completion.CompletionStats;
import java.io.IOException;
public class ClusterStatsIndices implements ToXContent, Streamable {
private int indexCount;
private ShardStats shards;
private DocsStats docs;
private StoreStats store;
private FieldDataStats fieldData;
private FilterCacheStats filterCache;
private IdCacheStats idCache;
private CompletionStats completion;
private SegmentsStats segments;
private PercolateStats percolate;
private ClusterStatsIndices() {
}
public ClusterStatsIndices(ClusterStatsNodeResponse[] nodeResponses) {
ObjectObjectOpenHashMap<String, ShardStats> countsPerIndex = new ObjectObjectOpenHashMap<String, ShardStats>();
this.docs = new DocsStats();
this.store = new StoreStats();
this.fieldData = new FieldDataStats();
this.filterCache = new FilterCacheStats();
this.idCache = new IdCacheStats();
this.completion = new CompletionStats();
this.segments = new SegmentsStats();
this.percolate = new PercolateStats();
for (ClusterStatsNodeResponse r : nodeResponses) {
for (org.elasticsearch.action.admin.indices.stats.ShardStats shardStats : r.shardsStats()) {
ShardStats indexShardStats = countsPerIndex.get(shardStats.getIndex());
if (indexShardStats == null) {
indexShardStats = new ShardStats();
countsPerIndex.put(shardStats.getIndex(), indexShardStats);
}
indexShardStats.total++;
CommonStats shardCommonStats = shardStats.getStats();
if (shardStats.getShardRouting().primary()) {
indexShardStats.primaries++;
docs.add(shardCommonStats.docs);
}
store.add(shardCommonStats.store);
fieldData.add(shardCommonStats.fieldData);
filterCache.add(shardCommonStats.filterCache);
idCache.add(shardCommonStats.idCache);
completion.add(shardCommonStats.completion);
segments.add(shardCommonStats.segments);
percolate.add(shardCommonStats.percolate);
}
}
shards = new ShardStats();
indexCount = countsPerIndex.size();
for (ObjectObjectCursor<String, ShardStats> indexCountsCursor : countsPerIndex) {
shards.addIndexShardCount(indexCountsCursor.value);
}
}
public int getIndexCount() {
return indexCount;
}
public ShardStats getShards() {
return this.shards;
}
public DocsStats getDocs() {
return docs;
}
public StoreStats getStore() {
return store;
}
public FieldDataStats getFieldData() {
return fieldData;
}
public FilterCacheStats getFilterCache() {
return filterCache;
}
public IdCacheStats getIdCache() {
return idCache;
}
public CompletionStats getCompletion() {
return completion;
}
public SegmentsStats getSegments() {
return segments;
}
public PercolateStats getPercolate() {
return percolate;
}
@Override
public void readFrom(StreamInput in) throws IOException {
indexCount = in.readVInt();
shards = ShardStats.readShardStats(in);
docs = DocsStats.readDocStats(in);
store = StoreStats.readStoreStats(in);
fieldData = FieldDataStats.readFieldDataStats(in);
filterCache = FilterCacheStats.readFilterCacheStats(in);
idCache = IdCacheStats.readIdCacheStats(in);
completion = CompletionStats.readCompletionStats(in);
segments = SegmentsStats.readSegmentsStats(in);
if (in.getVersion().onOrAfter(Version.V_1_0_0_RC1)) {
percolate = PercolateStats.readPercolateStats(in);
} else {
percolate = new PercolateStats();
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(indexCount);
shards.writeTo(out);
docs.writeTo(out);
store.writeTo(out);
fieldData.writeTo(out);
filterCache.writeTo(out);
idCache.writeTo(out);
completion.writeTo(out);
segments.writeTo(out);
if (out.getVersion().onOrAfter(Version.V_1_0_0_RC1)) {
percolate.writeTo(out);
}
}
public static ClusterStatsIndices readIndicesStats(StreamInput in) throws IOException {
ClusterStatsIndices indicesStats = new ClusterStatsIndices();
indicesStats.readFrom(in);
return indicesStats;
}
static final class Fields {
static final XContentBuilderString COUNT = new XContentBuilderString("count");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.COUNT, indexCount);
shards.toXContent(builder, params);
docs.toXContent(builder, params);
store.toXContent(builder, params);
fieldData.toXContent(builder, params);
filterCache.toXContent(builder, params);
idCache.toXContent(builder, params);
completion.toXContent(builder, params);
segments.toXContent(builder, params);
percolate.toXContent(builder, params);
return builder;
}
public static class ShardStats implements ToXContent, Streamable {
int indices;
int total;
int primaries;
// min/max
int minIndexShards = -1;
int maxIndexShards = -1;
int minIndexPrimaryShards = -1;
int maxIndexPrimaryShards = -1;
double minIndexReplication = -1;
double totalIndexReplication = 0;
double maxIndexReplication = -1;
public ShardStats() {
}
/**
* number of indices in the cluster
*/
public int getIndices() {
return this.indices;
}
/**
* total number of shards in the cluster
*/
public int getTotal() {
return this.total;
}
/**
* total number of primary shards in the cluster
*/
public int getPrimaries() {
return this.primaries;
}
/**
* returns how many *redundant* copies of the data the cluster holds - running with no replicas will return 0
*/
public double getReplication() {
if (primaries == 0) {
return 0;
}
return (((double) (total - primaries)) / primaries);
}
/** the maximum number of shards (primary+replicas) an index has */
public int getMaxIndexShards() {
return this.maxIndexShards;
}
/** the minimum number of shards (primary+replicas) an index has */
public int getMinIndexShards() {
return this.minIndexShards;
}
/** average number of shards (primary+replicas) across the indices */
public double getAvgIndexShards() {
if (this.indices == 0) {
return -1;
}
return ((double) this.total) / this.indices;
}
/**
* the maximum number of primary shards an index has
*/
public int getMaxIndexPrimaryShards() {
return this.maxIndexPrimaryShards;
}
/** the minimum number of primary shards an index has */
public int getMinIndexPrimaryShards() {
return this.minIndexPrimaryShards;
}
/** the average number primary shards across the indices */
public double getAvgIndexPrimaryShards() {
if (this.indices == 0) {
return -1;
}
return ((double) this.primaries) / this.indices;
}
/**
* minimum replication factor across the indices. See {@link #getReplication}
*/
public double getMinIndexReplication() {
return this.minIndexReplication;
}
/**
* average replication factor across the indices. See {@link #getReplication}
*/
public double getAvgIndexReplication() {
if (indices == 0) {
return -1;
}
return this.totalIndexReplication / this.indices;
}
/**
* maximum replication factor across the indices. See {@link #getReplication
*/
public double getMaxIndexReplication() {
return this.maxIndexReplication;
}
public void addIndexShardCount(ShardStats indexShardCount) {
this.indices++;
this.primaries += indexShardCount.primaries;
this.total += indexShardCount.total;
this.totalIndexReplication += indexShardCount.getReplication();
if (this.indices == 1) {
// first index, uninitialized.
minIndexPrimaryShards = indexShardCount.primaries;
maxIndexPrimaryShards = indexShardCount.primaries;
minIndexShards = indexShardCount.total;
maxIndexShards = indexShardCount.total;
minIndexReplication = indexShardCount.getReplication();
maxIndexReplication = minIndexReplication;
} else {
minIndexShards = Math.min(minIndexShards, indexShardCount.total);
minIndexPrimaryShards = Math.min(minIndexPrimaryShards, indexShardCount.primaries);
minIndexReplication = Math.min(minIndexReplication, indexShardCount.getReplication());
maxIndexShards = Math.max(maxIndexShards, indexShardCount.total);
maxIndexPrimaryShards = Math.max(maxIndexPrimaryShards, indexShardCount.primaries);
maxIndexReplication = Math.max(maxIndexReplication, indexShardCount.getReplication());
}
}
public static ShardStats readShardStats(StreamInput in) throws IOException {
ShardStats c = new ShardStats();
c.readFrom(in);
return c;
}
@Override
public void readFrom(StreamInput in) throws IOException {
indices = in.readVInt();
total = in.readVInt();
primaries = in.readVInt();
minIndexShards = in.readVInt();
maxIndexShards = in.readVInt();
minIndexPrimaryShards = in.readVInt();
maxIndexPrimaryShards = in.readVInt();
minIndexReplication = in.readDouble();
totalIndexReplication = in.readDouble();
maxIndexReplication = in.readDouble();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(indices);
out.writeVInt(total);
out.writeVInt(primaries);
out.writeVInt(minIndexShards);
out.writeVInt(maxIndexShards);
out.writeVInt(minIndexPrimaryShards);
out.writeVInt(maxIndexPrimaryShards);
out.writeDouble(minIndexReplication);
out.writeDouble(totalIndexReplication);
out.writeDouble(maxIndexReplication);
}
static final class Fields {
static final XContentBuilderString SHARDS = new XContentBuilderString("shards");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString PRIMARIES = new XContentBuilderString("primaries");
static final XContentBuilderString REPLICATION = new XContentBuilderString("replication");
static final XContentBuilderString MIN = new XContentBuilderString("min");
static final XContentBuilderString MAX = new XContentBuilderString("max");
static final XContentBuilderString AVG = new XContentBuilderString("avg");
static final XContentBuilderString INDEX = new XContentBuilderString("index");
}
private void addIntMinMax(XContentBuilderString field, int min, int max, double avg, XContentBuilder builder) throws IOException {
builder.startObject(field);
builder.field(Fields.MIN, min);
builder.field(Fields.MAX, max);
builder.field(Fields.AVG, avg);
builder.endObject();
}
private void addDoubleMinMax(XContentBuilderString field, double min, double max, double avg, XContentBuilder builder) throws IOException {
builder.startObject(field);
builder.field(Fields.MIN, min);
builder.field(Fields.MAX, max);
builder.field(Fields.AVG, avg);
builder.endObject();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.SHARDS);
if (indices > 0) {
builder.field(Fields.TOTAL, total);
builder.field(Fields.PRIMARIES, primaries);
builder.field(Fields.REPLICATION, getReplication());
builder.startObject(Fields.INDEX);
addIntMinMax(Fields.SHARDS, minIndexShards, maxIndexShards, getAvgIndexShards(), builder);
addIntMinMax(Fields.PRIMARIES, minIndexPrimaryShards, maxIndexPrimaryShards, getAvgIndexPrimaryShards(), builder);
addDoubleMinMax(Fields.REPLICATION, minIndexReplication, maxIndexReplication, getAvgIndexReplication(), builder);
builder.endObject();
}
builder.endObject();
return builder;
}
@Override
public String toString() {
return "total [" + total + "] primaries [" + primaries + "]";
}
}
}
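To make the replication figures above concrete, a small worked sketch using the numbers from the sample response in the documentation (15 primaries, 35 shards in total; the figures are illustrative):
// hypothetical figures matching the documentation example
int total = 35;       // primaries + replicas across the cluster
int primaries = 15;
// same formula as ShardStats#getReplication(): redundant copies per primary
double replication = ((double) (total - primaries)) / primaries;  // 1.333...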

View File

@ -0,0 +1,88 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.support.nodes.NodeOperationResponse;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
public class ClusterStatsNodeResponse extends NodeOperationResponse {
private NodeInfo nodeInfo;
private NodeStats nodeStats;
private ShardStats[] shardsStats;
ClusterStatsNodeResponse() {
}
public ClusterStatsNodeResponse(DiscoveryNode node, NodeInfo nodeInfo, NodeStats nodeStats, ShardStats[] shardsStats) {
super(node);
this.nodeInfo = nodeInfo;
this.nodeStats = nodeStats;
this.shardsStats = shardsStats;
}
public NodeInfo nodeInfo() {
return this.nodeInfo;
}
public NodeStats nodeStats() {
return this.nodeStats;
}
public ShardStats[] shardsStats() {
return this.shardsStats;
}
public static ClusterStatsNodeResponse readNodeResponse(StreamInput in) throws IOException {
ClusterStatsNodeResponse nodeResponse = new ClusterStatsNodeResponse();
nodeResponse.readFrom(in);
return nodeResponse;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
this.nodeInfo = NodeInfo.readNodeInfo(in);
this.nodeStats = NodeStats.readNodeStats(in);
int size = in.readVInt();
shardsStats = new ShardStats[size];
for (size--; size >= 0; size--) {
shardsStats[size] = ShardStats.readShardStats(in);
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
nodeInfo.writeTo(out);
nodeStats.writeTo(out);
out.writeVInt(shardsStats.length);
for (ShardStats ss : shardsStats) {
ss.writeTo(out);
}
}
}

View File

@ -0,0 +1,658 @@
package org.elasticsearch.action.admin.cluster.stats;
/*
* Licensed to ElasticSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import com.carrotsearch.hppc.ObjectIntOpenHashMap;
import com.carrotsearch.hppc.cursors.ObjectIntCursor;
import org.elasticsearch.Version;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.info.PluginInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;
import org.elasticsearch.common.transport.InetSocketTransportAddress;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.monitor.fs.FsStats;
import org.elasticsearch.monitor.jvm.JvmInfo;
import org.elasticsearch.monitor.os.OsInfo;
import java.io.IOException;
import java.net.InetAddress;
import java.util.HashSet;
import java.util.Set;
public class ClusterStatsNodes implements ToXContent, Streamable {
private Counts counts;
private Set<Version> versions;
private OsStats os;
private ProcessStats process;
private JvmStats jvm;
private FsStats.Info fs;
private Set<PluginInfo> plugins;
private ClusterStatsNodes() {
}
public ClusterStatsNodes(ClusterStatsNodeResponse[] nodeResponses) {
this.counts = new Counts();
this.versions = new HashSet<Version>();
this.os = new OsStats();
this.jvm = new JvmStats();
this.fs = new FsStats.Info();
this.plugins = new HashSet<PluginInfo>();
this.process = new ProcessStats();
Set<InetAddress> seenAddresses = new HashSet<InetAddress>(nodeResponses.length);
for (ClusterStatsNodeResponse nodeResponse : nodeResponses) {
counts.addNodeInfo(nodeResponse.nodeInfo());
versions.add(nodeResponse.nodeInfo().getVersion());
process.addNodeStats(nodeResponse.nodeStats());
jvm.addNodeInfoStats(nodeResponse.nodeInfo(), nodeResponse.nodeStats());
plugins.addAll(nodeResponse.nodeInfo().getPlugins().getInfos());
// now do the stats that should be deduped by hardware (implemented by ip deduping)
TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
InetAddress inetAddress = null;
if (publishAddress.uniqueAddressTypeId() == 1) {
inetAddress = ((InetSocketTransportAddress) publishAddress).address().getAddress();
}
if (!seenAddresses.add(inetAddress)) {
continue;
}
os.addNodeInfo(nodeResponse.nodeInfo());
if (nodeResponse.nodeStats().getFs() != null) {
fs.add(nodeResponse.nodeStats().getFs().total());
}
}
}
public Counts getCounts() {
return this.counts;
}
public Set<Version> getVersions() {
return versions;
}
public OsStats getOs() {
return os;
}
public ProcessStats getProcess() {
return process;
}
public JvmStats getJvm() {
return jvm;
}
public FsStats.Info getFs() {
return fs;
}
public Set<PluginInfo> getPlugins() {
return plugins;
}
@Override
public void readFrom(StreamInput in) throws IOException {
counts = Counts.readCounts(in);
int size = in.readVInt();
versions = new HashSet<Version>(size);
for (; size > 0; size--) {
versions.add(Version.readVersion(in));
}
os = OsStats.readOsStats(in);
process = ProcessStats.readStats(in);
jvm = JvmStats.readJvmStats(in);
fs = FsStats.Info.readInfoFrom(in);
size = in.readVInt();
plugins = new HashSet<PluginInfo>(size);
for (; size > 0; size--) {
plugins.add(PluginInfo.readPluginInfo(in));
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
counts.writeTo(out);
out.writeVInt(versions.size());
for (Version v : versions) Version.writeVersion(v, out);
os.writeTo(out);
process.writeTo(out);
jvm.writeTo(out);
fs.writeTo(out);
out.writeVInt(plugins.size());
for (PluginInfo p : plugins) {
p.writeTo(out);
}
}
public static ClusterStatsNodes readNodeStats(StreamInput in) throws IOException {
ClusterStatsNodes nodeStats = new ClusterStatsNodes();
nodeStats.readFrom(in);
return nodeStats;
}
static final class Fields {
static final XContentBuilderString COUNT = new XContentBuilderString("count");
static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
static final XContentBuilderString OS = new XContentBuilderString("os");
static final XContentBuilderString PROCESS = new XContentBuilderString("process");
static final XContentBuilderString JVM = new XContentBuilderString("jvm");
static final XContentBuilderString FS = new XContentBuilderString("fs");
static final XContentBuilderString PLUGINS = new XContentBuilderString("plugins");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.COUNT);
counts.toXContent(builder, params);
builder.endObject();
builder.startArray(Fields.VERSIONS);
for (Version v : versions) {
builder.value(v.toString());
}
builder.endArray();
builder.startObject(Fields.OS);
os.toXContent(builder, params);
builder.endObject();
builder.startObject(Fields.PROCESS);
process.toXContent(builder, params);
builder.endObject();
builder.startObject(Fields.JVM);
jvm.toXContent(builder, params);
builder.endObject();
builder.field(Fields.FS);
fs.toXContent(builder, params);
builder.startArray(Fields.PLUGINS);
for (PluginInfo pluginInfo : plugins) {
pluginInfo.toXContent(builder, params);
}
builder.endArray();
return builder;
}
public static class Counts implements Streamable, ToXContent {
int total;
int masterOnly;
int dataOnly;
int masterData;
int client;
public void addNodeInfo(NodeInfo nodeInfo) {
total++;
DiscoveryNode node = nodeInfo.getNode();
if (node.masterNode()) {
if (node.dataNode()) {
masterData++;
} else {
masterOnly++;
}
} else if (node.dataNode()) {
dataOnly++;
} else if (node.clientNode()) {
client++;
}
}
public int getTotal() {
return total;
}
public int getMasterOnly() {
return masterOnly;
}
public int getDataOnly() {
return dataOnly;
}
public int getMasterData() {
return masterData;
}
public int getClient() {
return client;
}
public static Counts readCounts(StreamInput in) throws IOException {
Counts c = new Counts();
c.readFrom(in);
return c;
}
@Override
public void readFrom(StreamInput in) throws IOException {
total = in.readVInt();
masterOnly = in.readVInt();
dataOnly = in.readVInt();
masterData = in.readVInt();
client = in.readVInt();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(total);
out.writeVInt(masterOnly);
out.writeVInt(dataOnly);
out.writeVInt(masterData);
out.writeVInt(client);
}
static final class Fields {
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString MASTER_ONLY = new XContentBuilderString("master_only");
static final XContentBuilderString DATA_ONLY = new XContentBuilderString("data_only");
static final XContentBuilderString MASTER_DATA = new XContentBuilderString("master_data");
static final XContentBuilderString CLIENT = new XContentBuilderString("client");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.TOTAL, total);
builder.field(Fields.MASTER_ONLY, masterOnly);
builder.field(Fields.DATA_ONLY, dataOnly);
builder.field(Fields.MASTER_DATA, masterData);
builder.field(Fields.CLIENT, client);
return builder;
}
}
public static class OsStats implements ToXContent, Streamable {
int availableProcessors;
long availableMemory;
ObjectIntOpenHashMap<OsInfo.Cpu> cpus;
public OsStats() {
cpus = new ObjectIntOpenHashMap<org.elasticsearch.monitor.os.OsInfo.Cpu>();
}
public void addNodeInfo(NodeInfo nodeInfo) {
if (nodeInfo.getOs() == null) {
return;
}
availableProcessors += nodeInfo.getOs().availableProcessors();
if (nodeInfo.getOs().cpu() != null) {
cpus.addTo(nodeInfo.getOs().cpu(), 1);
}
if (nodeInfo.getOs().getMem() != null && nodeInfo.getOs().getMem().getTotal().bytes() != -1) {
availableMemory += nodeInfo.getOs().getMem().getTotal().bytes();
}
}
public int getAvailableProcessors() {
return availableProcessors;
}
public ByteSizeValue getAvailableMemory() {
return new ByteSizeValue(availableMemory);
}
public ObjectIntOpenHashMap<OsInfo.Cpu> getCpus() {
return cpus;
}
@Override
public void readFrom(StreamInput in) throws IOException {
availableProcessors = in.readVInt();
availableMemory = in.readLong();
int size = in.readVInt();
cpus = new ObjectIntOpenHashMap<OsInfo.Cpu>(size);
for (; size > 0; size--) {
cpus.addTo(OsInfo.Cpu.readCpu(in), in.readVInt());
}
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(availableProcessors);
out.writeLong(availableMemory);
out.writeVInt(cpus.size());
for (ObjectIntCursor<OsInfo.Cpu> c : cpus) {
c.key.writeTo(out);
out.writeVInt(c.value);
}
}
public static OsStats readOsStats(StreamInput in) throws IOException {
OsStats os = new OsStats();
os.readFrom(in);
return os;
}
static final class Fields {
static final XContentBuilderString AVAILABLE_PROCESSORS = new XContentBuilderString("available_processors");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString TOTAL = new XContentBuilderString("total");
static final XContentBuilderString TOTAL_IN_BYTES = new XContentBuilderString("total_in_bytes");
static final XContentBuilderString CPU = new XContentBuilderString("cpu");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.TOTAL_IN_BYTES, Fields.TOTAL, availableMemory);
builder.endObject();
builder.startArray(Fields.CPU);
for (ObjectIntCursor<OsInfo.Cpu> cpu : cpus) {
builder.startObject();
cpu.key.toXContent(builder, params);
builder.field(Fields.COUNT, cpu.value);
builder.endObject();
}
builder.endArray();
return builder;
}
}
public static class ProcessStats implements ToXContent, Streamable {
int count;
int cpuPercent;
long totalOpenFileDescriptors;
public void addNodeStats(NodeStats nodeStats) {
if (nodeStats.getProcess() == null) {
return;
}
count++;
if (nodeStats.getProcess().cpu() != null) {
// with no sigar, this may not be available
cpuPercent += nodeStats.getProcess().cpu().getPercent();
}
totalOpenFileDescriptors += nodeStats.getProcess().openFileDescriptors();
}
/**
* Cpu usage in percentages - 100 is 1 core.
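* For example, two nodes each fully using one core add up to 200.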
*/
public int getCpuPercent() {
return cpuPercent;
}
public long getAvgOpenFileDescriptors() {
if (count == 0) {
return -1;
}
return totalOpenFileDescriptors / count;
}
@Override
public void readFrom(StreamInput in) throws IOException {
count = in.readVInt();
cpuPercent = in.readVInt();
totalOpenFileDescriptors = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(count);
out.writeVInt(cpuPercent);
out.writeVLong(totalOpenFileDescriptors);
}
public static ProcessStats readStats(StreamInput in) throws IOException {
ProcessStats cpu = new ProcessStats();
cpu.readFrom(in);
return cpu;
}
static final class Fields {
static final XContentBuilderString CPU = new XContentBuilderString("cpu");
static final XContentBuilderString PERCENT = new XContentBuilderString("percent");
static final XContentBuilderString AVG_OPEN_FD = new XContentBuilderString("avg_open_file_descriptors");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject();
builder.field(Fields.AVG_OPEN_FD, getAvgOpenFileDescriptors());
return builder;
}
}
public static class JvmStats implements Streamable, ToXContent {
ObjectIntOpenHashMap<JvmVersion> versions;
long threads;
long maxUptime;
long heapUsed;
long heapMax;
JvmStats() {
versions = new ObjectIntOpenHashMap<JvmVersion>();
threads = 0;
maxUptime = 0;
heapMax = 0;
heapUsed = 0;
}
public ObjectIntOpenHashMap<JvmVersion> getVersions() {
return versions;
}
/**
* The total number of threads in the cluster
*/
public long getThreads() {
return threads;
}
/**
* The maximum uptime of a node in the cluster
*/
public TimeValue getMaxUpTime() {
return new TimeValue(maxUptime);
}
/**
* Total heap used in the cluster
*/
public ByteSizeValue getHeapUsed() {
return new ByteSizeValue(heapUsed);
}
/**
* Maximum total heap available to the cluster
*/
public ByteSizeValue getHeapMax() {
return new ByteSizeValue(heapMax);
}
public void addNodeInfoStats(NodeInfo nodeInfo, NodeStats nodeStats) {
versions.addTo(new JvmVersion(nodeInfo.getJvm()), 1);
org.elasticsearch.monitor.jvm.JvmStats js = nodeStats.getJvm();
if (js == null) {
return;
}
if (js.threads() != null) {
threads += js.threads().count();
}
maxUptime = Math.max(maxUptime, js.uptime().millis());
if (js.mem() != null) {
heapUsed += js.mem().getHeapUsed().bytes();
heapMax += js.mem().getHeapMax().bytes();
}
}
@Override
public void readFrom(StreamInput in) throws IOException {
int size = in.readVInt();
versions = new ObjectIntOpenHashMap<JvmVersion>(size);
for (; size > 0; size--) {
versions.addTo(JvmVersion.readJvmVersion(in), in.readVInt());
}
threads = in.readVLong();
maxUptime = in.readVLong();
heapUsed = in.readVLong();
heapMax = in.readVLong();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeVInt(versions.size());
for (ObjectIntCursor<JvmVersion> v : versions) {
v.key.writeTo(out);
out.writeVInt(v.value);
}
out.writeVLong(threads);
out.writeVLong(maxUptime);
out.writeVLong(heapUsed);
out.writeVLong(heapMax);
}
public static JvmStats readJvmStats(StreamInput in) throws IOException {
JvmStats jvmStats = new JvmStats();
jvmStats.readFrom(in);
return jvmStats;
}
static final class Fields {
static final XContentBuilderString VERSIONS = new XContentBuilderString("versions");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
static final XContentBuilderString VM_NAME = new XContentBuilderString("vm_name");
static final XContentBuilderString VM_VERSION = new XContentBuilderString("vm_version");
static final XContentBuilderString VM_VENDOR = new XContentBuilderString("vm_vendor");
static final XContentBuilderString COUNT = new XContentBuilderString("count");
static final XContentBuilderString THREADS = new XContentBuilderString("threads");
static final XContentBuilderString MAX_UPTIME = new XContentBuilderString("max_uptime");
static final XContentBuilderString MAX_UPTIME_IN_MILLIS = new XContentBuilderString("max_uptime_in_millis");
static final XContentBuilderString MEM = new XContentBuilderString("mem");
static final XContentBuilderString HEAP_USED = new XContentBuilderString("heap_used");
static final XContentBuilderString HEAP_USED_IN_BYTES = new XContentBuilderString("heap_used_in_bytes");
static final XContentBuilderString HEAP_MAX = new XContentBuilderString("heap_max");
static final XContentBuilderString HEAP_MAX_IN_BYTES = new XContentBuilderString("heap_max_in_bytes");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime);
builder.startArray(Fields.VERSIONS);
for (ObjectIntCursor<JvmVersion> v : versions) {
builder.startObject();
builder.field(Fields.VERSION, v.key.version);
builder.field(Fields.VM_NAME, v.key.vmName);
builder.field(Fields.VM_VERSION, v.key.vmVersion);
builder.field(Fields.VM_VENDOR, v.key.vmVendor);
builder.field(Fields.COUNT, v.value);
builder.endObject();
}
builder.endArray();
builder.startObject(Fields.MEM);
builder.byteSizeField(Fields.HEAP_USED_IN_BYTES, Fields.HEAP_USED, heapUsed);
builder.byteSizeField(Fields.HEAP_MAX_IN_BYTES, Fields.HEAP_MAX, heapMax);
builder.endObject();
builder.field(Fields.THREADS, threads);
return builder;
}
}
public static class JvmVersion implements Streamable {
String version;
String vmName;
String vmVersion;
String vmVendor;
JvmVersion(JvmInfo jvmInfo) {
version = jvmInfo.version();
vmName = jvmInfo.vmName();
vmVersion = jvmInfo.vmVersion();
vmVendor = jvmInfo.vmVendor();
}
JvmVersion() {
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
JvmVersion jvm = (JvmVersion) o;
return vmVersion.equals(jvm.vmVersion) && vmVendor.equals(jvm.vmVendor);
}
@Override
public int hashCode() {
return vmVersion.hashCode();
}
public static JvmVersion readJvmVersion(StreamInput in) throws IOException {
JvmVersion jvm = new JvmVersion();
jvm.readFrom(in);
return jvm;
}
@Override
public void readFrom(StreamInput in) throws IOException {
version = in.readString();
vmName = in.readString();
vmVersion = in.readString();
vmVendor = in.readString();
}
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(version);
out.writeString(vmName);
out.writeString(vmVersion);
out.writeString(vmVendor);
}
}
}

View File

@ -0,0 +1,51 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.support.nodes.NodesOperationRequest;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import java.io.IOException;
/**
* A request to get cluster level stats.
*/
public class ClusterStatsRequest extends NodesOperationRequest<ClusterStatsRequest> {
/**
* Get stats from nodes based on the nodes ids specified. If none are passed, stats
* based on all nodes will be returned.
*/
public ClusterStatsRequest(String... nodesIds) {
super(nodesIds);
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
}
}
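A minimal usage sketch (the node id, the listener body and the `client` instance are illustrative assumptions; the request can also be built via the ClusterStatsRequestBuilder below):
// limit the request to one hypothetical node; passing no ids targets all nodes
ClusterStatsRequest request = new ClusterStatsRequest("node-1");
client.admin().cluster().clusterStats(request, new ActionListener<ClusterStatsResponse>() {
    @Override
    public void onResponse(ClusterStatsResponse response) {
        // e.g. total heap used across the whole cluster
        System.out.println(response.getNodesStats().getJvm().getHeapUsed());
    }

    @Override
    public void onFailure(Throwable e) {
        e.printStackTrace();
    }
});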

View File

@ -0,0 +1,40 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.support.nodes.NodesOperationRequestBuilder;
import org.elasticsearch.client.ClusterAdminClient;
import org.elasticsearch.client.internal.InternalClusterAdminClient;
/**
*
*/
public class ClusterStatsRequestBuilder extends NodesOperationRequestBuilder<ClusterStatsRequest, ClusterStatsResponse, ClusterStatsRequestBuilder> {
public ClusterStatsRequestBuilder(ClusterAdminClient clusterClient) {
super((InternalClusterAdminClient) clusterClient, new ClusterStatsRequest());
}
@Override
protected void doExecute(ActionListener<ClusterStatsResponse> listener) {
((ClusterAdminClient) client).clusterStats(request, listener);
}
}

View File

@ -0,0 +1,135 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.action.support.nodes.NodesOperationResponse;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.xcontent.ToXContent;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentBuilderString;
import org.elasticsearch.common.xcontent.XContentFactory;
import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
/**
*
*/
public class ClusterStatsResponse extends NodesOperationResponse<ClusterStatsNodeResponse> implements ToXContent {
ClusterStatsNodes nodesStats;
ClusterStatsIndices indicesStats;
String clusterUUID;
ClusterStatsResponse() {
}
public ClusterStatsResponse(ClusterName clusterName, String clusterUUID, ClusterStatsNodeResponse[] nodes) {
super(clusterName, null);
this.clusterUUID = clusterUUID;
nodesStats = new ClusterStatsNodes(nodes);
indicesStats = new ClusterStatsIndices(nodes);
}
public ClusterStatsNodes getNodesStats() {
return nodesStats;
}
public ClusterStatsIndices getIndicesStats() {
return indicesStats;
}
@Override
public ClusterStatsNodeResponse[] getNodes() {
throw new UnsupportedOperationException();
}
@Override
public Map<String, ClusterStatsNodeResponse> getNodesMap() {
throw new UnsupportedOperationException();
}
@Override
public ClusterStatsNodeResponse getAt(int position) {
throw new UnsupportedOperationException();
}
@Override
public Iterator<ClusterStatsNodeResponse> iterator() {
throw new UnsupportedOperationException();
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
clusterUUID = in.readString();
nodesStats = ClusterStatsNodes.readNodeStats(in);
indicesStats = ClusterStatsIndices.readIndicesStats(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
out.writeString(clusterUUID);
nodesStats.writeTo(out);
indicesStats.writeTo(out);
}
static final class Fields {
static final XContentBuilderString NODES = new XContentBuilderString("nodes");
static final XContentBuilderString INDICES = new XContentBuilderString("indices");
static final XContentBuilderString UUID = new XContentBuilderString("uuid");
static final XContentBuilderString CLUSTER_NAME = new XContentBuilderString("cluster_name");
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.CLUSTER_NAME, getClusterName().value());
if (params.paramAsBoolean("output_uuid", false)) {
builder.field(Fields.UUID, clusterUUID);
}
builder.startObject(Fields.INDICES);
indicesStats.toXContent(builder, params);
builder.endObject();
builder.startObject(Fields.NODES);
nodesStats.toXContent(builder, params);
builder.endObject();
return builder;
}
@Override
public String toString() {
try {
XContentBuilder builder = XContentFactory.jsonBuilder().prettyPrint();
builder.startObject();
toXContent(builder, EMPTY_PARAMS);
builder.endObject();
return builder.string();
} catch (IOException e) {
return "{ \"error\" : \"" + e.getMessage() + "\"}";
}
}
}

View File

@ -0,0 +1,166 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.admin.cluster.stats;
import org.elasticsearch.ElasticSearchException;
import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
import org.elasticsearch.action.admin.indices.stats.CommonStatsFlags;
import org.elasticsearch.action.admin.indices.stats.ShardStats;
import org.elasticsearch.action.support.nodes.NodeOperationRequest;
import org.elasticsearch.action.support.nodes.TransportNodesOperationAction;
import org.elasticsearch.cluster.ClusterName;
import org.elasticsearch.cluster.ClusterService;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.index.service.IndexService;
import org.elasticsearch.index.shard.service.IndexShard;
import org.elasticsearch.indices.IndicesService;
import org.elasticsearch.node.service.NodeService;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportService;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicReferenceArray;
/**
*
*/
public class TransportClusterStatsAction extends TransportNodesOperationAction<ClusterStatsRequest, ClusterStatsResponse,
TransportClusterStatsAction.ClusterStatsNodeRequest, ClusterStatsNodeResponse> {
private static final CommonStatsFlags SHARD_STATS_FLAGS = new CommonStatsFlags(CommonStatsFlags.Flag.Docs, CommonStatsFlags.Flag.Store,
CommonStatsFlags.Flag.FieldData, CommonStatsFlags.Flag.FilterCache, CommonStatsFlags.Flag.IdCache,
CommonStatsFlags.Flag.Completion, CommonStatsFlags.Flag.Segments, CommonStatsFlags.Flag.Percolate);
private final NodeService nodeService;
private final IndicesService indicesService;
@Inject
public TransportClusterStatsAction(Settings settings, ClusterName clusterName, ThreadPool threadPool,
ClusterService clusterService, TransportService transportService,
NodeService nodeService, IndicesService indicesService) {
super(settings, clusterName, threadPool, clusterService, transportService);
this.nodeService = nodeService;
this.indicesService = indicesService;
}
@Override
protected String executor() {
return ThreadPool.Names.MANAGEMENT;
}
@Override
protected String transportAction() {
return ClusterStatsAction.NAME;
}
@Override
protected ClusterStatsResponse newResponse(ClusterStatsRequest clusterStatsRequest, AtomicReferenceArray responses) {
final List<ClusterStatsNodeResponse> nodeStats = new ArrayList<ClusterStatsNodeResponse>(responses.length());
for (int i = 0; i < responses.length(); i++) {
Object resp = responses.get(i);
if (resp instanceof ClusterStatsNodeResponse) {
nodeStats.add((ClusterStatsNodeResponse) resp);
}
}
return new ClusterStatsResponse(clusterName, clusterService.state().metaData().uuid(), nodeStats.toArray(new ClusterStatsNodeResponse[nodeStats.size()]));
}
@Override
protected ClusterStatsRequest newRequest() {
return new ClusterStatsRequest();
}
@Override
protected ClusterStatsNodeRequest newNodeRequest() {
return new ClusterStatsNodeRequest();
}
@Override
protected ClusterStatsNodeRequest newNodeRequest(String nodeId, ClusterStatsRequest request) {
return new ClusterStatsNodeRequest(nodeId, request);
}
@Override
protected ClusterStatsNodeResponse newNodeResponse() {
return new ClusterStatsNodeResponse();
}
@Override
protected ClusterStatsNodeResponse nodeOperation(ClusterStatsNodeRequest nodeRequest) throws ElasticSearchException {
NodeInfo nodeInfo = nodeService.info(false, true, false, true, false, false, true, false, true);
NodeStats nodeStats = nodeService.stats(CommonStatsFlags.NONE, false, true, true, false, false, true, false, false);
List<ShardStats> shardsStats = new ArrayList<ShardStats>();
for (String index : indicesService.indices()) {
IndexService indexService = indicesService.indexService(index);
if (indexService == null) {
continue;
}
for (IndexShard indexShard : indexService) {
logger.debug("{} {}", indexShard.shardId(), indexShard.state());
if (indexShard.routingEntry().active()) {
// only report on fully started shards
shardsStats.add(new ShardStats(indexShard, SHARD_STATS_FLAGS));
}
}
}
return new ClusterStatsNodeResponse(nodeInfo.getNode(), nodeInfo, nodeStats, shardsStats.toArray(new ShardStats[shardsStats.size()]));
}
@Override
protected boolean accumulateExceptions() {
return false;
}
static class ClusterStatsNodeRequest extends NodeOperationRequest {
ClusterStatsRequest request;
ClusterStatsNodeRequest() {
}
ClusterStatsNodeRequest(String nodeId, ClusterStatsRequest request) {
super(request, nodeId);
this.request = request;
}
@Override
public void readFrom(StreamInput in) throws IOException {
super.readFrom(in);
request = new ClusterStatsRequest();
request.readFrom(in);
}
@Override
public void writeTo(StreamOutput out) throws IOException {
super.writeTo(out);
request.writeTo(out);
}
}
}

View File

@ -40,6 +40,20 @@ public class CommonStatsFlags implements Streamable, Cloneable {
private String[] fieldDataFields = null;
private String[] completionDataFields = null;
/**
* @param flags flags to set. If no flags are supplied, default flags will be set.
*/
public CommonStatsFlags(Flag... flags) {
if (flags.length > 0) {
clear();
for (Flag f : flags) {
this.flags.add(f);
}
}
}
/**
* Sets all flags to return all stats.
*/
@ -136,6 +150,7 @@ public class CommonStatsFlags implements Streamable, Cloneable {
flags.add(flag);
}
public CommonStatsFlags set(Flag flag, boolean add) {
if (add) {
set(flag);

View File

@ -72,6 +72,9 @@ import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotR
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksResponse;
@ -188,10 +191,30 @@ public interface ClusterAdminClient {
*/
NodesInfoRequestBuilder prepareNodesInfo(String... nodesIds);
/**
* Cluster wide aggregated stats.
*
* @param request The cluster stats request
* @return The result future
* @see org.elasticsearch.client.Requests#clusterStatsRequest
*/
ActionFuture<ClusterStatsResponse> clusterStats(ClusterStatsRequest request);
/**
* Cluster wide aggregated stats
*
* @param request The cluster stats request
* @param listener A listener to be notified with a result
* @see org.elasticsearch.client.Requests#clusterStatsRequest()
*/
void clusterStats(ClusterStatsRequest request, ActionListener<ClusterStatsResponse> listener);
ClusterStatsRequestBuilder prepareClusterStats();
/**
* Nodes stats of the cluster.
*
* @param request The nodes info request
* @param request The nodes stats request
* @return The result future
* @see org.elasticsearch.client.Requests#nodesStatsRequest(String...)
*/

View File

@ -35,6 +35,7 @@ import org.elasticsearch.action.admin.cluster.snapshots.delete.DeleteSnapshotReq
import org.elasticsearch.action.admin.cluster.snapshots.get.GetSnapshotsRequest;
import org.elasticsearch.action.admin.cluster.snapshots.restore.RestoreSnapshotRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
import org.elasticsearch.action.admin.indices.alias.IndicesAliasesRequest;
import org.elasticsearch.action.admin.indices.cache.clear.ClearIndicesCacheRequest;
import org.elasticsearch.action.admin.indices.close.CloseIndexRequest;
@ -425,6 +426,16 @@ public class Requests {
return new NodesStatsRequest(nodesIds);
}
/**
* Creates a cluster stats request.
*
* @return The cluster stats request
* @see org.elasticsearch.client.ClusterAdminClient#clusterStats(org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest)
*/
public static ClusterStatsRequest clusterStatsRequest() {
return new ClusterStatsRequest();
}
/**
* Shuts down all nodes in the cluster.
*/

View File

@ -89,6 +89,10 @@ import org.elasticsearch.action.admin.cluster.state.ClusterStateAction;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequest;
import org.elasticsearch.action.admin.cluster.state.ClusterStateRequestBuilder;
import org.elasticsearch.action.admin.cluster.state.ClusterStateResponse;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsAction;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequestBuilder;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksAction;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequest;
import org.elasticsearch.action.admin.cluster.tasks.PendingClusterTasksRequestBuilder;
@ -195,6 +199,21 @@ public abstract class AbstractClusterAdminClient implements InternalClusterAdmin
return new NodesStatsRequestBuilder(this).setNodesIds(nodesIds);
}
@Override
public ActionFuture<ClusterStatsResponse> clusterStats(ClusterStatsRequest request) {
return execute(ClusterStatsAction.INSTANCE, request);
}
@Override
public void clusterStats(ClusterStatsRequest request, ActionListener<ClusterStatsResponse> listener) {
execute(ClusterStatsAction.INSTANCE, request, listener);
}
@Override
public ClusterStatsRequestBuilder prepareClusterStats() {
return new ClusterStatsRequestBuilder(this);
}
@Override
public ActionFuture<NodesHotThreadsResponse> nodesHotThreads(NodesHotThreadsRequest request) {
return execute(NodesHotThreadsAction.INSTANCE, request);
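The builder registered here wraps the same action behind a fluent API. A short illustrative sketch using the asynchronous execute(listener) form; the blocking get() form appears in the integration tests at the end of this commit:
[source,java]
--------------------------------------------------
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.client.Client;

public class ClusterStatsBuilderExample {
    // Illustrative: the builder delegates to the same ClusterStatsAction as the request variant.
    static void logIndexCount(Client client) {
        client.admin().cluster().prepareClusterStats().execute(
                new ActionListener<ClusterStatsResponse>() {
                    @Override
                    public void onResponse(ClusterStatsResponse response) {
                        System.out.println("indices: " + response.getIndicesStats().getIndexCount());
                    }

                    @Override
                    public void onFailure(Throwable e) {
                        e.printStackTrace();
                    }
                });
    }
}
--------------------------------------------------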

View File

@ -89,4 +89,5 @@ public class InternalTransportClusterAdminClient extends AbstractClusterAdminCli
}
}, listener);
}
}

View File

@ -20,6 +20,7 @@
package org.elasticsearch.monitor.fs;
import com.google.common.collect.Iterators;
import org.elasticsearch.Version;
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.Strings;
import org.elasticsearch.common.io.stream.StreamInput;
@ -56,9 +57,19 @@ public class FsStats implements Iterable<FsStats.Info>, Streamable, ToXContent {
double diskQueue = -1;
double diskServiceTime = -1;
static public Info readInfoFrom(StreamInput in) throws IOException {
Info i = new Info();
i.readFrom(in);
return i;
}
@Override
public void readFrom(StreamInput in) throws IOException {
path = in.readString();
if (in.getVersion().after(Version.V_0_90_7)) {
path = in.readOptionalString();
} else {
path = in.readString();
}
mount = in.readOptionalString();
dev = in.readOptionalString();
total = in.readLong();
@ -74,7 +85,11 @@ public class FsStats implements Iterable<FsStats.Info>, Streamable, ToXContent {
@Override
public void writeTo(StreamOutput out) throws IOException {
out.writeString(path);
if (out.getVersion().after(Version.V_0_90_7)) {
out.writeOptionalString(path); // total aggregates do not have a path
} else {
out.writeString(path);
}
out.writeOptionalString(mount);
out.writeOptionalString(dev);
out.writeLong(total);
@ -324,8 +339,7 @@ public class FsStats implements Iterable<FsStats.Info>, Streamable, ToXContent {
timestamp = in.readVLong();
infos = new Info[in.readVInt()];
for (int i = 0; i < infos.length; i++) {
infos[i] = new Info();
infos[i].readFrom(in);
infos[i] = Info.readInfoFrom(in);
}
}
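The version check above keeps the wire format compatible: only nodes newer than 0.90.7 read and write the path as an optional string, because the aggregated total entry used by cluster stats has no path, while older versions keep the original non-optional encoding. A minimal sketch of the same backward-compatible pattern on a made-up Streamable (not part of the commit):
[source,java]
--------------------------------------------------
import org.elasticsearch.Version;
import org.elasticsearch.common.io.stream.StreamInput;
import org.elasticsearch.common.io.stream.StreamOutput;
import org.elasticsearch.common.io.stream.Streamable;

import java.io.IOException;

public class LabelledValue implements Streamable {
    String label;   // may be null when this entry is an aggregate
    long value;

    @Override
    public void readFrom(StreamInput in) throws IOException {
        if (in.getVersion().after(Version.V_0_90_7)) {
            label = in.readOptionalString();   // newer nodes tolerate a missing label
        } else {
            label = in.readString();           // keep the old, non-optional encoding
        }
        value = in.readLong();
    }

    @Override
    public void writeTo(StreamOutput out) throws IOException {
        if (out.getVersion().after(Version.V_0_90_7)) {
            out.writeOptionalString(label);
        } else {
            out.writeString(label);            // older nodes always expect a string here
        }
        out.writeLong(value);
    }
}
--------------------------------------------------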

View File

@ -115,13 +115,7 @@ public class OsInfo implements Streamable, Serializable, ToXContent {
builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
if (cpu != null) {
builder.startObject(Fields.CPU);
builder.field(Fields.VENDOR, cpu.vendor());
builder.field(Fields.MODEL, cpu.model());
builder.field(Fields.MHZ, cpu.mhz());
builder.field(Fields.TOTAL_CORES, cpu.totalCores());
builder.field(Fields.TOTAL_SOCKETS, cpu.totalSockets());
builder.field(Fields.CORES_PER_SOCKET, cpu.coresPerSocket());
builder.byteSizeField(Fields.CACHE_SIZE_IN_BYTES, Fields.CACHE_SIZE, cpu.cacheSize);
cpu.toXContent(builder, params);
builder.endObject();
}
if (mem != null) {
@ -251,7 +245,7 @@ public class OsInfo implements Streamable, Serializable, ToXContent {
}
public static class Cpu implements Streamable, Serializable {
public static class Cpu implements Streamable, Serializable, ToXContent {
String vendor = "";
String model = "";
@ -348,5 +342,36 @@ public class OsInfo implements Streamable, Serializable, ToXContent {
out.writeInt(coresPerSocket);
out.writeLong(cacheSize);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
Cpu cpu = (Cpu) o;
return model.equals(cpu.model) && vendor.equals(cpu.vendor);
}
@Override
public int hashCode() {
return model.hashCode();
}
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
builder.field(Fields.VENDOR, vendor);
builder.field(Fields.MODEL, model);
builder.field(Fields.MHZ, mhz);
builder.field(Fields.TOTAL_CORES, totalCores);
builder.field(Fields.TOTAL_SOCKETS, totalSockets);
builder.field(Fields.CORES_PER_SOCKET, coresPerSocket);
builder.byteSizeField(Fields.CACHE_SIZE_IN_BYTES, Fields.CACHE_SIZE, cacheSize);
return builder;
}
}
}
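Cpu now renders its own XContent fragment and defines equality on vendor and model, so identical CPUs reported by different nodes can be collapsed into a single counted entry. An illustrative sketch of that kind of grouping (the helper class is made up):
[source,java]
--------------------------------------------------
import org.elasticsearch.monitor.os.OsInfo;

import java.util.HashMap;
import java.util.Map;

public class CpuCounter {
    // Illustrative: with equals()/hashCode() in place, identical vendor/model pairs
    // hash to the same key and can simply be counted.
    static Map<OsInfo.Cpu, Integer> countByModel(Iterable<OsInfo.Cpu> cpus) {
        Map<OsInfo.Cpu, Integer> counts = new HashMap<OsInfo.Cpu, Integer>();
        for (OsInfo.Cpu cpu : cpus) {
            Integer seen = counts.get(cpu);
            counts.put(cpu, seen == null ? 1 : seen + 1);
        }
        return counts;
    }
}
--------------------------------------------------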

View File

@ -41,6 +41,7 @@ import org.elasticsearch.rest.action.admin.cluster.snapshots.delete.RestDeleteSn
import org.elasticsearch.rest.action.admin.cluster.snapshots.get.RestGetSnapshotsAction;
import org.elasticsearch.rest.action.admin.cluster.snapshots.restore.RestRestoreSnapshotAction;
import org.elasticsearch.rest.action.admin.cluster.state.RestClusterStateAction;
import org.elasticsearch.rest.action.admin.cluster.stats.RestClusterStatsAction;
import org.elasticsearch.rest.action.admin.cluster.tasks.RestPendingClusterTasksAction;
import org.elasticsearch.rest.action.admin.indices.alias.RestGetIndicesAliasesAction;
import org.elasticsearch.rest.action.admin.indices.alias.RestIndicesAliasesAction;
@ -125,6 +126,7 @@ public class RestActionModule extends AbstractModule {
bind(RestNodesHotThreadsAction.class).asEagerSingleton();
bind(RestNodesShutdownAction.class).asEagerSingleton();
bind(RestNodesRestartAction.class).asEagerSingleton();
bind(RestClusterStatsAction.class).asEagerSingleton();
bind(RestClusterStateAction.class).asEagerSingleton();
bind(RestClusterHealthAction.class).asEagerSingleton();
bind(RestClusterUpdateSettingsAction.class).asEagerSingleton();

View File

@ -0,0 +1,75 @@
/*
* Licensed to ElasticSearch and Shay Banon under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.rest.action.admin.cluster.stats;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsRequest;
import org.elasticsearch.action.admin.cluster.stats.ClusterStatsResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.rest.*;
import org.elasticsearch.rest.action.support.RestXContentBuilder;
import java.io.IOException;
/**
*
*/
public class RestClusterStatsAction extends BaseRestHandler {
@Inject
public RestClusterStatsAction(Settings settings, Client client, RestController controller) {
super(settings, client);
controller.registerHandler(RestRequest.Method.GET, "/_cluster/stats", this);
controller.registerHandler(RestRequest.Method.GET, "/_cluster/stats/nodes/{nodeId}", this);
}
@Override
public void handleRequest(final RestRequest request, final RestChannel channel) {
ClusterStatsRequest clusterStatsRequest = new ClusterStatsRequest().nodesIds(request.paramAsStringArray("nodeId", null));
clusterStatsRequest.listenerThreaded(false);
client.admin().cluster().clusterStats(clusterStatsRequest, new ActionListener<ClusterStatsResponse>() {
@Override
public void onResponse(ClusterStatsResponse response) {
try {
XContentBuilder builder = RestXContentBuilder.restContentBuilder(request);
builder.startObject();
response.toXContent(builder, request);
builder.endObject();
channel.sendResponse(new XContentRestResponse(request, RestStatus.OK, builder));
} catch (Throwable e) {
onFailure(e);
}
}
@Override
public void onFailure(Throwable e) {
try {
channel.sendResponse(new XContentThrowableRestResponse(request, e));
} catch (IOException e1) {
logger.error("Failed to send failure response", e1);
}
}
});
}
}
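Alongside the plain /_cluster/stats route, the handler registers a node-scoped variant. A hedged sketch of calling it over HTTP with the plain JDK; the host, port and node names are assumptions for illustration:
[source,java]
--------------------------------------------------
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class ClusterStatsHttpExample {
    public static void main(String[] args) throws Exception {
        // Illustrative node filter: restrict the aggregation to two named nodes.
        URL url = new URL("http://localhost:9200/_cluster/stats/nodes/node-1,node-2");
        HttpURLConnection connection = (HttpURLConnection) url.openConnection();
        connection.setRequestMethod("GET");
        BufferedReader reader = new BufferedReader(
                new InputStreamReader(connection.getInputStream(), "UTF-8"));
        String line;
        while ((line = reader.readLine()) != null) {
            System.out.println(line);   // raw JSON cluster stats for the selected nodes
        }
        reader.close();
    }
}
--------------------------------------------------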

View File

@ -0,0 +1,130 @@
package org.elasticsearch.action.admin.cluster.stats;
/*
* Licensed to ElasticSearch under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. ElasticSearch licenses this
* file to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
import org.elasticsearch.Version;
import org.elasticsearch.common.settings.ImmutableSettings;
import org.elasticsearch.monitor.sigar.SigarService;
import org.elasticsearch.test.ElasticsearchIntegrationTest;
import org.elasticsearch.test.ElasticsearchIntegrationTest.ClusterScope;
import org.hamcrest.Matchers;
import org.junit.Test;
@ClusterScope(scope = ElasticsearchIntegrationTest.Scope.TEST, numNodes = 0)
public class ClusterStatsTests extends ElasticsearchIntegrationTest {
private void assertCounts(ClusterStatsNodes.Counts counts, int total, int masterOnly, int dataOnly, int masterData, int client) {
assertThat(counts.getTotal(), Matchers.equalTo(total));
assertThat(counts.getMasterOnly(), Matchers.equalTo(masterOnly));
assertThat(counts.getDataOnly(), Matchers.equalTo(dataOnly));
assertThat(counts.getMasterData(), Matchers.equalTo(masterData));
assertThat(counts.getClient(), Matchers.equalTo(client));
}
@Test
public void testNodeCounts() {
cluster().startNode();
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 1, 0, 0, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.data", false));
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 2, 1, 0, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.master", false));
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 3, 1, 1, 1, 0);
cluster().startNode(ImmutableSettings.builder().put("node.client", true));
response = client().admin().cluster().prepareClusterStats().get();
assertCounts(response.getNodesStats().getCounts(), 4, 1, 1, 1, 1);
}
private void assertShardStats(ClusterStatsIndices.ShardStats stats, int indices, int total, int primaries, double replicationFactor) {
assertThat(stats.getIndices(), Matchers.equalTo(indices));
assertThat(stats.getTotal(), Matchers.equalTo(total));
assertThat(stats.getPrimaries(), Matchers.equalTo(primaries));
assertThat(stats.getReplication(), Matchers.equalTo(replicationFactor));
}
@Test
public void testIndicesShardStats() {
cluster().startNode();
prepareCreate("test1").setSettings("number_of_shards", 2, "number_of_replicas", 1).get();
ensureYellow();
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(0l));
assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(1));
assertShardStats(response.getIndicesStats().getShards(), 1, 2, 2, 0.0);
// add another node, replicas should get assigned
cluster().startNode();
ensureGreen();
index("test1", "type", "1", "f", "f");
refresh(); // make the doc visible
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.indicesStats.getDocs().getCount(), Matchers.equalTo(1l));
assertShardStats(response.getIndicesStats().getShards(), 1, 4, 2, 1.0);
prepareCreate("test2").setSettings("number_of_shards", 3, "number_of_replicas", 0).get();
ensureGreen();
response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.indicesStats.getIndexCount(), Matchers.equalTo(2));
assertShardStats(response.getIndicesStats().getShards(), 2, 7, 5, 2.0 / 5);
assertThat(response.getIndicesStats().getShards().getAvgIndexPrimaryShards(), Matchers.equalTo(2.5));
assertThat(response.getIndicesStats().getShards().getMinIndexPrimaryShards(), Matchers.equalTo(2));
assertThat(response.getIndicesStats().getShards().getMaxIndexPrimaryShards(), Matchers.equalTo(3));
assertThat(response.getIndicesStats().getShards().getAvgIndexShards(), Matchers.equalTo(3.5));
assertThat(response.getIndicesStats().getShards().getMinIndexShards(), Matchers.equalTo(3));
assertThat(response.getIndicesStats().getShards().getMaxIndexShards(), Matchers.equalTo(4));
assertThat(response.getIndicesStats().getShards().getAvgIndexReplication(), Matchers.equalTo(0.5));
assertThat(response.getIndicesStats().getShards().getMinIndexReplication(), Matchers.equalTo(0.0));
assertThat(response.getIndicesStats().getShards().getMaxIndexReplication(), Matchers.equalTo(1.0));
}
@Test
public void testValuesSmokeScreen() {
cluster().ensureAtMostNumNodes(5);
cluster().ensureAtLeastNumNodes(1);
SigarService sigarService = cluster().getInstance(SigarService.class);
index("test1", "type", "1", "f", "f");
ClusterStatsResponse response = client().admin().cluster().prepareClusterStats().get();
assertThat(response.indicesStats.getStore().getSizeInBytes(), Matchers.greaterThan(0l));
assertThat(response.nodesStats.getFs().getTotal().bytes(), Matchers.greaterThan(0l));
assertThat(response.nodesStats.getJvm().getVersions().size(), Matchers.greaterThan(0));
if (sigarService.sigarAvailable()) {
// We only get those if we have sigar
assertThat(response.nodesStats.getOs().getAvailableProcessors(), Matchers.greaterThan(0));
assertThat(response.nodesStats.getOs().getAvailableMemory().bytes(), Matchers.greaterThan(0l));
assertThat(response.nodesStats.getOs().getCpus().size(), Matchers.greaterThan(0));
}
assertThat(response.nodesStats.getVersions().size(), Matchers.greaterThan(0));
assertThat(response.nodesStats.getVersions().contains(Version.CURRENT), Matchers.equalTo(true));
assertThat(response.nodesStats.getPlugins().size(), Matchers.greaterThanOrEqualTo(0));
}
}