Cluster stats should not render empty http/transport types (#23735)
This commit changes the ClusterStatsNodes.NetworkTypes so that it does not print out empty field names when no transport or HTTP type is defined:

```
{
  "network_types": {
    ...
    "http_types": {
      "": 2
    }
  }
}
```

is now rendered as:

```
{
  "network_types": {
    ...
    "http_types": {
    }
  }
}
```
parent 135eae42b9
commit 28099162ab
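The heart of the change is in the `@@ -540,17 +547,25 @@` hunk of ClusterStatsNodes.java below: a type name is only counted when it actually carries text, so a missing type can no longer surface as an empty-string key. A condensed, hypothetical sketch of that pattern (the class and `countByType` helper are illustrative names only, not part of the commit):

```java
import org.elasticsearch.common.Strings;

import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

final class NetworkTypeCountSketch {

    // Illustrative helper mirroring the guarded counting added to
    // ClusterStatsNodes.NetworkTypes: a blank type is skipped instead of
    // being counted under an empty-string key.
    static void countByType(Map<String, AtomicInteger> counts, String type) {
        if (Strings.hasText(type)) {
            counts.computeIfAbsent(type, k -> new AtomicInteger()).incrementAndGet();
        }
    }

    public static void main(String[] args) {
        Map<String, AtomicInteger> httpTypes = new HashMap<>();
        countByType(httpTypes, "");        // skipped; previously rendered as {"": 1}
        countByType(httpTypes, "netty4");  // counted normally
        System.out.println(httpTypes);     // {netty4=1}
    }
}
```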
ClusterStatsNodes.java:

```diff
@@ -25,6 +25,7 @@ import org.elasticsearch.Version;
 import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
 import org.elasticsearch.action.admin.cluster.node.stats.NodeStats;
 import org.elasticsearch.cluster.node.DiscoveryNode;
+import org.elasticsearch.common.Strings;
 import org.elasticsearch.common.network.NetworkModule;
 import org.elasticsearch.common.settings.Settings;
 import org.elasticsearch.common.transport.TransportAddress;
@@ -73,7 +74,8 @@ public class ClusterStatsNodes implements ToXContent {
             this.plugins.addAll(nodeResponse.nodeInfo().getPlugins().getPluginInfos());

             // now do the stats that should be deduped by hardware (implemented by ip deduping)
-            TransportAddress publishAddress = nodeResponse.nodeInfo().getTransport().address().publishAddress();
+            TransportAddress publishAddress =
+                    nodeResponse.nodeInfo().getTransport().address().publishAddress();
             final InetAddress inetAddress = publishAddress.address().getAddress();
             if (!seenAddresses.add(inetAddress)) {
                 continue;
@@ -209,7 +211,8 @@ public class ClusterStatsNodes implements ToXContent {
         }

         @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        public XContentBuilder toXContent(XContentBuilder builder, Params params)
+                throws IOException {
             builder.field(Fields.TOTAL, total);
             for (Map.Entry<String, Integer> entry : roles.entrySet()) {
                 builder.field(entry.getKey(), entry.getValue());
@@ -280,7 +283,8 @@ public class ClusterStatsNodes implements ToXContent {
         }

         @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        public XContentBuilder toXContent(XContentBuilder builder, Params params)
+                throws IOException {
             builder.field(Fields.AVAILABLE_PROCESSORS, availableProcessors);
             builder.field(Fields.ALLOCATED_PROCESSORS, allocatedProcessors);
             builder.startArray(Fields.NAMES);
@@ -326,7 +330,8 @@ public class ClusterStatsNodes implements ToXContent {
                 // fd can be -1 if not supported on platform
                 totalOpenFileDescriptors += fd;
             }
-            // we still do min max calc on -1, so we'll have an indication of it not being supported on one of the nodes.
+            // we still do min max calc on -1, so we'll have an indication
+            // of it not being supported on one of the nodes.
             minOpenFileDescriptors = Math.min(minOpenFileDescriptors, fd);
             maxOpenFileDescriptors = Math.max(maxOpenFileDescriptors, fd);
         }
@@ -375,7 +380,8 @@ public class ClusterStatsNodes implements ToXContent {
         }

         @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        public XContentBuilder toXContent(XContentBuilder builder, Params params)
+                throws IOException {
             builder.startObject(Fields.CPU).field(Fields.PERCENT, cpuPercent).endObject();
             if (count > 0) {
                 builder.startObject(Fields.OPEN_FILE_DESCRIPTORS);
@@ -479,7 +485,8 @@ public class ClusterStatsNodes implements ToXContent {
         }

         @Override
-        public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
+        public XContentBuilder toXContent(XContentBuilder builder, Params params)
+                throws IOException {
             builder.timeValueField(Fields.MAX_UPTIME_IN_MILLIS, Fields.MAX_UPTIME, maxUptime);
             builder.startArray(Fields.VERSIONS);
             for (ObjectIntCursor<JvmVersion> v : versions) {
@@ -540,17 +547,25 @@ public class ClusterStatsNodes implements ToXContent {
         private final Map<String, AtomicInteger> transportTypes;
         private final Map<String, AtomicInteger> httpTypes;

-        private NetworkTypes(final List<NodeInfo> nodeInfos) {
+        NetworkTypes(final List<NodeInfo> nodeInfos) {
             final Map<String, AtomicInteger> transportTypes = new HashMap<>();
             final Map<String, AtomicInteger> httpTypes = new HashMap<>();
             for (final NodeInfo nodeInfo : nodeInfos) {
                 final Settings settings = nodeInfo.getSettings();
                 final String transportType =
-                    settings.get(NetworkModule.TRANSPORT_TYPE_KEY, NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
+                    settings.get(NetworkModule.TRANSPORT_TYPE_KEY,
+                            NetworkModule.TRANSPORT_DEFAULT_TYPE_SETTING.get(settings));
                 final String httpType =
-                    settings.get(NetworkModule.HTTP_TYPE_KEY, NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
-                transportTypes.computeIfAbsent(transportType, k -> new AtomicInteger()).incrementAndGet();
-                httpTypes.computeIfAbsent(httpType, k -> new AtomicInteger()).incrementAndGet();
+                    settings.get(NetworkModule.HTTP_TYPE_KEY,
+                            NetworkModule.HTTP_DEFAULT_TYPE_SETTING.get(settings));
+                if (Strings.hasText(transportType)) {
+                    transportTypes.computeIfAbsent(transportType,
+                            k -> new AtomicInteger()).incrementAndGet();
+                }
+                if (Strings.hasText(httpType)) {
+                    httpTypes.computeIfAbsent(httpType,
+                            k -> new AtomicInteger()).incrementAndGet();
+                }
             }
             this.transportTypes = Collections.unmodifiableMap(transportTypes);
             this.httpTypes = Collections.unmodifiableMap(httpTypes);
```
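The guard relies on `Strings.hasText`, which (as assumed here, consistent with how the commit uses it) reports "no text" for null, empty, and whitespace-only values, so a node without an explicit transport or HTTP type contributes nothing to the maps. A minimal stand-alone check of that assumption, not part of the commit:

```java
import org.elasticsearch.common.Strings;

public class HasTextAssumptionCheck {
    public static void main(String[] args) {
        String missing = null;
        // Assumed behaviour of org.elasticsearch.common.Strings#hasText:
        // false for null, empty, and whitespace-only input, true otherwise.
        System.out.println(Strings.hasText(missing));   // false
        System.out.println(Strings.hasText(""));        // false
        System.out.println(Strings.hasText("   "));     // false
        System.out.println(Strings.hasText("netty4"));  // true
    }
}
```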
ClusterStatsNodesTests.java (new file, `@@ -0,0 +1,76 @@`):

```java
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.action.admin.cluster.stats;

import org.elasticsearch.action.admin.cluster.node.info.NodeInfo;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.common.network.NetworkModule;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.xcontent.XContentType;
import org.elasticsearch.test.ESTestCase;

import java.util.Arrays;
import java.util.List;

import static java.util.Collections.emptyList;
import static java.util.Collections.singletonList;
import static org.elasticsearch.common.xcontent.XContentHelper.toXContent;

public class ClusterStatsNodesTests extends ESTestCase {

    /**
     * Test that empty transport/http types are not printed out as part
     * of the cluster stats xcontent output.
     */
    public void testNetworkTypesToXContent() throws Exception {
        ClusterStatsNodes.NetworkTypes stats = new ClusterStatsNodes.NetworkTypes(emptyList());
        assertEquals("{\"transport_types\":{},\"http_types\":{}}",
                toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString());

        List<NodeInfo> nodeInfos = singletonList(createNodeInfo("node_0", null, null));
        stats = new ClusterStatsNodes.NetworkTypes(nodeInfos);
        assertEquals("{\"transport_types\":{},\"http_types\":{}}",
                toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString());

        nodeInfos = Arrays.asList(createNodeInfo("node_1", "", ""),
                createNodeInfo("node_2", "custom", "custom"),
                createNodeInfo("node_3", null, "custom"));
        stats = new ClusterStatsNodes.NetworkTypes(nodeInfos);
        assertEquals("{"
                + "\"transport_types\":{\"custom\":1},"
                + "\"http_types\":{\"custom\":2}"
                + "}", toXContent(stats, XContentType.JSON, randomBoolean()).utf8ToString());
    }

    private static NodeInfo createNodeInfo(String nodeId, String transportType, String httpType) {
        Settings.Builder settings = Settings.builder();
        if (transportType != null) {
            settings.put(randomFrom(NetworkModule.TRANSPORT_TYPE_KEY,
                    NetworkModule.TRANSPORT_TYPE_DEFAULT_KEY), transportType);
        }
        if (httpType != null) {
            settings.put(randomFrom(NetworkModule.HTTP_TYPE_KEY,
                    NetworkModule.HTTP_TYPE_DEFAULT_KEY), httpType);
        }
        return new NodeInfo(null, null,
                new DiscoveryNode(nodeId, buildNewFakeTransportAddress(), null),
                settings.build(), null, null, null, null, null, null, null, null, null);
    }
}
```