Reflect renaming of bulk thread pool (elastic/x-pack-elasticsearch#4414)
The bulk thread pool was renamed to the write thread pool. This commit adapts X-Pack to that rename. Specifically, monitoring needs a change to reflect the new name (and to support the possibility that the user has set the display name of the write thread pool to "bulk"). Original commit: elastic/x-pack-elasticsearch@c3c4b99be5
parent 4e376070a3
commit 650b80a982
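Background for the whitelist change below: the core rename shipped with a back-compat escape hatch, presumably the es.thread_pool.write.use_bulk_as_display_name system property from the server-side change (it is not shown in this diff), which keeps the write thread pool reporting itself as "bulk" in the stats APIs. That scenario, plus nodes still running on the old name during a rolling upgrade, is why both names have to be accepted everywhere below.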
@@ -138,9 +138,16 @@ public class NodeStatsMonitoringDoc extends FilteredMonitoringDoc {
         "node_stats.jvm.gc.collectors.old",
         "node_stats.jvm.gc.collectors.old.collection_count",
         "node_stats.jvm.gc.collectors.old.collection_time_in_millis",
+        /*
+         * We whitelist both bulk and write in case the user is running in a mixed-version cluster or has the display name
+         * on the write thread pool set to "bulk".
+         */
         "node_stats.thread_pool.bulk.threads",
         "node_stats.thread_pool.bulk.queue",
         "node_stats.thread_pool.bulk.rejected",
+        "node_stats.thread_pool.write.threads",
+        "node_stats.thread_pool.write.queue",
+        "node_stats.thread_pool.write.rejected",
         "node_stats.thread_pool.generic.threads",
         "node_stats.thread_pool.generic.queue",
         "node_stats.thread_pool.generic.rejected",
@@ -225,32 +225,32 @@ public class NodeStatsMonitoringDocTests extends BaseFilteredMonitoringDocTestCase {
                   + "}"
                 + "},"
                 + "\"thread_pool\":{"
-                  + "\"bulk\":{"
+                  + "\"generic\":{"
                     + "\"threads\":59,"
                     + "\"queue\":60,"
                     + "\"rejected\":61"
                   + "},"
-                  + "\"generic\":{"
+                  + "\"get\":{"
                     + "\"threads\":62,"
                     + "\"queue\":63,"
                     + "\"rejected\":64"
                   + "},"
-                  + "\"get\":{"
+                  + "\"management\":{"
                     + "\"threads\":65,"
                     + "\"queue\":66,"
                     + "\"rejected\":67"
                   + "},"
-                  + "\"management\":{"
+                  + "\"search\":{"
                     + "\"threads\":68,"
                     + "\"queue\":69,"
                     + "\"rejected\":70"
                   + "},"
-                  + "\"search\":{"
+                  + "\"watcher\":{"
                     + "\"threads\":71,"
                     + "\"queue\":72,"
                     + "\"rejected\":73"
                   + "},"
-                  + "\"watcher\":{"
+                  + "\"write\":{"
                     + "\"threads\":74,"
                     + "\"queue\":75,"
                     + "\"rejected\":76"
@@ -348,12 +348,12 @@ public class NodeStatsMonitoringDocTests extends BaseFilteredMonitoringDocTestCase {
 
         // Threadpools
         final List<ThreadPoolStats.Stats> threadpools = new ArrayList<>();
-        threadpools.add(new ThreadPoolStats.Stats("bulk", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
         threadpools.add(new ThreadPoolStats.Stats("generic", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
         threadpools.add(new ThreadPoolStats.Stats("get", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
         threadpools.add(new ThreadPoolStats.Stats("management", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
         threadpools.add(new ThreadPoolStats.Stats("search", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
         threadpools.add(new ThreadPoolStats.Stats("watcher", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
+        threadpools.add(new ThreadPoolStats.Stats("write", (int) ++iota, (int) ++iota, (int) no, ++iota, (int) no, no));
         final ThreadPoolStats threadPool = new ThreadPoolStats(threadpools);
 
         final DiscoveryNode discoveryNode = new DiscoveryNode("_node_name",
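The pairing with the JSON hunk above is worth spelling out: each Stats entry consumes three sequential iota values (threads, queue, rejected), and the pools serialize in insertion order. Dropping "bulk" from the front and appending "write" at the end therefore shifts every pool name down one slot while the numbers 59 through 76 stay exactly where they were. A standalone sketch of that effect (IotaShiftDemo and the starting value 58 are illustrative, chosen to reproduce the values in the expected JSON):

import java.util.List;

// Standalone illustration, not Elasticsearch code: values are handed out in
// insertion order, so only the names shift when the first pool is replaced
// and a new one is appended; the numeric sequence is unchanged.
public class IotaShiftDemo {
    public static void main(String[] args) {
        long iota = 58; // illustrative starting point; earlier stats consumed the preceding values
        List<String> pools = List.of("generic", "get", "management", "search", "watcher", "write");
        for (String pool : pools) {
            // arguments evaluate left to right, mirroring the Stats constructor call
            System.out.printf("%s: threads=%d, queue=%d, rejected=%d%n", pool, ++iota, ++iota, ++iota);
        }
    }
}

Running this prints generic with 59/60/61 through write with 74/75/76, matching the renamed keys and unchanged numbers in the test's expected JSON.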
@@ -64,7 +64,7 @@ import java.util.stream.Collectors;
 import static org.elasticsearch.common.xcontent.ToXContent.EMPTY_PARAMS;
 import static org.elasticsearch.common.xcontent.support.XContentMapValues.extractValue;
 import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked;
-import static org.elasticsearch.threadpool.ThreadPool.Names.BULK;
+import static org.elasticsearch.threadpool.ThreadPool.Names.WRITE;
 import static org.elasticsearch.xpack.core.monitoring.exporter.MonitoringTemplateUtils.TEMPLATE_VERSION;
 import static org.hamcrest.Matchers.containsString;
 import static org.hamcrest.Matchers.equalTo;
@@ -560,7 +560,7 @@ public class MonitoringIT extends ESSingleNodeTestCase {
         boolean foundBulkThreads = false;
 
         for(final ThreadPoolStats.Stats threadPoolStats : nodeStats.getThreadPool()) {
-            if (BULK.equals(threadPoolStats.getName())) {
+            if (WRITE.equals(threadPoolStats.getName())) {
                 foundBulkThreads = true;
                 assertThat("Still some active _bulk threads!", threadPoolStats.getActive(), equalTo(0));
                 break;
@@ -180,7 +180,7 @@ public class XPackRestIT extends ESClientYamlSuiteTestCase {
             final Map<String, String> params = new HashMap<>();
             params.put("node_id", "_local");
             params.put("metric", "thread_pool");
-            params.put("filter_path", "nodes.*.thread_pool.bulk.active");
+            params.put("filter_path", "nodes.*.thread_pool.write.active");
             response = callApi("nodes.stats", params, emptyList(), getApiCallHeaders());
 
             @SuppressWarnings("unchecked")
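Assuming the YAML test client maps these params onto the nodes stats endpoint in the usual way, they amount to GET /_nodes/_local/stats/thread_pool?filter_path=nodes.*.thread_pool.write.active, which returns only the active count of the write pool on the local node.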
@@ -189,8 +189,8 @@ public class XPackRestIT extends ESClientYamlSuiteTestCase {
             final Map<String, Object> node = (Map<String, Object>) nodes.values().iterator().next();
 
             @SuppressWarnings("unchecked")
-            final Number activeBulks = (Number) extractValue("thread_pool.bulk.active", node);
-            return activeBulks != null && activeBulks.longValue() == 0L;
+            final Number activeWrites = (Number) extractValue("thread_pool.write.active", node);
+            return activeWrites != null && activeWrites.longValue() == 0L;
         } catch (Exception e) {
             throw new ElasticsearchException("Failed to wait for monitoring exporters to stop:", e);
         }
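The null guard matters here: extractValue walks the dotted path through nested maps and yields null as soon as a segment is missing, which is exactly what happens while a node still reports the pool under the old "bulk" name. A minimal sketch of that traversal, assuming plain nested maps (the real XContentMapValues.extractValue also handles lists and other cases; PathExtractSketch is a hypothetical name):

import java.util.Map;

// Minimal dotted-path lookup over nested maps: returns null instead of
// throwing when any path segment is absent, so callers can treat "field not
// there yet" and "field is zero" as distinct outcomes.
final class PathExtractSketch {
    static Object extract(String path, Map<String, Object> map) {
        Object current = map;
        for (String key : path.split("\\.")) {
            if ((current instanceof Map) == false) {
                return null;
            }
            current = ((Map<?, ?>) current).get(key);
        }
        return current;
    }
}

With the node map from the response above, extract("thread_pool.write.active", node) yields the active count, or null if the node exposes only a bulk pool, in which case the wait condition simply keeps polling.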