HBASE-20904 Prometheus /metrics http endpoint for monitoring (#4691)

Co-authored-by: Luca Kovacs <kovacs.luca.agota@gmail.com>
Co-authored-by: Madhusoodan P <akshayapataki123@gmail.com>
Co-authored-by: Luca Kovacs <lkovacs@cloudera.com>
Signed-off-by: Duo Zhang <zhangduo@apache.org>
Luca Kovács 2022-08-24 04:54:27 +02:00 committed by GitHub
parent a0481d100f
commit f9ea7ee0d6
9 changed files with 392 additions and 11 deletions


@@ -1786,6 +1786,14 @@ possible configurations would overwhelm and obscure the important.
ThreadPool.
</description>
</property>
<property>
<name>hbase.http.metrics.servlets</name>
<value>jmx,metrics,prometheus</value>
<description>
Comma-separated list of servlet names to enable for metrics collection. Supported
servlets are jmx, metrics, and prometheus.
</description>
</property>
<property>
<name>hbase.replication.rpc.codec</name>
<value>org.apache.hadoop.hbase.codec.KeyValueCodecWithTags</value>


@@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.impl;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public final class MetricsExportHelper {
private MetricsExportHelper() {
}
public static Collection<MetricsRecord> export() {
MetricsSystemImpl instance = (MetricsSystemImpl) DefaultMetricsSystem.instance();
MetricsBuffer metricsBuffer = instance.sampleMetrics();
List<MetricsRecord> metrics = new ArrayList<>();
for (MetricsBuffer.Entry entry : metricsBuffer) {
entry.records().forEach(metrics::add);
}
return metrics;
}
}
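Below is a minimal sketch of how a caller can consume this helper; the printing logic is illustrative only, and it assumes the default metrics system was initialized and started elsewhere (as the servlet and tests in this change do).

// Illustrative consumer of MetricsExportHelper; assumes DefaultMetricsSystem
// has been initialized and started before this point.
Collection<MetricsRecord> records = MetricsExportHelper.export();
for (MetricsRecord record : records) {
  for (AbstractMetric metric : record.metrics()) {
    // AbstractMetric exposes name() and value() for each sampled metric
    System.out.println(record.name() + "." + metric.name() + " = " + metric.value());
  }
}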


@@ -0,0 +1,75 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.metrics;
import java.util.Collection;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MetricsTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.impl.MetricsExportHelper;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@Category({ MetricsTests.class, SmallTests.class })
public class TestMetricsExportHelper {
@ClassRule
public static final HBaseClassTestRule CLASS_RULE =
HBaseClassTestRule.forClass(TestMetricsExportHelper.class);
@Test
public void testExportHelper() {
DefaultMetricsSystem.initialize("exportHelperTestSystem");
DefaultMetricsSystem.instance().start();
String metricsName = "exportMetricsTestGrp";
String gaugeName = "exportMetricsTestGauge";
String counterName = "exportMetricsTestCounter";
BaseSourceImpl baseSource = new BaseSourceImpl(metricsName, "", metricsName, metricsName);
baseSource.setGauge(gaugeName, 0);
baseSource.incCounters(counterName, 1);
Collection<MetricsRecord> metrics = MetricsExportHelper.export();
DefaultMetricsSystem.instance().stop();
Assert.assertTrue(metrics.stream().anyMatch(mr -> mr.name().equals(metricsName)));
Assert.assertTrue(gaugeName + " is missing in the export",
contains(metrics, metricsName, gaugeName));
Assert.assertTrue(counterName + " is missing in the export",
contains(metrics, metricsName, counterName));
}
private boolean contains(Collection<MetricsRecord> metrics, String metricsName,
String metricName) {
return metrics.stream().filter(mr -> mr.name().equals(metricsName)).anyMatch(mr -> {
for (AbstractMetric metric : mr.metrics()) {
if (metric.name().equals(metricName)) {
return true;
}
}
return false;
});
}
}


@@ -57,6 +57,19 @@
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-metrics-api</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-metrics</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop-compat</artifactId>
</dependency>
<!-- resource bundle only needed at build time -->
<dependency>
<groupId>org.apache.hbase</groupId>


@@ -53,7 +53,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.http.conf.ConfServlet;
import org.apache.hadoop.hbase.http.jmx.JMXJsonServlet;
import org.apache.hadoop.hbase.http.log.LogLevel;
import org.apache.hadoop.hbase.util.ReflectionUtils;
import org.apache.hadoop.hbase.util.Threads;
@@ -70,6 +69,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.common.base.Preconditions;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
import org.apache.hbase.thirdparty.org.eclipse.jetty.http.HttpVersion;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Handler;
@@ -154,6 +154,18 @@ public class HttpServer implements FilterContainer {
public static final String NO_CACHE_FILTER = "NoCacheFilter";
public static final String APP_DIR = "webapps";
public static final String METRIC_SERVLETS_CONF_KEY = "hbase.http.metrics.servlets";
public static final String[] METRICS_SERVLETS_DEFAULT = { "jmx", "metrics", "prometheus" };
private static final ImmutableMap<String,
ServletConfig> METRIC_SERVLETS = new ImmutableMap.Builder<String, ServletConfig>()
.put("jmx",
new ServletConfig("jmx", "/jmx", "org.apache.hadoop.hbase.http.jmx.JMXJsonServlet"))
.put("metrics",
new ServletConfig("metrics", "/metrics", "org.apache.hadoop.metrics.MetricsServlet"))
.put("prometheus", new ServletConfig("prometheus", "/prometheus",
"org.apache.hadoop.hbase.http.prometheus.PrometheusHadoopServlet"))
.build();
private final AccessControlList adminsAcl;
protected final Server webServer;
@@ -751,16 +763,7 @@ public class HttpServer implements FilterContainer {
// set up default servlets
addPrivilegedServlet("stacks", "/stacks", StackServlet.class);
addPrivilegedServlet("logLevel", "/logLevel", LogLevel.Servlet.class);
// Hadoop3 has moved completely to metrics2, and dropped support for Metrics v1's
// MetricsServlet (see HADOOP-12504). We'll using reflection to load if against hadoop2.
// Remove when we drop support for hbase on hadoop2.x.
try {
Class<?> clz = Class.forName("org.apache.hadoop.metrics.MetricsServlet");
addPrivilegedServlet("metrics", "/metrics", clz.asSubclass(HttpServlet.class));
} catch (Exception e) {
// do nothing
}
addPrivilegedServlet("jmx", "/jmx", JMXJsonServlet.class);
// While we don't expect users to have sensitive information in their configuration, they
// might. Give them an option to not expose the service configuration to all users.
if (conf.getBoolean(HTTP_PRIVILEGED_CONF_KEY, HTTP_PRIVILEGED_CONF_DEFAULT)) {
@@ -784,6 +787,22 @@ public class HttpServer implements FilterContainer {
LOG.info("ASYNC_PROFILER_HOME environment variable and async.profiler.home system property "
+ "not specified. Disabling /prof endpoint.");
}
/* register metrics servlets */
String[] enabledServlets = conf.getStrings(METRIC_SERVLETS_CONF_KEY, METRICS_SERVLETS_DEFAULT);
for (String enabledServlet : enabledServlets) {
try {
ServletConfig servletConfig = METRIC_SERVLETS.get(enabledServlet);
if (servletConfig != null) {
Class<?> clz = Class.forName(servletConfig.getClazz());
addPrivilegedServlet(servletConfig.getName(), servletConfig.getPathSpec(),
clz.asSubclass(HttpServlet.class));
}
} catch (Exception e) {
/* shouldn't be fatal, so warn the user about it */
LOG.warn("Couldn't register the servlet " + enabledServlet, e);
}
}
}
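As a usage sketch, a deployment that wants only the Prometheus endpoint could override the new property on the Configuration handed to HttpServer (hypothetical wiring, shown only to illustrate the lookup above):

// Register only /prometheus; "jmx" and "metrics" are left out of the list
// and are therefore never looked up in METRIC_SERVLETS.
Configuration conf = new Configuration();
conf.set(HttpServer.METRIC_SERVLETS_CONF_KEY, "prometheus");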
/**


@@ -0,0 +1,47 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http;
import org.apache.yetus.audience.InterfaceAudience;
/* pojo to hold the servlet info */
@InterfaceAudience.Private
class ServletConfig {
private String name;
private String pathSpec;
private String clazz;
public ServletConfig(String name, String pathSpec, String clazz) {
this.name = name;
this.pathSpec = pathSpec;
this.clazz = clazz;
}
public String getName() {
return name;
}
public String getPathSpec() {
return pathSpec;
}
public String getClazz() {
return clazz;
}
}


@@ -0,0 +1,84 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http.prometheus;
import com.google.errorprone.annotations.RestrictedApi;
import java.io.IOException;
import java.io.Writer;
import java.util.Collection;
import java.util.regex.Pattern;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.metrics2.AbstractMetric;
import org.apache.hadoop.metrics2.MetricType;
import org.apache.hadoop.metrics2.MetricsRecord;
import org.apache.hadoop.metrics2.MetricsTag;
import org.apache.hadoop.metrics2.impl.MetricsExportHelper;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public class PrometheusHadoopServlet extends HttpServlet {
private static final Pattern SPLIT_PATTERN =
Pattern.compile("(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=([A-Z][a-z]))|\\W|(_)+");
@Override
protected void doGet(HttpServletRequest req, HttpServletResponse resp) throws IOException {
writeMetrics(resp.getWriter());
}
static String toPrometheusName(String metricRecordName, String metricName) {
String baseName = metricRecordName + StringUtils.capitalize(metricName);
String[] parts = SPLIT_PATTERN.split(baseName);
return String.join("_", parts).toLowerCase();
}
/*
* The Prometheus SimpleClient library is not used here because the exposition format is easy to
* implement directly, which avoids adding any new dependency to the project. The format is
* documented at: https://prometheus.io/docs/instrumenting/exposition_formats/
*/
@RestrictedApi(explanation = "Should only be called in tests or self", link = "",
allowedOnPath = ".*/src/test/.*|.*/PrometheusHadoopServlet\\.java")
void writeMetrics(Writer writer) throws IOException {
Collection<MetricsRecord> metricRecords = MetricsExportHelper.export();
for (MetricsRecord metricsRecord : metricRecords) {
for (AbstractMetric metrics : metricsRecord.metrics()) {
if (metrics.type() == MetricType.COUNTER || metrics.type() == MetricType.GAUGE) {
String key = toPrometheusName(metricsRecord.name(), metrics.name());
writer.append("# TYPE ").append(key).append(" ")
.append(metrics.type().toString().toLowerCase()).append("\n").append(key).append("{");
/* add tags */
String sep = "";
for (MetricsTag tag : metricsRecord.tags()) {
String tagName = tag.name().toLowerCase();
writer.append(sep).append(tagName).append("=\"").append(tag.value()).append("\"");
sep = ",";
}
writer.append("} ");
writer.append(metrics.value().toString()).append('\n');
}
}
}
writer.flush();
}
}
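A worked example of the name conversion; the record and metric names below are hypothetical, and since toPrometheusName is package-private, this sketch assumes it runs from the same package (as a test would):

// "RegionServer" + capitalize("numOpenConnections") -> "RegionServerNumOpenConnections";
// SPLIT_PATTERN then splits at the CamelCase boundaries before lowercasing.
String name = PrometheusHadoopServlet.toPrometheusName("RegionServer", "numOpenConnections");
// name is now "region_server_num_open_connections"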


@@ -0,0 +1,84 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http.prometheus;
import static java.nio.charset.StandardCharsets.UTF_8;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStreamWriter;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.junit.Assert;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Tests the Prometheus metrics servlet.
*/
@Category({ SmallTests.class, MiscTests.class })
public class TestPrometheusServlet {
@ClassRule
public static final HBaseClassTestRule CLASS_TEST_RULE =
HBaseClassTestRule.forClass(TestPrometheusServlet.class);
@Test
public void testPublish() throws IOException {
// GIVEN
MetricsSystem metrics = DefaultMetricsSystem.instance();
metrics.init("test");
TestMetrics testMetrics = metrics.register("TestMetrics", "Testing metrics", new TestMetrics());
metrics.start();
testMetrics.numBucketCreateFails.incr();
metrics.publishMetricsNow();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
OutputStreamWriter writer = new OutputStreamWriter(stream, UTF_8);
// WHEN
PrometheusHadoopServlet prom2Servlet = new PrometheusHadoopServlet();
prom2Servlet.writeMetrics(writer);
// THEN
String writtenMetrics = stream.toString(UTF_8.name());
System.out.println(writtenMetrics);
Assert.assertTrue("The expected metric line is missing from prometheus metrics output",
writtenMetrics.contains("test_metrics_num_bucket_create_fails{context=\"dfs\""));
metrics.stop();
metrics.shutdown();
}
/**
* Example metric pojo.
*/
@Metrics(about = "Test Metrics", context = "dfs")
private static class TestMetrics {
@Metric
private MutableCounterLong numBucketCreateFails;
}
}


@@ -1610,6 +1610,15 @@ See link:https://hadoop.apache.org/docs/current/api/org/apache/hadoop/metrics2/p
To disable metrics for a region server, edit the _conf/hadoop-metrics2-hbase.properties_ file and comment out any uncommented lines.
Restart the region server for the changes to take effect.
[[enabling.metrics.servlets]]
=== Enabling Metrics Servlets
HBase exposes metrics in several formats, such as JSON and the Prometheus exposition format, through different servlets (`/jmx`, `/metrics`, `/prometheus`). Each of these servlets can be enabled or disabled with the configuration property `hbase.http.metrics.servlets`, whose value is a comma-separated list of the servlet aliases `{jmx, metrics, prometheus}`; all three are enabled by default. To fetch metrics from one of these servlets, access `http://SERVER_HOSTNAME:SERVER_WEB_UI_PORT/endpoint`, where `endpoint` is one of `/jmx`, `/metrics`, or `/prometheus`, e.g. `http://my.rs.xyz.com:16030/prometheus`. An example configuration is shown below.
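For example, a site that wants to expose only the JMX and Prometheus endpoints could set the following in _hbase-site.xml_ (a sketch; `metrics` is deliberately omitted from the list):

<property>
<name>hbase.http.metrics.servlets</name>
<value>jmx,prometheus</value>
</property>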
[[prometheus.format.metrics]]
=== Prometheus Servlet
HBase exposes metrics in a Prometheus-friendly format through the `/prometheus` servlet. Currently, `/prometheus` exposes all available metrics.
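The output follows the Prometheus exposition format (see https://prometheus.io/docs/instrumenting/exposition_formats/); the excerpt below is a hypothetical example of a single gauge, with the record's tags rendered as labels:

# TYPE region_server_num_open_connections gauge
region_server_num_open_connections{context="regionserver",hostname="my.rs.xyz.com"} 42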
[[discovering.available.metrics]]
=== Discovering Available Metrics