HBASE-25895 Implement a Cluster Metrics JSON endpoint

Publishes a set of JSON endpoints following a RESTful structure, which expose a subset of the
`o.a.h.h.ClusterMetrics` object tree. The URI structure is as follows:

    /api/v1/admin/cluster_metrics
    /api/v1/admin/cluster_metrics/live_servers
    /api/v1/admin/cluster_metrics/dead_servers

Signed-off-by: Sean Busbey <busbey@apache.org>
Signed-off-by: Andrew Purtell <apurtell@apache.org>
This commit is contained in:
Nick Dimiduk 2021-05-14 18:07:03 -07:00 committed by Nick Dimiduk
parent 5851400a46
commit be0afbf23a
19 changed files with 1112 additions and 11 deletions

View File

@ -39,6 +39,7 @@ import java.util.stream.Collectors;
import javax.servlet.Filter;
import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
import javax.servlet.Servlet;
import javax.servlet.ServletContext;
import javax.servlet.ServletException;
import javax.servlet.ServletRequest;
@ -838,6 +839,17 @@ public class HttpServer implements FilterContainer {
addServletWithAuth(name, pathSpec, clazz, false);
}
/**
 * Adds a servlet in the server that any user can access. This method differs from
 * {@link #addPrivilegedServlet(String, ServletHolder)} in that any authenticated user
 * can interact with the servlet added by this method.
 * @param pathSpec The path spec for the {@link Servlet}
 * @param holder The {@link ServletHolder} providing the servlet instance to register
 */
public void addUnprivilegedServlet(String pathSpec, ServletHolder holder) {
addServletWithAuth(pathSpec, holder, false);
}
/**
* Adds a servlet in the server that only administrators can access. This method differs from
* {@link #addUnprivilegedServlet(String, String, Class)} in that only those authenticated user
@ -848,6 +860,16 @@ public class HttpServer implements FilterContainer {
addServletWithAuth(name, pathSpec, clazz, true);
}
/**
 * Adds a servlet in the server that only administrators can access. This method differs from
 * {@link #addUnprivilegedServlet(String, ServletHolder)} in that only those
 * authenticated users who are identified as administrators can interact with the servlet added by
 * this method.
 * @param pathSpec The path spec for the {@link Servlet}
 * @param holder The {@link ServletHolder} providing the servlet instance to register
 */
public void addPrivilegedServlet(String pathSpec, ServletHolder holder) {
addServletWithAuth(pathSpec, holder, true);
}
/**
* Internal method to add a servlet to the HTTP server. Developers should not call this method
* directly, but invoke it via {@link #addUnprivilegedServlet(String, String, Class)} or
@ -859,6 +881,16 @@ public class HttpServer implements FilterContainer {
addFilterPathMapping(pathSpec, webAppContext);
}
/**
 * Internal method to add a servlet to the HTTP server. Developers should not call this method
 * directly, but invoke it via {@link #addUnprivilegedServlet(String, ServletHolder)} or
 * {@link #addPrivilegedServlet(String, ServletHolder)}.
 * @param pathSpec The path spec for the {@link Servlet}
 * @param holder The {@link ServletHolder} providing the servlet instance to register
 * @param requireAuthz Whether the admin-authorization filter must guard this servlet
 */
void addServletWithAuth(String pathSpec, ServletHolder holder, boolean requireAuthz) {
addInternalServlet(pathSpec, holder, requireAuthz);
addFilterPathMapping(pathSpec, webAppContext);
}
/**
* Add an internal servlet in the server, specifying whether or not to
* protect with Kerberos authentication.
@ -867,17 +899,33 @@ public class HttpServer implements FilterContainer {
* servlets added using this method, filters (except internal Kerberos
* filters) are not enabled.
*
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
* @param requireAuth Require Kerberos authenticate to access servlet
* @param name The name of the {@link Servlet} (can be passed as null)
* @param pathSpec The path spec for the {@link Servlet}
* @param clazz The {@link Servlet} class
* @param requireAuthz Require Kerberos authenticate to access servlet
*/
void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuthz) {
Class<? extends HttpServlet> clazz, boolean requireAuthz) {
ServletHolder holder = new ServletHolder(clazz);
if (name != null) {
holder.setName(name);
}
addInternalServlet(pathSpec, holder, requireAuthz);
}
/**
* Add an internal servlet in the server, specifying whether or not to
* protect with Kerberos authentication.
* Note: This method is to be used for adding servlets that facilitate
* internal communication and not for user facing functionality. For
* servlets added using this method, filters (except internal Kerberos
* filters) are not enabled.
*
* @param pathSpec The path spec for the {@link Servlet}
* @param holder The object providing the {@link Servlet} instance
* @param requireAuthz Require Kerberos authenticate to access servlet
*/
void addInternalServlet(String pathSpec, ServletHolder holder, boolean requireAuthz) {
if (authenticationEnabled && requireAuthz) {
FilterHolder filter = new FilterHolder(AdminAuthorizedFilter.class);
filter.setName(AdminAuthorizedFilter.class.getSimpleName());

View File

@ -1,4 +1,4 @@
/**
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
@ -19,18 +19,16 @@ package org.apache.hadoop.hbase.http;
import java.io.IOException;
import java.net.URI;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.net.HostAndPort;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
/**
* Create a Jetty embedded server to answer http requests. The primary goal
@ -128,6 +126,7 @@ public class InfoServer {
}
/**
* Adds a servlet in the server that any user can access.
* @see HttpServer#addUnprivilegedServlet(String, String, Class)
*/
public void addUnprivilegedServlet(String name, String pathSpec,
@ -136,6 +135,18 @@ public class InfoServer {
}
/**
 * Adds a servlet in the server that any user can access.
 * @param name the servlet name to register under; may be null, in which case the holder's
 *   existing name is left untouched
 * @param pathSpec the path spec for the servlet
 * @param holder the object providing the servlet instance
 * @see HttpServer#addUnprivilegedServlet(String, ServletHolder)
 */
public void addUnprivilegedServlet(String name, String pathSpec, ServletHolder holder) {
if (name != null) {
holder.setName(name);
}
this.httpServer.addUnprivilegedServlet(pathSpec, holder);
}
/**
* Adds a servlet in the server that any user can access.
* @see HttpServer#addPrivilegedServlet(String, String, Class)
*/
public void addPrivilegedServlet(String name, String pathSpec,

View File

@ -0,0 +1,38 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http.gson;
import java.lang.reflect.Type;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.gson.JsonElement;
import org.apache.hbase.thirdparty.com.google.gson.JsonPrimitive;
import org.apache.hbase.thirdparty.com.google.gson.JsonSerializationContext;
import org.apache.hbase.thirdparty.com.google.gson.JsonSerializer;
/**
 * Gson serializer that renders a {@code byte[]} as the {@code String} produced by
 * {@link Bytes#toString()}, rather than as a JSON array of numbers.
 */
@InterfaceAudience.Private
public final class ByteArraySerializer implements JsonSerializer<byte[]> {

  @Override
  public JsonElement serialize(byte[] src, Type typeOfSrc, JsonSerializationContext context) {
    final String rendered = Bytes.toString(src);
    return new JsonPrimitive(rendered);
  }
}

View File

@ -0,0 +1,99 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http.gson;
import java.io.IOException;
import java.io.OutputStream;
import java.io.OutputStreamWriter;
import java.io.Writer;
import java.lang.annotation.Annotation;
import java.lang.reflect.Type;
import java.nio.charset.Charset;
import java.nio.charset.IllegalCharsetNameException;
import java.nio.charset.StandardCharsets;
import java.nio.charset.UnsupportedCharsetException;
import java.util.Optional;
import javax.inject.Inject;
import org.apache.yetus.audience.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.javax.ws.rs.Produces;
import org.apache.hbase.thirdparty.javax.ws.rs.WebApplicationException;
import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType;
import org.apache.hbase.thirdparty.javax.ws.rs.core.MultivaluedMap;
import org.apache.hbase.thirdparty.javax.ws.rs.ext.MessageBodyWriter;
/**
 * Implements JSON serialization via {@link Gson} for JAX-RS.
 */
@InterfaceAudience.Private
@Produces(MediaType.APPLICATION_JSON)
public final class GsonMessageBodyWriter<T> implements MessageBodyWriter<T> {
  private static final Logger logger = LoggerFactory.getLogger(GsonMessageBodyWriter.class);

  private final Gson gson;

  @Inject
  public GsonMessageBodyWriter(Gson gson) {
    this.gson = gson;
  }

  @Override
  public boolean isWriteable(Class<?> type, Type genericType, Annotation[] annotations,
    MediaType mediaType) {
    // An absent media type is treated as JSON; otherwise accept anything JSON-compatible.
    return mediaType == null || MediaType.APPLICATION_JSON_TYPE.isCompatible(mediaType);
  }

  @Override
  public void writeTo(
    T t,
    Class<?> type,
    Type genericType,
    Annotation[] annotations,
    MediaType mediaType,
    MultivaluedMap<String, Object> httpHeaders,
    OutputStream entityStream
  ) throws IOException, WebApplicationException {
    final Charset outputCharset = requestedCharset(mediaType);
    // Per the MessageBodyWriter contract, writeTo must not close entityStream. Closing the
    // OutputStreamWriter (e.g., via try-with-resources) would close the underlying stream, so
    // flush instead and leave the stream's lifecycle to the container.
    final Writer writer = new OutputStreamWriter(entityStream, outputCharset);
    gson.toJson(t, writer);
    writer.flush();
  }

  /**
   * Resolves the charset requested via the media type's {@code charset} parameter, falling back
   * to UTF-8 when the parameter is absent or names a charset that cannot be used.
   */
  private static Charset requestedCharset(MediaType mediaType) {
    return Optional.ofNullable(mediaType)
      .map(MediaType::getParameters)
      .map(params -> params.get("charset"))
      .map(c -> {
        try {
          return Charset.forName(c);
        } catch (IllegalCharsetNameException e) {
          logger.debug("Client requested illegal Charset '{}'", c, e);
          return null;
        } catch (UnsupportedCharsetException e) {
          logger.debug("Client requested unsupported Charset '{}'", c, e);
          return null;
        } catch (Exception e) {
          logger.debug("Error while resolving Charset '{}'", c, e);
          return null;
        }
      })
      .orElse(StandardCharsets.UTF_8);
  }
}

View File

@ -0,0 +1,53 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http.jersey;
import java.io.IOException;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.collect.ImmutableMap;
import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerRequestContext;
import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerResponseContext;
import org.apache.hbase.thirdparty.javax.ws.rs.container.ContainerResponseFilter;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Response.Status;
/**
 * Generate a uniform response wrapper around the Entity returned from the resource.
 * @see <a href="https://jsonapi.org/format/#document-top-level">JSON API Document Structure</a>
 * @see <a href="https://jsonapi.org/format/#error-objects">JSON API Error Objects</a>
 */
@InterfaceAudience.Private
public class ResponseEntityMapper implements ContainerResponseFilter {

  @Override
  public void filter(ContainerRequestContext requestContext,
    ContainerResponseContext responseContext) throws IOException {
    // Follows very loosely the top-level document specification described by JSON API. Only
    // 200 responses are wrapped; errors and other response types pass through untouched.
    if (responseContext.getStatus() == Status.OK.getStatusCode()) {
      responseContext.setEntity(ImmutableMap.of("data", responseContext.getEntity()));
    }
  }
}

View File

@ -0,0 +1,42 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.http.jersey;
import java.util.function.Supplier;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.org.glassfish.hk2.api.Factory;
/**
 * Adapts a {@link Supplier} of type {@code T} into an HK2 {@link Factory} that provides instances
 * of {@code T}. Modeled after Jersey's internal implementation.
 */
@InterfaceAudience.Private
public class SupplierFactoryAdapter<T> implements Factory<T> {

  private final Supplier<T> delegate;

  public SupplierFactoryAdapter(Supplier<T> supplier) {
    this.delegate = supplier;
  }

  @Override
  public T provide() {
    return delegate.get();
  }

  @Override
  public void dispose(T instance) {
    // instances produced by a Supplier carry no resources for this adapter to release
  }
}

View File

@ -131,6 +131,7 @@ import org.apache.hadoop.hbase.master.cleaner.SnapshotCleanerChore;
import org.apache.hadoop.hbase.master.http.MasterDumpServlet;
import org.apache.hadoop.hbase.master.http.MasterRedirectServlet;
import org.apache.hadoop.hbase.master.http.MasterStatusServlet;
import org.apache.hadoop.hbase.master.http.api_v1.ResourceConfigFactory;
import org.apache.hadoop.hbase.master.janitor.CatalogJanitor;
import org.apache.hadoop.hbase.master.locking.LockManager;
import org.apache.hadoop.hbase.master.migrate.RollingUpgradeChore;
@ -257,7 +258,8 @@ import org.apache.hbase.thirdparty.org.eclipse.jetty.server.Server;
import org.apache.hbase.thirdparty.org.eclipse.jetty.server.ServerConnector;
import org.apache.hbase.thirdparty.org.eclipse.jetty.servlet.ServletHolder;
import org.apache.hbase.thirdparty.org.eclipse.jetty.webapp.WebAppContext;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
import org.apache.hbase.thirdparty.org.glassfish.jersey.servlet.ServletContainer;
import org.apache.hadoop.hbase.shaded.protobuf.RequestConverter;
import org.apache.hadoop.hbase.shaded.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.SnapshotProtos.SnapshotDescription;
@ -281,7 +283,7 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
private static final Logger LOG = LoggerFactory.getLogger(HMaster.class);
// MASTER is name of the webapp and the attribute name used stuffing this
//instance into web context.
// instance into a web context !! AND OTHER PLACES !!
public static final String MASTER = "master";
// Manager and zk listener for master election
@ -689,9 +691,16 @@ public class HMaster extends HBaseServerBase<MasterRpcServices> implements Maste
@Override
protected void configureInfoServer(InfoServer infoServer) {
// Legacy status page plus the new JSON REST endpoints, both readable by any user.
infoServer.addUnprivilegedServlet("master-status", "/master-status", MasterStatusServlet.class);
infoServer.addUnprivilegedServlet("api_v1", "/api/v1/*", buildApiV1Servlet());
// Stash this HMaster instance into the servlet context so servlets/JSPs can reach it.
infoServer.setAttribute(MASTER, this);
}
/** Builds the Jersey container servlet that backs the {@code /api/v1} REST endpoints. */
private ServletHolder buildApiV1Servlet() {
  return new ServletHolder(
    new ServletContainer(ResourceConfigFactory.createResourceConfig(conf, this)));
}
@Override
protected Class<? extends HttpServlet> getDumpServlet() {
return MasterDumpServlet.class;

View File

@ -0,0 +1,59 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.api_v1;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.http.jersey.ResponseEntityMapper;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.http.gson.GsonSerializationFeature;
import org.apache.hadoop.hbase.master.http.jersey.MasterFeature;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ResourceConfig;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.ServerProperties;
import org.apache.hbase.thirdparty.org.glassfish.jersey.server.TracingConfig;
/**
 * Encapsulates construction and configuration of the {@link ResourceConfig} that implements
 * the {@code cluster-metrics} endpoints.
 */
@InterfaceAudience.Private
public final class ResourceConfigFactory {

  private ResourceConfigFactory() {}

  public static ResourceConfig createResourceConfig(Configuration conf, HMaster master) {
    // devs: enable TRACING to see how jersey is dispatching to resources.
    // in hbase-site.xml, set 'hbase.http.jersey.tracing.type=ON_DEMAND` and
    // to curl, add `-H X-Jersey-Tracing-Accept:true`
    final String tracingType =
      conf.get("hbase.http.jersey.tracing.type", TracingConfig.OFF.name());
    final String tracingThreshold = conf.get("hbase.http.jersey.tracing.threshold", "TRACE");
    final ResourceConfig config = new ResourceConfig()
      .setApplicationName("api_v1")
      .packages(ResourceConfigFactory.class.getPackage().getName())
      // TODO: anything registered here that does not have necessary bindings won't inject properly
      // at annotation sites and will result in a WARN logged by o.a.h.t.o.g.j.i.inject.Providers.
      // These warnings should be treated by the service as fatal errors, but I have not found a
      // callback API for registering a failed binding handler.
      .register(ResponseEntityMapper.class)
      .register(GsonSerializationFeature.class)
      .register(new MasterFeature(master));
    return config
      .property(ServerProperties.TRACING, tracingType)
      .property(ServerProperties.TRACING_THRESHOLD, tracingThreshold);
  }
}

View File

@ -0,0 +1,70 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.api_v1.cluster_metrics.model;
import java.util.List;
import org.apache.hadoop.hbase.ServerName;
import org.apache.yetus.audience.InterfaceAudience;
/**
 * Immutable view exposing a subset of fields from
 * {@link org.apache.hadoop.hbase.ClusterMetrics}.
 */
@InterfaceAudience.Private
public final class ClusterMetrics {

  private final String hbaseVersion;
  private final String clusterId;
  private final ServerName masterName;
  private final List<ServerName> backupMasterNames;

  private ClusterMetrics(String hbaseVersion, String clusterId, ServerName masterName,
    List<ServerName> backupMasterNames) {
    this.hbaseVersion = hbaseVersion;
    this.clusterId = clusterId;
    this.masterName = masterName;
    this.backupMasterNames = backupMasterNames;
  }

  /** Copies the fields of interest out of the full {@code clusterMetrics} object. */
  public static ClusterMetrics from(org.apache.hadoop.hbase.ClusterMetrics clusterMetrics) {
    return new ClusterMetrics(
      clusterMetrics.getHBaseVersion(),
      clusterMetrics.getClusterId(),
      clusterMetrics.getMasterName(),
      clusterMetrics.getBackupMasterNames());
  }

  public String getHBaseVersion() {
    return hbaseVersion;
  }

  public String getClusterId() {
    return clusterId;
  }

  public ServerName getMasterName() {
    return masterName;
  }

  public List<ServerName> getBackupMasterNames() {
    return backupMasterNames;
  }
}

View File

@ -0,0 +1,27 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Exposes the {@link org.apache.hadoop.hbase.ClusterMetrics} object over HTTP as a REST
* resource hierarchy. Intended for Master
* {@link org.apache.hadoop.hbase.http.InfoServer} consumption only.
*/
@InterfaceAudience.Private
package org.apache.hadoop.hbase.master.http.api_v1.cluster_metrics;
import org.apache.yetus.audience.InterfaceAudience;

View File

@ -0,0 +1,87 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.api_v1.cluster_metrics.resource;
import java.util.Collection;
import java.util.EnumSet;
import java.util.List;
import java.util.concurrent.ExecutionException;
import javax.inject.Inject;
import org.apache.hadoop.hbase.ClusterMetrics.Option;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.master.http.api_v1.cluster_metrics.model.ClusterMetrics;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.javax.ws.rs.GET;
import org.apache.hbase.thirdparty.javax.ws.rs.Path;
import org.apache.hbase.thirdparty.javax.ws.rs.Produces;
import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType;
/**
 * The root object exposing a subset of {@link org.apache.hadoop.hbase.ClusterMetrics}.
 */
@InterfaceAudience.Private
@Path("cluster_metrics")
@Produces({ MediaType.APPLICATION_JSON })
public class ClusterMetricsResource {
  // TODO: using the async client API lends itself well to using the JAX-RS 2.0 Spec's asynchronous
  // server APIs. However, these are only available when Jersey is wired up using Servlet 3.x
  // container and all of our existing InfoServer stuff is built on Servlet 2.x.
  // See also https://blog.allegro.tech/2014/10/async-rest.html#mixing-with-completablefuture

  private final AsyncAdmin admin;

  @Inject
  public ClusterMetricsResource(MasterServices master) {
    this.admin = master.getAsyncConnection().getAdmin();
  }

  /** Fetches the requested metric {@code fields}, blocking until the admin call completes. */
  private org.apache.hadoop.hbase.ClusterMetrics fetchMetrics(EnumSet<Option> fields)
    throws ExecutionException, InterruptedException {
    return admin.getClusterMetrics(fields).get();
  }

  @GET
  @Path("/")
  public ClusterMetrics getBaseMetrics() throws ExecutionException, InterruptedException {
    return ClusterMetrics.from(fetchMetrics(
      EnumSet.of(Option.HBASE_VERSION, Option.CLUSTER_ID, Option.MASTER, Option.BACKUP_MASTERS)));
  }

  @GET
  @Path("/live_servers")
  public Collection<ServerMetrics> getLiveServers()
    throws ExecutionException, InterruptedException {
    return fetchMetrics(EnumSet.of(Option.LIVE_SERVERS)).getLiveServerMetrics().values();
  }

  @GET
  @Path("/dead_servers")
  public List<ServerName> getDeadServers() throws ExecutionException, InterruptedException {
    return fetchMetrics(EnumSet.of(Option.DEAD_SERVERS)).getDeadServerNames();
  }
}

View File

@ -0,0 +1,43 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.gson;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.http.gson.ByteArraySerializer;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.gson.FieldNamingPolicy;
import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.com.google.gson.GsonBuilder;
/**
 * Provides a singleton {@link Gson} instance configured just the way we like it.
 */
@InterfaceAudience.Private
public final class GsonFactory {

  private GsonFactory() {}

  public static Gson buildGson() {
    final GsonBuilder builder = new GsonBuilder();
    builder.setFieldNamingPolicy(FieldNamingPolicy.LOWER_CASE_WITH_UNDERSCORES);
    builder.enableComplexMapKeySerialization();
    // byte[] values render as Strings instead of JSON number arrays.
    builder.registerTypeAdapter(byte[].class, new ByteArraySerializer());
    // Size values collapse to a single numeric byte count.
    builder.registerTypeAdapter(Size.class, new SizeAsBytesSerializer());
    return builder.create();
  }
}

View File

@ -0,0 +1,67 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.gson;
import java.util.function.Supplier;
import javax.inject.Singleton;
import org.apache.hadoop.hbase.http.gson.GsonMessageBodyWriter;
import org.apache.hadoop.hbase.http.jersey.SupplierFactoryAdapter;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.gson.Gson;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Feature;
import org.apache.hbase.thirdparty.javax.ws.rs.core.FeatureContext;
import org.apache.hbase.thirdparty.javax.ws.rs.ext.MessageBodyWriter;
import org.apache.hbase.thirdparty.org.glassfish.hk2.utilities.binding.AbstractBinder;
import org.apache.hbase.thirdparty.org.glassfish.hk2.utilities.binding.ServiceBindingBuilder;
/**
 * Used to register with (shaded) Jersey the presence of Entity serialization using (shaded) Gson.
 */
@InterfaceAudience.Private
public class GsonSerializationFeature implements Feature {

  @Override
  public boolean configure(FeatureContext context) {
    context.register(new Binder());
    return true;
  }

  /**
   * Register this feature's provided functionality and defines their lifetime scopes.
   */
  private static class Binder extends AbstractBinder {

    @Override
    protected void configure() {
      // a single shared Gson instance backs all serialization for this container
      bindFactory(GsonFactory::buildGson).to(Gson.class).in(Singleton.class);
      bind(GsonMessageBodyWriter.class).to(MessageBodyWriter.class).in(Singleton.class);
    }

    /**
     * Helper method for smoothing over use of {@link SupplierFactoryAdapter}. Inspired by internal
     * implementation details of jersey itself.
     */
    private <T> ServiceBindingBuilder<T> bindFactory(Supplier<T> supplier) {
      return bindFactory(new SupplierFactoryAdapter<>(supplier));
    }
  }
}

View File

@ -0,0 +1,38 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.gson;
import java.lang.reflect.Type;
import org.apache.hadoop.hbase.Size;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.gson.JsonElement;
import org.apache.hbase.thirdparty.com.google.gson.JsonPrimitive;
import org.apache.hbase.thirdparty.com.google.gson.JsonSerializationContext;
import org.apache.hbase.thirdparty.com.google.gson.JsonSerializer;
/**
 * Renders a {@link Size} as a single JSON number: its magnitude expressed in bytes.
 */
@InterfaceAudience.Private
final class SizeAsBytesSerializer implements JsonSerializer<Size> {

  @Override
  public JsonElement serialize(Size src, Type typeOfSrc, JsonSerializationContext context) {
    // Normalize whatever unit the Size carries down to a byte count.
    final double bytes = src.get(Size.Unit.BYTE);
    return new JsonPrimitive(bytes);
  }
}

View File

@ -0,0 +1,24 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* This package should be in the hbase-http module as {@code a.a.h.h.http.gson}. It is here instead
* because hbase-http does not currently have a dependency on hbase-client, which is required for
* implementing {@link org.apache.hadoop.hbase.master.http.gson.SizeAsBytesSerializer}.
*/
package org.apache.hadoop.hbase.master.http.gson;

View File

@ -0,0 +1,73 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.jersey;
import java.util.function.Supplier;
import javax.inject.Singleton;
import org.apache.hadoop.hbase.http.jersey.SupplierFactoryAdapter;
import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.javax.ws.rs.core.Feature;
import org.apache.hbase.thirdparty.javax.ws.rs.core.FeatureContext;
import org.apache.hbase.thirdparty.org.glassfish.hk2.utilities.binding.AbstractBinder;
import org.apache.hbase.thirdparty.org.glassfish.hk2.utilities.binding.ServiceBindingBuilder;
/**
* Implements a Singleton binding to the provided instance of {@link HMaster} for both
* {@link HMaster} and {@link MasterServices} injections.
*/
@InterfaceAudience.Private
public class MasterFeature implements Feature {

  /** Supplies the {@link HMaster} instance that backs both injection bindings. */
  private final Supplier<HMaster> supplier;

  public MasterFeature(HMaster master) {
    this.supplier = () -> master;
  }

  @Override
  public boolean configure(FeatureContext context) {
    context.register(new Binder(supplier));
    return true;
  }

  /**
   * Register this feature's provided functionality and defines their lifetime scopes.
   * <p>
   * Declared {@code static} so the binder carries no hidden reference to the enclosing
   * {@code MasterFeature}; the only state it needs — the supplier — is passed explicitly.
   */
  private static class Binder extends AbstractBinder {
    private final Supplier<HMaster> supplier;

    Binder(Supplier<HMaster> supplier) {
      this.supplier = supplier;
    }

    @Override
    protected void configure() {
      // The same HMaster instance satisfies both injection types.
      bindFactory(supplier)
        .to(HMaster.class)
        .in(Singleton.class);
      bindFactory(supplier)
        .to(MasterServices.class)
        .in(Singleton.class);
    }

    /**
     * Helper method for smoothing over use of {@link SupplierFactoryAdapter}. Inspired by internal
     * implementation details of jersey itself.
     */
    private <T> ServiceBindingBuilder<T> bindFactory(Supplier<T> supplier) {
      return bindFactory(new SupplierFactoryAdapter<>(supplier));
    }
  }
}

View File

@ -19,6 +19,7 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
@ -75,6 +76,10 @@ public final class MiniClusterRule extends ExternalResource {
return this;
}
/**
 * Convenience overload of {@link #setConfiguration(Configuration)}: resolves the supplied
 * {@link Configuration} eagerly and delegates to that method.
 */
public Builder setConfiguration(Supplier<Configuration> supplier) {
  final Configuration conf = supplier.get();
  return setConfiguration(conf);
}
public MiniClusterRule build() {
return new MiniClusterRule(conf, miniClusterOption != null ? miniClusterOption :
StartTestingClusterOption.builder().build());

View File

@ -0,0 +1,203 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.containsString;
import static org.hamcrest.Matchers.endsWith;
import static org.hamcrest.Matchers.startsWith;
import static org.junit.Assert.assertThrows;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ConnectionRule;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MiniClusterRule;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.StartTestingClusterOption;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Durability;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.http.api_v1.cluster_metrics.resource.ClusterMetricsResource;
import org.apache.hadoop.hbase.testclassification.LargeTests;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.ExternalResource;
import org.junit.rules.RuleChain;
import org.apache.hbase.thirdparty.javax.ws.rs.NotAcceptableException;
import org.apache.hbase.thirdparty.javax.ws.rs.client.Client;
import org.apache.hbase.thirdparty.javax.ws.rs.client.ClientBuilder;
import org.apache.hbase.thirdparty.javax.ws.rs.client.WebTarget;
import org.apache.hbase.thirdparty.javax.ws.rs.core.MediaType;
/**
* Tests for the master api_v1 {@link ClusterMetricsResource}.
*/
@Category({ MasterTests.class, LargeTests.class })
public class TestApiV1ClusterMetricsResource {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(TestApiV1ClusterMetricsResource.class);

  private static final MiniClusterRule miniClusterRule = MiniClusterRule.newBuilder()
    .setMiniClusterOption(StartTestingClusterOption.builder()
      .numZkServers(3)
      .numMasters(3)
      .numDataNodes(3)
      .build())
    .setConfiguration(() -> {
      // enable Master InfoServer and random port selection
      final Configuration conf = new Configuration();
      conf.setInt(HConstants.MASTER_INFO_PORT, 0);
      return conf;
    })
    .build();

  private static final ConnectionRule connectionRule =
    new ConnectionRule(miniClusterRule::createConnection);
  private static final ClassSetup classRule = new ClassSetup(connectionRule::getConnection);

  /**
   * Per-class fixture: creates a table so the cluster has some content, and builds a
   * {@link WebTarget} aimed at the active master's {@code api/v1/cluster_metrics} endpoint.
   */
  private static final class ClassSetup extends ExternalResource {

    private final Supplier<AsyncConnection> connectionSupplier;
    // Table created in before() and dropped in after().
    private final TableName tableName;
    private AsyncAdmin admin;
    private WebTarget target;

    public ClassSetup(final Supplier<AsyncConnection> connectionSupplier) {
      this.connectionSupplier = connectionSupplier;
      tableName = TableName.valueOf(TestApiV1ClusterMetricsResource.class.getSimpleName());
    }

    /** @return a target rooted at the master's {@code api/v1/cluster_metrics} URI. */
    public WebTarget getTarget() {
      return target;
    }

    @Override
    protected void before() throws Throwable {
      final AsyncConnection conn = connectionSupplier.get();
      admin = conn.getAdmin();
      final TableDescriptor tableDescriptor = TableDescriptorBuilder
        .newBuilder(tableName)
        .setColumnFamily(
          ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("c")).build())
        .setDurability(Durability.SKIP_WAL)
        .build();
      admin.createTable(tableDescriptor).get();
      // Resolve the active master's hostname and its InfoServer port (randomly assigned; see
      // the MASTER_INFO_PORT=0 configuration above) into the base URL for the endpoint.
      final String baseUrl = admin.getMaster()
        .thenApply(ServerName::getHostname)
        .thenCombine(
          admin.getMasterInfoPort(),
          (hostName, infoPort) -> "http://" + hostName + ":" + infoPort)
        .get();
      final Client client = ClientBuilder.newClient();
      target = client.target(baseUrl).path("api/v1/cluster_metrics");
    }

    @Override
    protected void after() {
      // Drop the table that before() created. Previously this hard-coded
      // TableName.valueOf("test"), which never matched the created table and left it behind.
      try {
        admin.tableExists(tableName)
          .thenCompose(val -> {
            if (val) {
              return admin.disableTable(tableName)
                .thenCompose(ignored -> admin.deleteTable(tableName));
            } else {
              return CompletableFuture.completedFuture(null);
            }
          })
          .get();
      } catch (Exception e) {
        throw new RuntimeException(e);
      }
    }
  }

  @ClassRule
  public static RuleChain ruleChain = RuleChain.outerRule(miniClusterRule)
    .around(connectionRule)
    .around(classRule);

  @Test
  public void testGetRoot() {
    final String response = classRule.getTarget()
      .request(MediaType.APPLICATION_JSON_TYPE)
      .get(String.class);
    assertThat(response, allOf(
      containsString("\"hbase_version\":"),
      containsString("\"cluster_id\":"),
      containsString("\"master_name\":"),
      containsString("\"backup_master_names\":")));
  }

  @Test
  public void testGetRootHtml() {
    // The endpoint serves only JSON; an HTML request must be rejected with 406.
    assertThrows(NotAcceptableException.class, () -> classRule.getTarget()
      .request(MediaType.TEXT_HTML_TYPE)
      .get(String.class));
  }

  @Test
  public void testGetLiveServers() {
    final String response = classRule.getTarget()
      .path("live_servers")
      .request(MediaType.APPLICATION_JSON_TYPE)
      .get(String.class);
    assertThat(response, allOf(
      startsWith("{\"data\":["),
      endsWith("]}")));
  }

  @Test
  public void testGetLiveServersHtml() {
    assertThrows(NotAcceptableException.class, () -> classRule.getTarget()
      .path("live_servers")
      .request(MediaType.TEXT_HTML_TYPE)
      .get(String.class));
  }

  @Test
  public void testGetDeadServers() {
    final String response = classRule.getTarget()
      .path("dead_servers")
      .request(MediaType.APPLICATION_JSON_TYPE)
      .get(String.class);
    assertThat(response, allOf(
      startsWith("{\"data\":["),
      endsWith("]}")));
  }

  @Test
  public void testGetDeadServersHtml() {
    assertThrows(NotAcceptableException.class, () -> classRule.getTarget()
      .path("dead_servers")
      .request(MediaType.TEXT_HTML_TYPE)
      .get(String.class));
  }
}

View File

@ -0,0 +1,105 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.http.gson;
import static org.junit.Assert.assertEquals;
import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.Size;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.apache.hadoop.hbase.util.Bytes;
import org.junit.BeforeClass;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.apache.hbase.thirdparty.com.google.gson.Gson;
@Category({ MasterTests.class, SmallTests.class})
public class GsonFactoryTest {

  @ClassRule
  public static final HBaseClassTestRule CLASS_RULE =
    HBaseClassTestRule.forClass(GsonFactoryTest.class);

  // Gson instance under test; built once and shared by all test methods.
  private static Gson gson;

  @BeforeClass
  public static void beforeClass() {
    gson = GsonFactory.buildGson();
  }

  // Field names are expected to be rendered in lower_case_with_underscores,
  // per the expected JSON asserted below.
  @Test
  public void testSerializeToLowerCaseUnderscores() {
    final SomeBean input = new SomeBean(false, 57, "hello\n");
    final String actual = gson.toJson(input);
    final String expected = "{\"a_boolean\":false,\"an_int\":57,\"a_string\":\"hello\\n\"}";
    assertEquals(expected, actual);
  }

  // Size map keys are expected to serialize as their byte-count magnitude
  // (10 KB -> "10240.0", 5 MB -> "5242880.0").
  @Test
  public void testSerializeMapWithSizeKeys() {
    final Map<Size, String> input = new TreeMap<>();
    input.put(new Size(10, Size.Unit.KILOBYTE), "10kb");
    input.put(new Size(5, Size.Unit.MEGABYTE), "5mb");
    final String actual = gson.toJson(input);
    final String expected = "{\"10240.0\":\"10kb\",\"5242880.0\":\"5mb\"}";
    assertEquals(expected, actual);
  }

  // Byte arrays containing non-printable bytes are expected to escape those
  // bytes while leaving printable text intact.
  @Test
  public void testSerializeNonPrintableByteArrays() {
    final Map<byte[], byte[]> input = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    input.put(Bytes.toBytes("this is printable"), new byte[] { 0, 1, 2, 3, 4, 5 });
    input.put(new byte[] { -127, -63, 0, 63, 127 }, Bytes.toBytes("test"));
    final String actual = gson.toJson(input);
    // NOTE(review): the "<EFBFBD><EFBFBD>" run below looks mojibake'd (UTF-8 replacement-character
    // bytes rendered literally) — verify this literal against the original file's encoding.
    final String expected = "{" +
      "\"this is printable\":\"\\u0000\\u0001\\u0002\\u0003\\u0004\\u0005\"," +
      "\"<EFBFBD><EFBFBD>\\u0000?\u007F\":\"test\"}";
    assertEquals(expected, actual);
  }

  // Minimal immutable fixture bean for exercising field-name serialization.
  private static final class SomeBean {
    private final boolean aBoolean;
    private final int anInt;
    private final String aString;

    public SomeBean(
      final boolean aBoolean,
      final int anInt,
      final String aString
    ) {
      this.aBoolean = aBoolean;
      this.anInt = anInt;
      this.aString = aString;
    }

    public boolean isaBoolean() {
      return aBoolean;
    }

    public int getAnInt() {
      return anInt;
    }

    public String getaString() {
      return aString;
    }
  }
}