HBASE-23653 Expose content of meta table in web ui (#1020)

Adds a display of the content of 'hbase:meta' to the Master's
table.jsp, when that table is selected. Supports basic pagination,
filtering, &c.

Signed-off-by: stack <stack@apache.org>
Signed-off-by: Bharath Vissapragada <bharathv@apache.org>
Nick Dimiduk 2020-01-16 08:46:39 -08:00 committed by GitHub
parent 04d789f1a7
commit 00fc46756a
14 changed files with 1680 additions and 27 deletions
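For orientation, the new "Meta Entries" view is driven entirely by request parameters on table.jsp. A hypothetical filtered, paginated request (default:t1 is a placeholder table name) might look like:

  /table.jsp?name=hbase%3Ameta&scan_limit=50&scan_table=default%3At1&scan_region_state=OPEN

The parameter names are defined by MetaBrowser, added below.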

View File

@@ -18,7 +18,10 @@
package org.apache.hadoop.hbase;
import java.util.Arrays;
import java.util.Collection;
import java.util.Iterator;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.util.Bytes;
@@ -31,7 +34,7 @@ import org.apache.yetus.audience.InterfaceAudience;
* (assuming small number of locations)
*/
@InterfaceAudience.Private
public class RegionLocations {
public class RegionLocations implements Iterable<HRegionLocation> {
private final int numNonNullElements;
@@ -361,6 +364,11 @@ public class RegionLocations {
return null;
}
@Override
public Iterator<HRegionLocation> iterator() {
return Arrays.asList(locations).iterator();
}
@Override
public String toString() {
StringBuilder builder = new StringBuilder("[");
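With RegionLocations now implementing Iterable<HRegionLocation>, callers can walk the replica locations directly, which is what RegionReplicaInfo.from() below relies on. A minimal usage sketch (not part of the patch), assuming a populated RegionLocations instance named locations; note that the backing array may contain null slots:

// Hedged sketch: iterate every replica location, skipping unset slots.
for (HRegionLocation location : locations) {
  if (location == null) {
    continue; // replica slot with no known location
  }
  System.out.println(location.getRegion().getRegionNameAsString()
      + " on " + location.getServerName());
}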

View File

@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
/**
* Convenience class for composing an instance of {@link TableDescriptor}.
* @since 2.0.0
*/
@InterfaceAudience.Public

View File

@@ -442,6 +442,11 @@
<artifactId>hamcrest-core</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.bouncycastle</groupId>
<artifactId>bcprov-jdk15on</artifactId>

View File

@@ -0,0 +1,424 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.webapp;
import java.io.UnsupportedEncodingException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.stream.StreamSupport;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hbase.CompareOperator;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.AsyncTable;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hbase.thirdparty.com.google.common.collect.Iterators;
import org.apache.hbase.thirdparty.io.netty.handler.codec.http.QueryStringEncoder;
/**
* <p>
* Support class for the "Meta Entries" section in {@code resources/hbase-webapps/master/table.jsp}.
* </p>
* <p>
* <b>Interface</b>. This class's intended consumer is {@code table.jsp}. As such, its primary
* interface is the active {@link HttpServletRequest}, from which it uses the {@code scan_*}
* request parameters. This class supports paging through an optionally filtered view of the
* contents of {@code hbase:meta}. Those filters and the pagination offset are specified via these
* request parameters. It provides helper methods for constructing pagination links.
* <ul>
* <li>{@value #NAME_PARAM} - the name of the table requested. The only table of our concern here
* is {@code hbase:meta}; any other value is effectively ignored by the giant conditional in the
* jsp.</li>
* <li>{@value #SCAN_LIMIT_PARAM} - specifies a limit on the number of region (replicas) rendered
* by the table in a single request -- a limit on page size. This corresponds to the
* number of {@link RegionReplicaInfo} objects produced by {@link Results#iterator()}. When a
* value for {@code scan_limit} is invalid or not specified, the default value of
* {@value #SCAN_LIMIT_DEFAULT} is used. In order to avoid excessive resource consumption, a
* maximum value of {@value #SCAN_LIMIT_MAX} is enforced.</li>
* <li>{@value #SCAN_REGION_STATE_PARAM} - an optional filter on {@link RegionState}.</li>
* <li>{@value #SCAN_START_PARAM} - specifies the rowkey at which a scan should start. For usage
* details, see the below section on <b>Pagination</b>.</li>
* <li>{@value #SCAN_TABLE_PARAM} - specifies a filter on the values returned, limiting them to
* regions from a specified table. This parameter is implemented as a prefix filter on the
* {@link Scan}, so in effect it can be used for simple namespace and multi-table matches.</li>
* </ul>
* </p>
* <p>
* <b>Pagination</b>. A single page of results is made available via {@link #getResults()} / an
* instance of {@link Results}. Callers use its {@link Iterator} to consume the page of
* {@link RegionReplicaInfo} instances, each of which represents a region or region replica. Helper
* methods are provided for building page navigation controls preserving the user's selected filter
* set: {@link #buildFirstPageUrl()}, {@link #buildNextPageUrl(byte[])}. Pagination is implemented
* using a simple offset + limit system. Offset is provided by the {@value #SCAN_START_PARAM},
* limit via {@value #SCAN_LIMIT_PARAM}. Under the hood, the {@link Scan} is constructed with
* {@link Scan#setLimit(int)} set to ({@value #SCAN_LIMIT_PARAM} + 1), while the
* {@link Results} {@link Iterator} honors {@value #SCAN_LIMIT_PARAM}. The +1 allows the caller to
* know if a "next page" is available via {@link Results#hasMoreResults()}. Note that this
* pagination strategy is incomplete when it comes to region replicas and can potentially omit
* rendering replicas that fall between the last rowkey offset and {@code replicaCount % page size}.
* </p>
* <p>
* <b>Error Messages</b>. Any time there's an error parsing user input, a message will be populated
* in {@link #getErrorMessages()}. Any fields which produce an error will have their filter values
* set to the default, except for a value of {@value #SCAN_LIMIT_PARAM} that exceeds
* {@value #SCAN_LIMIT_MAX}, in which case {@value #SCAN_LIMIT_MAX} is used.
* </p>
*/
@InterfaceAudience.Private
public class MetaBrowser {
public static final String NAME_PARAM = "name";
public static final String SCAN_LIMIT_PARAM = "scan_limit";
public static final String SCAN_REGION_STATE_PARAM = "scan_region_state";
public static final String SCAN_START_PARAM = "scan_start";
public static final String SCAN_TABLE_PARAM = "scan_table";
public static final int SCAN_LIMIT_DEFAULT = 10;
public static final int SCAN_LIMIT_MAX = 10_000;
private final AsyncConnection connection;
private final HttpServletRequest request;
private final List<String> errorMessages;
private final String name;
private final Integer scanLimit;
private final RegionState.State scanRegionState;
private final byte[] scanStart;
private final TableName scanTable;
public MetaBrowser(final AsyncConnection connection, final HttpServletRequest request) {
this.connection = connection;
this.request = request;
this.errorMessages = new LinkedList<>();
this.name = resolveName(request);
this.scanLimit = resolveScanLimit(request);
this.scanRegionState = resolveScanRegionState(request);
this.scanStart = resolveScanStart(request);
this.scanTable = resolveScanTable(request);
}
public List<String> getErrorMessages() {
return errorMessages;
}
public String getName() {
return name;
}
public Integer getScanLimit() {
return scanLimit;
}
public byte[] getScanStart() {
return scanStart;
}
public RegionState.State getScanRegionState() {
return scanRegionState;
}
public TableName getScanTable() {
return scanTable;
}
public Results getResults() {
final AsyncTable<AdvancedScanResultConsumer> asyncTable =
connection.getTable(TableName.META_TABLE_NAME);
return new Results(asyncTable.getScanner(buildScan()));
}
@Override
public String toString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
.append("scanStart", scanStart)
.append("scanLimit", scanLimit)
.append("scanTable", scanTable)
.append("scanRegionState", scanRegionState)
.toString();
}
private static String resolveName(final HttpServletRequest request) {
return resolveRequestParameter(request, NAME_PARAM);
}
private Integer resolveScanLimit(final HttpServletRequest request) {
final String requestValueStr = resolveRequestParameter(request, SCAN_LIMIT_PARAM);
if (StringUtils.isBlank(requestValueStr)) {
return null;
}
final Integer requestValue = tryParseInt(requestValueStr);
if (requestValue == null) {
errorMessages.add(buildScanLimitMalformedErrorMessage(requestValueStr));
return null;
}
if (requestValue <= 0) {
errorMessages.add(buildScanLimitLTEQZero(requestValue));
return SCAN_LIMIT_DEFAULT;
}
final int truncatedValue = Math.min(requestValue, SCAN_LIMIT_MAX);
if (requestValue != truncatedValue) {
errorMessages.add(buildScanLimitExceededErrorMessage(requestValue));
}
return truncatedValue;
}
private RegionState.State resolveScanRegionState(final HttpServletRequest request) {
final String requestValueStr = resolveRequestParameter(request, SCAN_REGION_STATE_PARAM);
if (requestValueStr == null) {
return null;
}
final RegionState.State requestValue = tryValueOf(RegionState.State.class, requestValueStr);
if (requestValue == null) {
errorMessages.add(buildScanRegionStateMalformedErrorMessage(requestValueStr));
return null;
}
return requestValue;
}
private static byte[] resolveScanStart(final HttpServletRequest request) {
// TODO: handle replicas that fall between the last rowkey and pagination limit.
final String requestValue = resolveRequestParameter(request, SCAN_START_PARAM);
if (requestValue == null) {
return null;
}
return Bytes.toBytesBinary(requestValue);
}
private static TableName resolveScanTable(final HttpServletRequest request) {
final String requestValue = resolveRequestParameter(request, SCAN_TABLE_PARAM);
if (requestValue == null) {
return null;
}
return TableName.valueOf(requestValue);
}
private static String resolveRequestParameter(final HttpServletRequest request,
final String param) {
if (request == null) {
return null;
}
final String requestValueStrEnc = request.getParameter(param);
if (StringUtils.isBlank(requestValueStrEnc)) {
return null;
}
return urlDecode(requestValueStrEnc);
}
private static Filter buildTableFilter(final TableName tableName) {
return new PrefixFilter(tableName.toBytes());
}
private static Filter buildScanRegionStateFilter(final RegionState.State state) {
return new SingleColumnValueFilter(
HConstants.CATALOG_FAMILY,
HConstants.STATE_QUALIFIER,
CompareOperator.EQUAL,
// use the same serialization strategy as found in MetaTableAccessor#addRegionStateToPut
Bytes.toBytes(state.name()));
}
private Filter buildScanFilter() {
if (scanTable == null && scanRegionState == null) {
return null;
}
final List<Filter> filters = new ArrayList<>(2);
if (scanTable != null) {
filters.add(buildTableFilter(scanTable));
}
if (scanRegionState != null) {
filters.add(buildScanRegionStateFilter(scanRegionState));
}
if (filters.size() == 1) {
return filters.get(0);
}
return new FilterList(FilterList.Operator.MUST_PASS_ALL, filters);
}
private Scan buildScan() {
final Scan metaScan = new Scan()
.addFamily(HConstants.CATALOG_FAMILY)
.readVersions(1)
.setLimit((scanLimit != null ? scanLimit : SCAN_LIMIT_DEFAULT) + 1);
if (scanStart != null) {
metaScan.withStartRow(scanStart, false);
}
final Filter filter = buildScanFilter();
if (filter != null) {
metaScan.setFilter(filter);
}
return metaScan;
}
/**
* Adds {@code value} to {@code encoder} under {@code paramName} when {@code value} is non-null.
*/
private void addParam(final QueryStringEncoder encoder, final String paramName,
final Object value) {
if (value != null) {
encoder.addParam(paramName, value.toString());
}
}
private QueryStringEncoder buildFirstPageEncoder() {
final QueryStringEncoder encoder =
new QueryStringEncoder(request.getRequestURI());
addParam(encoder, NAME_PARAM, name);
addParam(encoder, SCAN_LIMIT_PARAM, scanLimit);
addParam(encoder, SCAN_REGION_STATE_PARAM, scanRegionState);
addParam(encoder, SCAN_TABLE_PARAM, scanTable);
return encoder;
}
public String buildFirstPageUrl() {
return buildFirstPageEncoder().toString();
}
static String buildStartParamFrom(final byte[] lastRow) {
if (lastRow == null) {
return null;
}
return urlEncode(Bytes.toStringBinary(lastRow));
}
public String buildNextPageUrl(final byte[] lastRow) {
final QueryStringEncoder encoder = buildFirstPageEncoder();
final String startRow = buildStartParamFrom(lastRow);
addParam(encoder, SCAN_START_PARAM, startRow);
return encoder.toString();
}
private static String urlEncode(final String val) {
if (StringUtils.isEmpty(val)) {
return null;
}
try {
return URLEncoder.encode(val, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
return null;
}
}
private static String urlDecode(final String val) {
if (StringUtils.isEmpty(val)) {
return null;
}
try {
return URLDecoder.decode(val, StandardCharsets.UTF_8.toString());
} catch (UnsupportedEncodingException e) {
return null;
}
}
private static Integer tryParseInt(final String val) {
if (StringUtils.isEmpty(val)) {
return null;
}
try {
return Integer.parseInt(val);
} catch (NumberFormatException e) {
return null;
}
}
private static <T extends Enum<T>> T tryValueOf(final Class<T> clazz,
final String value) {
if (clazz == null || value == null) {
return null;
}
try {
return T.valueOf(clazz, value);
} catch (IllegalArgumentException e) {
return null;
}
}
private static String buildScanLimitExceededErrorMessage(final int requestValue) {
return String.format(
"Requested SCAN_LIMIT value %d exceeds maximum value %d.", requestValue, SCAN_LIMIT_MAX);
}
private static String buildScanLimitMalformedErrorMessage(final String requestValue) {
return String.format(
"Requested SCAN_LIMIT value '%s' cannot be parsed as an integer.", requestValue);
}
private static String buildScanLimitLTEQZero(final int requestValue) {
return String.format("Requested SCAN_LIMIT value %d is <= 0.", requestValue);
}
private static String buildScanRegionStateMalformedErrorMessage(final String requestValue) {
return String.format(
"Requested SCAN_REGION_STATE value '%s' cannot be parsed as a RegionState.", requestValue);
}
/**
* Encapsulates the results produced by this {@link MetaBrowser} instance.
*/
public final class Results implements AutoCloseable, Iterable<RegionReplicaInfo> {
private final ResultScanner resultScanner;
private final Iterator<RegionReplicaInfo> sourceIterator;
private Results(final ResultScanner resultScanner) {
this.resultScanner = resultScanner;
this.sourceIterator = StreamSupport.stream(resultScanner.spliterator(), false)
.map(RegionReplicaInfo::from)
.flatMap(Collection::stream)
.iterator();
}
/**
* @return {@code true} when the underlying {@link ResultScanner} is not yet exhausted,
* {@code false} otherwise.
*/
public boolean hasMoreResults() {
return sourceIterator.hasNext();
}
@Override
public void close() {
if (resultScanner != null) {
resultScanner.close();
}
}
@Override public Iterator<RegionReplicaInfo> iterator() {
return Iterators.limit(sourceIterator, scanLimit != null ? scanLimit : SCAN_LIMIT_DEFAULT);
}
}
}
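The pagination contract described in the class javadoc is easiest to see from the caller's side. Below is a minimal consumption sketch (not part of the patch) of roughly what table.jsp does; connection and request are assumed to be the active AsyncConnection and HttpServletRequest:

// Hedged sketch: render one page of hbase:meta and compute the "next page" link.
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
byte[] lastRow = null;
final boolean hasMore;
try (MetaBrowser.Results results = metaBrowser.getResults()) {
  for (RegionReplicaInfo info : results) {          // at most scan_limit entries
    lastRow = info != null ? info.getRow() : null;  // remember the offset for the next page
    // ... render one table row per region replica ...
  }
  hasMore = results.hasMoreResults();               // true only when a (limit + 1)th row exists
}
final String nextPageUrl = hasMore ? metaBrowser.buildNextPageUrl(lastRow) : null;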

View File

@@ -0,0 +1,143 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.webapp;
import java.util.Collections;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.StreamSupport;
import org.apache.commons.lang3.builder.EqualsBuilder;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.apache.commons.lang3.builder.ToStringBuilder;
import org.apache.commons.lang3.builder.ToStringStyle;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.assignment.RegionStateStore;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;
/**
* A POJO that consolidates the information about a single region replica that's stored in meta.
*/
@InterfaceAudience.Private
public final class RegionReplicaInfo {
private final byte[] row;
private final RegionInfo regionInfo;
private final RegionState.State regionState;
private final ServerName serverName;
private RegionReplicaInfo(final Result result, final HRegionLocation location) {
this.row = result != null ? result.getRow() : null;
this.regionInfo = location != null ? location.getRegion() : null;
this.regionState = (result != null && regionInfo != null)
? RegionStateStore.getRegionState(result, regionInfo)
: null;
this.serverName = location != null ? location.getServerName() : null;
}
public static List<RegionReplicaInfo> from(final Result result) {
if (result == null) {
return Collections.singletonList(null);
}
final RegionLocations locations = MetaTableAccessor.getRegionLocations(result);
if (locations == null) {
return Collections.singletonList(null);
}
return StreamSupport.stream(locations.spliterator(), false)
.map(location -> new RegionReplicaInfo(result, location))
.collect(Collectors.toList());
}
public byte[] getRow() {
return row;
}
public RegionInfo getRegionInfo() {
return regionInfo;
}
public byte[] getRegionName() {
return regionInfo != null ? regionInfo.getRegionName() : null;
}
public byte[] getStartKey() {
return regionInfo != null ? regionInfo.getStartKey() : null;
}
public byte[] getEndKey() {
return regionInfo != null ? regionInfo.getEndKey() : null;
}
public Integer getReplicaId() {
return regionInfo != null ? regionInfo.getReplicaId() : null;
}
public RegionState.State getRegionState() {
return regionState;
}
public ServerName getServerName() {
return serverName;
}
@Override
public boolean equals(Object other) {
if (this == other) {
return true;
}
if (other == null || getClass() != other.getClass()) {
return false;
}
RegionReplicaInfo that = (RegionReplicaInfo) other;
return new EqualsBuilder()
.append(row, that.row)
.append(regionInfo, that.regionInfo)
.append(regionState, that.regionState)
.append(serverName, that.serverName)
.isEquals();
}
@Override
public int hashCode() {
return new HashCodeBuilder(17, 37)
.append(row)
.append(regionInfo)
.append(regionState)
.append(serverName)
.toHashCode();
}
@Override public String toString() {
return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE)
.append("row", Bytes.toStringBinary(row))
.append("regionInfo", regionInfo)
.append("regionState", regionState)
.append("serverName", serverName)
.toString();
}
}
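As a quick illustration of how these POJOs come out of a catalog row (not part of the patch; result is assumed to be a single Result read from hbase:meta):

// Hedged sketch: expand one hbase:meta row into its per-replica POJOs.
for (RegionReplicaInfo info : RegionReplicaInfo.from(result)) {
  if (info == null || info.getRegionInfo() == null) {
    continue; // the row had no parseable location for this replica slot
  }
  System.out.printf("%s replica=%d state=%s server=%s%n",
      Bytes.toStringBinary(info.getRegionName()),
      info.getReplicaId(), info.getRegionState(), info.getServerName());
}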

View File

@@ -17,76 +17,96 @@
* limitations under the License.
*/
--%>
<%@page import="java.net.URLEncoder"%>
<%@ page contentType="text/html;charset=UTF-8"
import="static org.apache.commons.lang3.StringEscapeUtils.escapeXml"
import="java.net.URLEncoder"
import="java.util.ArrayList"
import="java.util.Collection"
import="java.util.HashMap"
import="java.util.LinkedHashMap"
import="java.util.List"
import="java.util.Map"
import="java.util.Optional"
import="java.util.TreeMap"
import="java.util.concurrent.TimeUnit"
import="org.apache.commons.lang3.StringEscapeUtils"
import="org.apache.hadoop.conf.Configuration"
import="org.apache.hadoop.hbase.HTableDescriptor"
import="org.apache.hadoop.hbase.HColumnDescriptor"
import="org.apache.hadoop.hbase.HConstants"
import="org.apache.hadoop.hbase.HRegionLocation"
import="org.apache.hadoop.hbase.HTableDescriptor"
import="org.apache.hadoop.hbase.RegionMetrics"
import="org.apache.hadoop.hbase.RegionMetricsBuilder"
import="org.apache.hadoop.hbase.ServerMetrics"
import="org.apache.hadoop.hbase.ServerName"
import="org.apache.hadoop.hbase.Size"
import="org.apache.hadoop.hbase.TableName"
import="org.apache.hadoop.hbase.TableNotFoundException"
import="org.apache.hadoop.hbase.client.AsyncAdmin"
import="org.apache.hadoop.hbase.client.AsyncConnection"
import="org.apache.hadoop.hbase.client.CompactionState"
import="org.apache.hadoop.hbase.client.ConnectionFactory"
import="org.apache.hadoop.hbase.client.RegionInfo"
import="org.apache.hadoop.hbase.client.RegionInfoBuilder"
import="org.apache.hadoop.hbase.client.RegionLocator"
import="org.apache.hadoop.hbase.client.RegionReplicaUtil"
import="org.apache.hadoop.hbase.client.Table"
import="org.apache.hadoop.hbase.master.HMaster"
import="org.apache.hadoop.hbase.master.assignment.RegionStates"
import="org.apache.hadoop.hbase.master.RegionState"
import="org.apache.hadoop.hbase.master.assignment.RegionStates"
import="org.apache.hadoop.hbase.master.webapp.MetaBrowser"
import="org.apache.hadoop.hbase.master.webapp.RegionReplicaInfo"
import="org.apache.hadoop.hbase.quotas.QuotaSettingsFactory"
import="org.apache.hadoop.hbase.quotas.QuotaTableUtil"
import="org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot"
import="org.apache.hadoop.hbase.quotas.ThrottleSettings"
import="org.apache.hadoop.hbase.util.Bytes"
import="org.apache.hadoop.hbase.util.FSUtils"
import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator"
import="org.apache.hadoop.util.StringUtils"
import="org.apache.hbase.thirdparty.com.google.protobuf.ByteString"%>
import="org.apache.hadoop.hbase.quotas.QuotaTableUtil"%>
<%@ page import="org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot" %>
<%@ page import="org.apache.hadoop.hbase.quotas.ThrottleSettings" %>
<%@ page import="org.apache.hadoop.hbase.util.Bytes" %>
<%@ page import="org.apache.hadoop.hbase.util.FSUtils" %>
<%@ page import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator" %>
<%@ page import="org.apache.hadoop.util.StringUtils" %>
<%@ page import="org.apache.hbase.thirdparty.com.google.protobuf.ByteString" %>
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos" %>
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %>
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %>
<%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %>
<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %>
<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %>
<%@ page import="org.apache.hadoop.hbase.Size" %>
<%@ page import="org.apache.hadoop.hbase.RegionMetricsBuilder" %>
<%!
/**
* @return An empty region load stamped with the passed in <code>regionInfo</code>
* region name.
*/
private RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) {
private static RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) {
return RegionMetricsBuilder.toRegionMetrics(ClusterStatusProtos.RegionLoad.newBuilder().
setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder().
setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME).
setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build());
}
/**
* Given dicey information that may or may not be available in meta, render a link to the region on
* its region server.
* @return an anchor tag if one can be built, {@code null} otherwise.
*/
private static String buildRegionServerLink(final ServerName serverName, final int rsInfoPort,
final RegionInfo regionInfo, final RegionState.State regionState) {
if (serverName == null || regionInfo == null) { return null; }
if (regionState != RegionState.State.OPEN) {
// region is assigned to RS, but RS knows nothing of it. don't bother with a link.
return serverName.getServerName();
}
final String socketAddress = serverName.getHostname() + ":" + rsInfoPort;
final String URI = "//" + socketAddress + "/region.jsp"
+ "?name=" + regionInfo.getEncodedName();
return "<a href=\"" + URI + "\">" + serverName.getServerName() + "</a>";
}
%>
<%
final String ZEROKB = "0 KB";
final String ZEROMB = "0 MB";
HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER);
Configuration conf = master.getConfiguration();
String fqtn = request.getParameter("name");
final String escaped_fqtn = StringEscapeUtils.escapeHtml4(fqtn);
Table table;
String tableHeader;
boolean withReplica = false;
boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false);
@@ -127,8 +147,11 @@
pageTitle = "Table: " + escaped_fqtn;
}
pageContext.setAttribute("pageTitle", pageTitle);
AsyncConnection connection = ConnectionFactory.createAsyncConnection(master.getConfiguration()).get();
AsyncAdmin admin = connection.getAdminBuilder().setOperationTimeout(5, TimeUnit.SECONDS).build();
final AsyncConnection connection = master.getAsyncConnection();
final AsyncAdmin admin = connection.getAdminBuilder()
.setOperationTimeout(5, TimeUnit.SECONDS)
.build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
%>
<jsp:include page="header.jsp">
@@ -351,6 +374,134 @@ if (fqtn != null && master.isInitialized()) {
</div>
</div>
</div>
<h2 id="meta-entries">Meta Entries</h2>
<%
if (!metaBrowser.getErrorMessages().isEmpty()) {
for (final String errorMessage : metaBrowser.getErrorMessages()) {
%>
<div class="alert alert-warning" role="alert">
<%= errorMessage %>
</div>
<%
}
}
%>
<table class="table table-striped">
<tr>
<th>RegionName</th>
<th>Start Key</th>
<th>End Key</th>
<th>Replica ID</th>
<th>RegionState</th>
<th>ServerName</th>
</tr>
<%
final boolean metaScanHasMore;
byte[] lastRow = null;
try (final MetaBrowser.Results results = metaBrowser.getResults()) {
for (final RegionReplicaInfo regionReplicaInfo : results) {
lastRow = Optional.ofNullable(regionReplicaInfo)
.map(RegionReplicaInfo::getRow)
.orElse(null);
if (regionReplicaInfo == null) {
%>
<tr>
<td colspan="6">Null result</td>
</tr>
<%
continue;
}
final String regionNameDisplay = regionReplicaInfo.getRegionName() != null
? Bytes.toStringBinary(regionReplicaInfo.getRegionName())
: "";
final String startKeyDisplay = regionReplicaInfo.getStartKey() != null
? Bytes.toStringBinary(regionReplicaInfo.getStartKey())
: "";
final String endKeyDisplay = regionReplicaInfo.getEndKey() != null
? Bytes.toStringBinary(regionReplicaInfo.getEndKey())
: "";
final String replicaIdDisplay = regionReplicaInfo.getReplicaId() != null
? regionReplicaInfo.getReplicaId().toString()
: "";
final String regionStateDisplay = regionReplicaInfo.getRegionState() != null
? regionReplicaInfo.getRegionState().toString()
: "";
final RegionInfo regionInfo = regionReplicaInfo.getRegionInfo();
final ServerName serverName = regionReplicaInfo.getServerName();
final RegionState.State regionState = regionReplicaInfo.getRegionState();
final int rsPort = master.getRegionServerInfoPort(serverName);
%>
<tr>
<td><%= regionNameDisplay %></td>
<td><%= startKeyDisplay %></td>
<td><%= endKeyDisplay %></td>
<td><%= replicaIdDisplay %></td>
<td><%= regionStateDisplay %></td>
<td><%= buildRegionServerLink(serverName, rsPort, regionInfo, regionState) %></td>
</tr>
<%
}
metaScanHasMore = results.hasMoreResults();
}
%>
</table>
<div class="row">
<div class="col-md-4">
<ul class="pagination" style="margin: 20px 0">
<li>
<a href="<%= metaBrowser.buildFirstPageUrl() %>" aria-label="Previous">
<span aria-hidden="true">&#x21E4;</span>
</a>
</li>
<li<%= metaScanHasMore ? "" : " class=\"disabled\"" %>>
<a<%= metaScanHasMore ? " href=\"" + metaBrowser.buildNextPageUrl(lastRow) + "\"" : "" %> aria-label="Next">
<span aria-hidden="true">&raquo;</span>
</a>
</li>
</ul>
</div>
<div class="col-md-8">
<form action="/table.jsp" method="get" class="form-inline pull-right" style="margin: 20px 0">
<input type="hidden" name="name" value="<%= TableName.META_TABLE_NAME %>" />
<div class="form-group">
<label for="scan-limit">Scan Limit</label>
<input type="text" id="scan-limit" name="<%= MetaBrowser.SCAN_LIMIT_PARAM %>"
class="form-control" placeholder="<%= MetaBrowser.SCAN_LIMIT_DEFAULT %>"
<%= metaBrowser.getScanLimit() != null
? "value=\"" + metaBrowser.getScanLimit() + "\""
: ""
%>
aria-describedby="scan-limit" style="display:inline; width:auto" />
<label for="table-name-filter">Table</label>
<input type="text" id="table-name-filter" name="<%= MetaBrowser.SCAN_TABLE_PARAM %>"
<%= metaBrowser.getScanTable() != null
? "value=\"" + metaBrowser.getScanTable() + "\""
: ""
%>
aria-describedby="scan-filter-table" style="display:inline; width:auto" />
<label for="region-state-filter">Region State</label>
<select class="form-control" id="region-state-filter" style="display:inline; width:auto"
name="<%= MetaBrowser.SCAN_REGION_STATE_PARAM %>">
<option></option>
<%
for (final RegionState.State state : RegionState.State.values()) {
final boolean selected = metaBrowser.getScanRegionState() == state;
%>
<option<%= selected ? " selected" : "" %>><%= state %></option>
<%
}
%>
</select>
<button type="submit" class="btn btn-primary" style="display:inline; width:auto">
Filter Results
</button>
</div>
</form>
</div>
</div>
<%} else {
RegionStates states = master.getAssignmentManager().getRegionStates();
Map<RegionState.State, List<RegionInfo>> regionStates = states.getRegionByStateOfTable(table.getName());
@@ -838,8 +989,6 @@ if (withReplica) {
for(StackTraceElement element : ex.getStackTrace()) {
%><%= StringEscapeUtils.escapeHtml4(element.toString()) %><%
}
} finally {
connection.close();
}
} // end else
%>

View File

@@ -0,0 +1,167 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.util.List;
import java.util.Objects;
import java.util.StringJoiner;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import java.util.stream.Collectors;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.rules.ExternalResource;
import org.junit.rules.TestRule;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* A {@link TestRule} that clears all user namespaces and tables
* {@link ExternalResource#before() before} the test executes. Can be used in either the
* {@link Rule} or {@link ClassRule} positions. Lazily realizes the provided
* {@link AsyncConnection} so as to avoid initialization races with other {@link Rule Rules}.
* <b>Does not</b> {@link AsyncConnection#close() close()} the provided connection instance when
* finished.
* </p>
* Use in combination with {@link MiniClusterRule} and {@link ConnectionRule}, for example:
*
* <pre>{@code
* public class TestMyClass {
* @ClassRule
* public static final MiniClusterRule miniClusterRule = new MiniClusterRule();
*
* private final ConnectionRule connectionRule =
* new ConnectionRule(miniClusterRule::createConnection);
* private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule =
* new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
*
* @Rule
* public TestRule rule = RuleChain
* .outerRule(connectionRule)
* .around(clearUserNamespacesAndTablesRule);
* }
* }</pre>
*/
public class ClearUserNamespacesAndTablesRule extends ExternalResource {
private static final Logger logger =
LoggerFactory.getLogger(ClearUserNamespacesAndTablesRule.class);
private final Supplier<AsyncConnection> connectionSupplier;
private AsyncAdmin admin;
public ClearUserNamespacesAndTablesRule(final Supplier<AsyncConnection> connectionSupplier) {
this.connectionSupplier = connectionSupplier;
}
@Override
protected void before() throws Throwable {
final AsyncConnection connection = Objects.requireNonNull(connectionSupplier.get());
admin = connection.getAdmin();
clearTablesAndNamespaces().join();
}
private CompletableFuture<Void> clearTablesAndNamespaces() {
return deleteUserTables().thenCompose(_void -> deleteUserNamespaces());
}
private CompletableFuture<Void> deleteUserTables() {
return listTableNames()
.thenApply(tableNames -> tableNames.stream()
.map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName)))
.toArray(CompletableFuture[]::new))
.thenCompose(CompletableFuture::allOf);
}
private CompletableFuture<List<TableName>> listTableNames() {
return CompletableFuture
.runAsync(() -> logger.trace("listing tables"))
.thenCompose(_void -> admin.listTableNames(false))
.thenApply(tableNames -> {
if (logger.isTraceEnabled()) {
final StringJoiner joiner = new StringJoiner(", ", "[", "]");
tableNames.stream().map(TableName::getNameAsString).forEach(joiner::add);
logger.trace("found existing tables {}", joiner.toString());
}
return tableNames;
});
}
private CompletableFuture<Boolean> isTableEnabled(final TableName tableName) {
return admin.isTableEnabled(tableName)
.thenApply(isEnabled -> {
logger.trace("table {} is enabled.", tableName);
return isEnabled;
});
}
private CompletableFuture<Void> disableIfEnabled(final TableName tableName) {
return isTableEnabled(tableName)
.thenCompose(isEnabled -> isEnabled
? disableTable(tableName)
: CompletableFuture.completedFuture(null));
}
private CompletableFuture<Void> disableTable(final TableName tableName) {
return CompletableFuture
.runAsync(() -> logger.trace("disabling enabled table {}", tableName))
.thenCompose(_void -> admin.disableTable(tableName));
}
private CompletableFuture<Void> deleteTable(final TableName tableName) {
return CompletableFuture
.runAsync(() -> logger.trace("deleting disabled table {}", tableName))
.thenCompose(_void -> admin.deleteTable(tableName));
}
private CompletableFuture<List<String>> listUserNamespaces() {
return CompletableFuture
.runAsync(() -> logger.trace("listing namespaces"))
.thenCompose(_void -> admin.listNamespaceDescriptors())
.thenApply(namespaceDescriptors -> {
final StringJoiner joiner = new StringJoiner(", ", "[", "]");
final List<String> names = namespaceDescriptors.stream()
.map(NamespaceDescriptor::getName)
.peek(joiner::add)
.collect(Collectors.toList());
logger.trace("found existing namespaces {}", joiner);
return names;
})
.thenApply(namespaces -> namespaces.stream()
.filter(namespace -> !Objects.equals(
namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName()))
.filter(namespace -> !Objects.equals(
namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName()))
.collect(Collectors.toList()));
}
private CompletableFuture<Void> deleteNamespace(final String namespace) {
return CompletableFuture
.runAsync(() -> logger.trace("deleting namespace {}", namespace))
.thenCompose(_void -> admin.deleteNamespace(namespace));
}
private CompletableFuture<Void> deleteUserNamespaces() {
return listUserNamespaces()
.thenCompose(namespaces -> CompletableFuture.allOf(namespaces.stream()
.map(this::deleteNamespace)
.toArray(CompletableFuture[]::new)));
}
}

View File

@@ -0,0 +1,75 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import java.util.function.Supplier;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.rules.ExternalResource;
/**
* A {@link Rule} that manages the lifecycle of an instance of {@link AsyncConnection}. Can be used
* in either the {@link Rule} or {@link ClassRule} positions.
* </p>
* Use in combination with {@link MiniClusterRule}, for example:
*
* <pre>{@code
* public class TestMyClass {
* private static final MiniClusterRule miniClusterRule = new MiniClusterRule();
* private static final ConnectionRule connectionRule =
* new ConnectionRule(miniClusterRule::createConnection);
*
* @ClassRule
* public static final TestRule rule = RuleChain
* .outerRule(miniClusterRule)
* .around(connectionRule);
* }
* }</pre>
*/
public class ConnectionRule extends ExternalResource {
private final Supplier<CompletableFuture<AsyncConnection>> connectionSupplier;
private AsyncConnection connection;
public ConnectionRule(final Supplier<CompletableFuture<AsyncConnection>> connectionSupplier) {
this.connectionSupplier = connectionSupplier;
}
public AsyncConnection getConnection() {
return connection;
}
@Override
protected void before() throws Throwable {
this.connection = connectionSupplier.get().join();
}
@Override
protected void after() {
if (this.connection != null) {
try {
connection.close();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}
}

View File

@@ -1283,10 +1283,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
/**
* Stops mini hbase, zk, and hdfs clusters.
* @throws IOException
* @see #startMiniCluster(int)
*/
public void shutdownMiniCluster() throws Exception {
public void shutdownMiniCluster() throws IOException {
LOG.info("Shutting down minicluster");
shutdownMiniHBaseCluster();
shutdownMiniDFSCluster();

View File

@@ -0,0 +1,92 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase;
import java.io.IOException;
import java.util.concurrent.CompletableFuture;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.rules.ExternalResource;
import org.junit.rules.TestRule;
/**
* A {@link TestRule} that manages an instance of the {@link MiniHBaseCluster}. Can be used in
* either the {@link Rule} or {@link ClassRule} positions. Built on top of an instance of
{@link HBaseTestingUtility}, so be wary of intermixing direct use of that class with this Rule.
* </p>
* Use in combination with {@link ConnectionRule}, for example:
*
* <pre>{@code
* public class TestMyClass {
* @ClassRule
* public static final MiniClusterRule miniClusterRule = new MiniClusterRule();
*
* @Rule
* public final ConnectionRule connectionRule =
* new ConnectionRule(miniClusterRule::createConnection);
* }
* }</pre>
*/
public class MiniClusterRule extends ExternalResource {
private final HBaseTestingUtility testingUtility;
private final StartMiniClusterOption miniClusterOptions;
private MiniHBaseCluster miniCluster;
/**
* Create an instance over the default options provided by {@link StartMiniClusterOption}.
*/
public MiniClusterRule() {
this(StartMiniClusterOption.builder().build());
}
/**
* Create an instance using the provided {@link StartMiniClusterOption}.
*/
public MiniClusterRule(final StartMiniClusterOption miniClusterOptions) {
this.testingUtility = new HBaseTestingUtility();
this.miniClusterOptions = miniClusterOptions;
}
/**
* Create an {@link AsyncConnection} to the managed {@link MiniHBaseCluster}. It's up to the caller
* to {@link AsyncConnection#close() close()} the connection when finished.
*/
public CompletableFuture<AsyncConnection> createConnection() {
if (miniCluster == null) {
throw new IllegalStateException("test cluster not initialized");
}
return ConnectionFactory.createAsyncConnection(miniCluster.getConf());
}
@Override
protected void before() throws Throwable {
miniCluster = testingUtility.startMiniCluster(miniClusterOptions);
}
@Override
protected void after() {
try {
testingUtility.shutdownMiniCluster();
} catch (IOException e) {
throw new RuntimeException(e);
}
}
}

View File

@@ -0,0 +1,56 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.client.hamcrest;
import static org.hamcrest.core.Is.is;
import org.apache.hadoop.hbase.util.Bytes;
import org.hamcrest.Description;
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeDiagnosingMatcher;
/**
* Helper methods for matching against values passed through the helper methods of {@link Bytes}.
*/
public final class BytesMatchers {
private BytesMatchers() {}
public static Matcher<byte[]> bytesAsStringBinary(final String binary) {
return bytesAsStringBinary(is(binary));
}
public static Matcher<byte[]> bytesAsStringBinary(final Matcher<String> matcher) {
return new TypeSafeDiagnosingMatcher<byte[]>() {
@Override protected boolean matchesSafely(byte[] item, Description mismatchDescription) {
final String binary = Bytes.toStringBinary(item);
if (matcher.matches(binary)) {
return true;
}
mismatchDescription.appendText("was a byte[] with a Bytes.toStringBinary value ");
matcher.describeMismatch(binary, mismatchDescription);
return false;
}
@Override public void describeTo(Description description) {
description
.appendText("has a byte[] with a Bytes.toStringBinary value that ")
.appendDescriptionOf(matcher);
}
};
}
}
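A minimal usage sketch (not part of the patch), assuming the usual Hamcrest static imports; this mirrors how the MetaBrowser tests below compose the matcher with startsWith:

// Hedged sketch: assert on a raw rowkey via its Bytes.toStringBinary rendering.
byte[] row = Bytes.toBytes("t1,,1579190400000.0123456789abcdef0123456789abcdef.");
assertThat(row, bytesAsStringBinary(startsWith("t1,,")));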

View File

@@ -0,0 +1,360 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.webapp;
import static org.apache.hadoop.hbase.client.hamcrest.BytesMatchers.bytesAsStringBinary;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.allOf;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.hasProperty;
import static org.hamcrest.Matchers.startsWith;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.hbase.ClearUserNamespacesAndTablesRule;
import org.apache.hadoop.hbase.ConnectionRule;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.MiniClusterRule;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncAdmin;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.util.RegionSplitter;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Rule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.RuleChain;
import org.junit.rules.TestName;
import org.junit.rules.TestRule;
import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils;
/**
* Cluster-backed correctness tests for the functionality provided by {@link MetaBrowser}.
*/
@Category({ MasterTests.class, MediumTests.class})
public class TestMetaBrowser {
@ClassRule
public static final HBaseClassTestRule testRule =
HBaseClassTestRule.forClass(TestMetaBrowser.class);
@ClassRule
public static final MiniClusterRule miniClusterRule = new MiniClusterRule();
private final ConnectionRule connectionRule =
new ConnectionRule(miniClusterRule::createConnection);
private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule =
new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
@Rule
public TestRule rule = RuleChain.outerRule(connectionRule)
.around(clearUserNamespacesAndTablesRule);
@Rule
public TestName testNameRule = new TestName();
private AsyncConnection connection;
private AsyncAdmin admin;
@Before
public void before() {
connection = connectionRule.getConnection();
admin = connection.getAdmin();
}
@Test
public void noFilters() {
final String namespaceName = testNameRule.getMethodName();
final TableName a = TableName.valueOf("a");
final TableName b = TableName.valueOf(namespaceName, "b");
CompletableFuture.allOf(
createTable(a),
createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2)))
.join();
final HttpServletRequest request = new MockRequestBuilder().build();
final List<RegionReplicaInfo> rows;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) {
rows = IterableUtils.toList(results);
}
assertThat(rows, contains(
hasProperty("row", bytesAsStringBinary(startsWith(a + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000")))));
}
@Test
public void limit() {
final String tableName = testNameRule.getMethodName();
createTable(TableName.valueOf(tableName), 8).join();
final HttpServletRequest request = new MockRequestBuilder()
.setLimit(5)
.build();
final List<RegionReplicaInfo> rows;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) {
rows = IterableUtils.toList(results);
}
assertThat(rows, contains(
hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",20000000"))),
hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",40000000"))),
hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",60000000"))),
hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",80000000")))));
}
@Test
public void regionStateFilter() {
final String namespaceName = testNameRule.getMethodName();
final TableName foo = TableName.valueOf(namespaceName, "foo");
final TableName bar = TableName.valueOf(namespaceName, "bar");
createNamespace(namespaceName)
.thenCompose(_void1 -> CompletableFuture.allOf(
createTable(foo, 2).thenCompose(_void2 -> admin.disableTable(foo)),
createTable(bar, 2)))
.join();
final HttpServletRequest request = new MockRequestBuilder()
.setLimit(10_000)
.setRegionState(RegionState.State.OPEN)
.setTable(namespaceName)
.build();
final List<RegionReplicaInfo> rows;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) {
rows = IterableUtils.toList(results);
}
assertThat(rows, contains(
hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",80000000")))));
}
@Test
public void scanTableFilter() {
final String namespaceName = testNameRule.getMethodName();
final TableName a = TableName.valueOf("a");
final TableName b = TableName.valueOf(namespaceName, "b");
CompletableFuture.allOf(
createTable(a),
createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2)))
.join();
final HttpServletRequest request = new MockRequestBuilder()
.setTable(namespaceName)
.build();
final List<RegionReplicaInfo> rows;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) {
rows = IterableUtils.toList(results);
}
assertThat(rows, contains(
hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000")))));
}
@Test
public void paginateWithReplicas() {
final String namespaceName = testNameRule.getMethodName();
final TableName a = TableName.valueOf("a");
final TableName b = TableName.valueOf(namespaceName, "b");
CompletableFuture.allOf(
createTableWithReplicas(a, 2),
createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2)))
.join();
final HttpServletRequest request1 = new MockRequestBuilder()
.setLimit(2)
.build();
final List<RegionReplicaInfo> rows1;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request1).getResults()) {
rows1 = IterableUtils.toList(results);
}
assertThat(rows1, contains(
allOf(
hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))),
hasProperty("replicaId", equalTo(0))),
allOf(
hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))),
hasProperty("replicaId", equalTo(1)))));
final HttpServletRequest request2 = new MockRequestBuilder()
.setLimit(2)
.setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow()))
.build();
final List<RegionReplicaInfo> rows2;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request2).getResults()) {
rows2 = IterableUtils.toList(results);
}
assertThat(rows2, contains(
hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000")))));
}
@Test
public void paginateWithTableFilter() {
final String namespaceName = testNameRule.getMethodName();
final TableName a = TableName.valueOf("a");
final TableName b = TableName.valueOf(namespaceName, "b");
CompletableFuture.allOf(
createTable(a),
createNamespace(namespaceName).thenCompose(_void -> createTable(b, 5)))
.join();
final HttpServletRequest request1 = new MockRequestBuilder()
.setLimit(2)
.setTable(namespaceName)
.build();
final List<RegionReplicaInfo> rows1;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request1).getResults()) {
rows1 = IterableUtils.toList(results);
}
assertThat(rows1, contains(
hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))),
hasProperty("row", bytesAsStringBinary(startsWith(b + ",33333333")))));
final HttpServletRequest request2 = new MockRequestBuilder()
.setLimit(2)
.setTable(namespaceName)
.setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow()))
.build();
final List<RegionReplicaInfo> rows2;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request2).getResults()) {
rows2 = IterableUtils.toList(results);
}
assertThat(rows2, contains(
hasProperty("row", bytesAsStringBinary(startsWith(b + ",66666666"))),
hasProperty("row", bytesAsStringBinary(startsWith(b + ",99999999")))));
final HttpServletRequest request3 = new MockRequestBuilder()
.setLimit(2)
.setTable(namespaceName)
.setStart(MetaBrowser.buildStartParamFrom(rows2.get(rows2.size() - 1).getRow()))
.build();
final List<RegionReplicaInfo> rows3;
try (final MetaBrowser.Results results = new MetaBrowser(connection, request3).getResults()) {
rows3 = IterableUtils.toList(results);
}
assertThat(rows3, contains(
hasProperty("row", bytesAsStringBinary(startsWith(b + ",cccccccc")))));
}
private ColumnFamilyDescriptor columnFamilyDescriptor() {
return ColumnFamilyDescriptorBuilder.of("f1");
}
private TableDescriptor tableDescriptor(final TableName tableName) {
return TableDescriptorBuilder.newBuilder(tableName)
.setColumnFamily(columnFamilyDescriptor())
.build();
}
private TableDescriptor tableDescriptor(final TableName tableName, final int replicaCount) {
return TableDescriptorBuilder.newBuilder(tableName)
.setRegionReplication(replicaCount)
.setColumnFamily(columnFamilyDescriptor())
.build();
}
private CompletableFuture<Void> createTable(final TableName tableName) {
return admin.createTable(tableDescriptor(tableName));
}
private CompletableFuture<Void> createTable(final TableName tableName, final int splitCount) {
return admin.createTable(
tableDescriptor(tableName),
new RegionSplitter.HexStringSplit().split(splitCount));
}
private CompletableFuture<Void> createTableWithReplicas(final TableName tableName,
final int replicaCount) {
return admin.createTable(tableDescriptor(tableName, replicaCount));
}
private CompletableFuture<Void> createNamespace(final String namespace) {
final NamespaceDescriptor descriptor = NamespaceDescriptor.create(namespace).build();
return admin.createNamespace(descriptor);
}
/**
* Helper for mocking an {@link HttpServletRequest} relevant to the test.
*/
static class MockRequestBuilder {
private String limit = null;
private String regionState = null;
private String start = null;
private String table = null;
public MockRequestBuilder setLimit(final int value) {
this.limit = Integer.toString(value);
return this;
}
public MockRequestBuilder setLimit(final String value) {
this.limit = value;
return this;
}
public MockRequestBuilder setRegionState(final RegionState.State value) {
this.regionState = value.toString();
return this;
}
public MockRequestBuilder setRegionState(final String value) {
this.regionState = value;
return this;
}
public MockRequestBuilder setStart(final String value) {
this.start = value;
return this;
}
public MockRequestBuilder setTable(final String value) {
this.table = value;
return this;
}
public HttpServletRequest build() {
final HttpServletRequest request = mock(HttpServletRequest.class);
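// Stub only the request URI and the parameters that MetaBrowser reads; any other
// parameter returns Mockito's default of null.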
when(request.getRequestURI()).thenReturn("/table.jsp");
when(request.getParameter("name")).thenReturn("hbase%3Ameta");
when(request.getParameter("scan_limit")).thenReturn(limit);
when(request.getParameter("scan_region_state")).thenReturn(regionState);
when(request.getParameter("scan_start")).thenReturn(start);
when(request.getParameter("scan_table")).thenReturn(table);
return request;
}
}
}

View File

@ -0,0 +1,168 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master.webapp;
import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.Matchers.contains;
import static org.hamcrest.Matchers.containsInAnyOrder;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNull;
import javax.servlet.http.HttpServletRequest;
import org.apache.hadoop.hbase.HBaseClassTestRule;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.AsyncConnection;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.master.webapp.TestMetaBrowser.MockRequestBuilder;
import org.apache.hadoop.hbase.testclassification.MasterTests;
import org.apache.hadoop.hbase.testclassification.SmallTests;
import org.junit.Before;
import org.junit.ClassRule;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.mockito.Mock;
import org.mockito.MockitoAnnotations;
/**
* Cluster-free correctness tests for the request parsing and URL building provided by
* {@link MetaBrowser}, using a mocked connection.
*/
@Category({ MasterTests.class, SmallTests.class })
public class TestMetaBrowserNoCluster {
@ClassRule
public static final HBaseClassTestRule testRule =
HBaseClassTestRule.forClass(TestMetaBrowserNoCluster.class);
@Mock
private AsyncConnection connection;
@Before
public void before() {
MockitoAnnotations.initMocks(this);
}
@Test
public void buildFirstPageQueryStringNoParams() {
final HttpServletRequest request = new MockRequestBuilder().build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertEquals("hbase:meta", metaBrowser.getName());
assertNull(metaBrowser.getScanLimit());
assertNull(metaBrowser.getScanRegionState());
assertNull(metaBrowser.getScanStart());
assertNull(metaBrowser.getScanTable());
assertEquals("/table.jsp?name=hbase%3Ameta", metaBrowser.buildFirstPageUrl());
}
@Test
public void buildFirstPageQueryStringNonNullParams() {
final HttpServletRequest request = new MockRequestBuilder()
.setLimit(50)
.setRegionState(RegionState.State.ABNORMALLY_CLOSED)
.setTable("foo%3Abar")
.build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertEquals(50, metaBrowser.getScanLimit().intValue());
assertEquals(RegionState.State.ABNORMALLY_CLOSED, metaBrowser.getScanRegionState());
assertEquals(TableName.valueOf("foo", "bar"), metaBrowser.getScanTable());
assertEquals(
"/table.jsp?name=hbase%3Ameta"
+ "&scan_limit=50"
+ "&scan_region_state=ABNORMALLY_CLOSED"
+ "&scan_table=foo%3Abar",
metaBrowser.buildNextPageUrl(null));
}
@Test
public void buildNextPageQueryString() {
final HttpServletRequest request = new MockRequestBuilder().build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
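// The binary start row is rendered via Bytes.toStringBinary and URL-encoded into the
// scan_start parameter of the next-page link.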
assertEquals(
"/table.jsp?name=hbase%3Ameta&scan_start=%255Cx80%255Cx00%255Cx7F",
metaBrowser.buildNextPageUrl(new byte[] { Byte.MIN_VALUE, (byte) 0, Byte.MAX_VALUE }));
}
@Test
public void unparseableLimitParam() {
final HttpServletRequest request = new MockRequestBuilder()
.setLimit("foo")
.build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertNull(metaBrowser.getScanLimit());
assertThat(metaBrowser.getErrorMessages(), contains(
"Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer."));
}
@Test
public void zeroLimitParam() {
final HttpServletRequest request = new MockRequestBuilder()
.setLimit(0)
.build();
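// A non-positive limit falls back to SCAN_LIMIT_DEFAULT and records an error message.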
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertEquals(MetaBrowser.SCAN_LIMIT_DEFAULT, metaBrowser.getScanLimit().intValue());
assertThat(metaBrowser.getErrorMessages(), contains(
"Requested SCAN_LIMIT value 0 is <= 0."));
}
@Test
public void negativeLimitParam() {
final HttpServletRequest request = new MockRequestBuilder()
.setLimit(-10)
.build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertEquals(MetaBrowser.SCAN_LIMIT_DEFAULT, metaBrowser.getScanLimit().intValue());
assertThat(metaBrowser.getErrorMessages(), contains(
"Requested SCAN_LIMIT value -10 is <= 0."));
}
@Test
public void excessiveLimitParam() {
final HttpServletRequest request = new MockRequestBuilder()
.setLimit(10_001)
.build();
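// Limits above SCAN_LIMIT_MAX (10000) are clamped to the maximum and record an error message.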
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertEquals(MetaBrowser.SCAN_LIMIT_MAX, metaBrowser.getScanLimit().intValue());
assertThat(metaBrowser.getErrorMessages(), contains(
"Requested SCAN_LIMIT value 10001 exceeds maximum value 10000."));
}
@Test
public void invalidRegionStateParam() {
final HttpServletRequest request = new MockRequestBuilder()
.setRegionState("foo")
.build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertNull(metaBrowser.getScanRegionState());
assertThat(metaBrowser.getErrorMessages(), contains(
"Requested SCAN_REGION_STATE value 'foo' cannot be parsed as a RegionState."));
}
@Test
public void multipleErrorMessages() {
final HttpServletRequest request = new MockRequestBuilder()
.setLimit("foo")
.setRegionState("bar")
.build();
final MetaBrowser metaBrowser = new MetaBrowser(connection, request);
assertThat(metaBrowser.getErrorMessages(), containsInAnyOrder(
"Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.",
"Requested SCAN_REGION_STATE value 'bar' cannot be parsed as a RegionState."
));
}
}

View File

@ -2152,6 +2152,12 @@
<version>${hamcrest.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.hamcrest</groupId>
<artifactId>hamcrest-library</artifactId>
<version>${hamcrest.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.mockito</groupId>
<artifactId>mockito-core</artifactId>