From 00fc46756abb99de6f833997499505f89c9752e8 Mon Sep 17 00:00:00 2001 From: Nick Dimiduk Date: Thu, 16 Jan 2020 08:46:39 -0800 Subject: [PATCH] HBASE-23653 Expose content of meta table in web ui (#1020) Adds a display of the content of 'hbase:meta' to the Master's table.jsp, when that table is selected. Supports basic pagination, filtering, &c. Signed-off-by: stack Signed-off-by: Bharath Vissapragada --- .../apache/hadoop/hbase/RegionLocations.java | 10 +- .../hbase/client/TableDescriptorBuilder.java | 1 + hbase-server/pom.xml | 5 + .../hbase/master/webapp/MetaBrowser.java | 424 ++++++++++++++++++ .../master/webapp/RegionReplicaInfo.java | 143 ++++++ .../resources/hbase-webapps/master/table.jsp | 197 +++++++- .../ClearUserNamespacesAndTablesRule.java | 167 +++++++ .../apache/hadoop/hbase/ConnectionRule.java | 75 ++++ .../hadoop/hbase/HBaseTestingUtility.java | 3 +- .../apache/hadoop/hbase/MiniClusterRule.java | 92 ++++ .../hbase/client/hamcrest/BytesMatchers.java | 56 +++ .../hbase/master/webapp/TestMetaBrowser.java | 360 +++++++++++++++ .../webapp/TestMetaBrowserNoCluster.java | 168 +++++++ pom.xml | 6 + 14 files changed, 1680 insertions(+), 27 deletions(-) create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/MetaBrowser.java create mode 100644 hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/RegionReplicaInfo.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowser.java create mode 100644 hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowserNoCluster.java diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index e119ebbb2fc..0d3a464e0f8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -18,7 +18,10 @@ package org.apache.hadoop.hbase; +import java.util.Arrays; import java.util.Collection; +import java.util.Iterator; + import org.apache.hadoop.hbase.client.RegionInfo; import org.apache.hadoop.hbase.client.RegionReplicaUtil; import org.apache.hadoop.hbase.util.Bytes; @@ -31,7 +34,7 @@ import org.apache.yetus.audience.InterfaceAudience; * (assuming small number of locations) */ @InterfaceAudience.Private -public class RegionLocations { +public class RegionLocations implements Iterable { private final int numNonNullElements; @@ -361,6 +364,11 @@ public class RegionLocations { return null; } + @Override + public Iterator iterator() { + return Arrays.asList(locations).iterator(); + } + @Override public String toString() { StringBuilder builder = new StringBuilder("["); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java index b97e85aab3b..20acf3af55a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/TableDescriptorBuilder.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; /** + * Convenience class for composing an instance of {@link TableDescriptor}. * @since 2.0.0 */ @InterfaceAudience.Public diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml index 741078b6a08..57227abccdd 100644 --- a/hbase-server/pom.xml +++ b/hbase-server/pom.xml @@ -442,6 +442,11 @@ hamcrest-core test + + org.hamcrest + hamcrest-library + test + org.bouncycastle bcprov-jdk15on diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/MetaBrowser.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/MetaBrowser.java new file mode 100644 index 00000000000..5b07427599c --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/MetaBrowser.java @@ -0,0 +1,424 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.webapp; + +import java.io.UnsupportedEncodingException; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.stream.StreamSupport; +import javax.servlet.http.HttpServletRequest; +import org.apache.commons.lang3.StringUtils; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.CompareOperator; +import org.apache.hadoop.hbase.HConstants; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.AdvancedScanResultConsumer; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.apache.hadoop.hbase.client.AsyncTable; +import org.apache.hadoop.hbase.client.ResultScanner; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.filter.PrefixFilter; +import org.apache.hadoop.hbase.filter.SingleColumnValueFilter; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; +import org.apache.hbase.thirdparty.com.google.common.collect.Iterators; +import org.apache.hbase.thirdparty.io.netty.handler.codec.http.QueryStringEncoder; + +/** + *

+ * <p>
+ * Support class for the "Meta Entries" section in
+ * {@code resources/hbase-webapps/master/table.jsp}.
+ * </p>
+ * <p>
+ * <b>Interface.</b> This class's intended consumer is {@code table.jsp}. As such, its primary
+ * interface is the active {@link HttpServletRequest}, from which it uses the {@code scan_*}
+ * request parameters. This class supports paging through an optionally filtered view of the
+ * contents of {@code hbase:meta}. Those filters and the pagination offset are specified via
+ * these request parameters. It provides helper methods for constructing pagination links.
+ * </p>
+ * <ul>
+ *   <li>{@value #NAME_PARAM} - the name of the table requested. The only table of our concern
+ *     here is {@code hbase:meta}; any other value is effectively ignored by the giant
+ *     conditional in the jsp.</li>
+ *   <li>{@value #SCAN_LIMIT_PARAM} - specifies a limit on the number of region (replicas)
+ *     rendered by the table in a single request -- a limit on page size. This corresponds to
+ *     the number of {@link RegionReplicaInfo} objects produced by {@link Results#iterator()}.
+ *     When a value for {@code scan_limit} is invalid or not specified, the default value of
+ *     {@value #SCAN_LIMIT_DEFAULT} is used. In order to avoid excessive resource consumption,
+ *     a maximum value of {@value #SCAN_LIMIT_MAX} is enforced.</li>
+ *   <li>{@value #SCAN_REGION_STATE_PARAM} - an optional filter on {@link RegionState}.</li>
+ *   <li>{@value #SCAN_START_PARAM} - specifies the rowkey at which a scan should start. For
+ *     usage details, see the below section on Pagination.</li>
+ *   <li>{@value #SCAN_TABLE_PARAM} - specifies a filter on the values returned, limiting them
+ *     to regions from a specified table. This parameter is implemented as a prefix filter on
+ *     the {@link Scan}, so in effect it can be used for simple namespace and multi-table
+ *     matches.</li>
+ * </ul>
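+ * <p>
+ * For example (the parameter names are the constants above; the host and parameter values are
+ * illustrative only), a request such as
+ * {@code /table.jsp?name=hbase%3Ameta&scan_limit=50&scan_table=foo&scan_region_state=OPEN}
+ * would render at most 50 region replicas, restricted to regions of tables whose names start
+ * with {@code foo} and whose state is {@code OPEN}.
+ * </p>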
+ * <p>
+ * <b>Pagination.</b> A single page of results is made available via {@link #getResults()} / an
+ * instance of {@link Results}. Callers use its {@link Iterator} to consume the page of
+ * {@link RegionReplicaInfo} instances, each of which represents a region or region replica.
+ * Helper methods are provided for building page navigation controls preserving the user's
+ * selected filter set: {@link #buildFirstPageUrl()}, {@link #buildNextPageUrl(byte[])}.
+ * Pagination is implemented using a simple offset + limit system. Offset is provided by the
+ * {@value #SCAN_START_PARAM}, limit via {@value #SCAN_LIMIT_PARAM}. Under the hood, the
+ * {@link Scan} is constructed with {@link Scan#setLimit(int)} set to
+ * ({@value #SCAN_LIMIT_PARAM} + 1), while the {@link Results} {@link Iterator} honors
+ * {@value #SCAN_LIMIT_PARAM}. The +1 allows the caller to know if a "next page" is available
+ * via {@link Results#hasMoreResults()}. Note that this pagination strategy is incomplete when
+ * it comes to region replicas and can potentially omit rendering replicas that fall between the
+ * last rowkey offset and {@code replicaCount % page size}.
+ * </p>
+ * <p>
+ * <b>Error Messages.</b> Any time there's an error parsing user input, a message will be
+ * populated in {@link #getErrorMessages()}. Any fields which produce an error will have their
+ * filter values set to the default, except for a value of {@value #SCAN_LIMIT_PARAM} that
+ * exceeds {@value #SCAN_LIMIT_MAX}, in which case {@value #SCAN_LIMIT_MAX} is used.
+ * </p>
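+ * <p>
+ * A minimal sketch of the intended call pattern from the jsp; the variable names are
+ * illustrative only, not part of this API:
+ * </p>
+ * <pre>{@code
+ *   MetaBrowser metaBrowser = new MetaBrowser(connection, request);
+ *   try (MetaBrowser.Results results = metaBrowser.getResults()) {
+ *     byte[] lastRow = null;
+ *     for (RegionReplicaInfo info : results) {
+ *       lastRow = info != null ? info.getRow() : null;
+ *       // render one row per region replica
+ *     }
+ *     if (results.hasMoreResults()) {
+ *       String nextPage = metaBrowser.buildNextPageUrl(lastRow);
+ *       // render the "next page" link
+ *     }
+ *   }
+ * }</pre>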
+ */ +@InterfaceAudience.Private +public class MetaBrowser { + public static final String NAME_PARAM = "name"; + public static final String SCAN_LIMIT_PARAM = "scan_limit"; + public static final String SCAN_REGION_STATE_PARAM = "scan_region_state"; + public static final String SCAN_START_PARAM = "scan_start"; + public static final String SCAN_TABLE_PARAM = "scan_table"; + + public static final int SCAN_LIMIT_DEFAULT = 10; + public static final int SCAN_LIMIT_MAX = 10_000; + + private final AsyncConnection connection; + private final HttpServletRequest request; + private final List errorMessages; + private final String name; + private final Integer scanLimit; + private final RegionState.State scanRegionState; + private final byte[] scanStart; + private final TableName scanTable; + + public MetaBrowser(final AsyncConnection connection, final HttpServletRequest request) { + this.connection = connection; + this.request = request; + this.errorMessages = new LinkedList<>(); + this.name = resolveName(request); + this.scanLimit = resolveScanLimit(request); + this.scanRegionState = resolveScanRegionState(request); + this.scanStart = resolveScanStart(request); + this.scanTable = resolveScanTable(request); + } + + public List getErrorMessages() { + return errorMessages; + } + + public String getName() { + return name; + } + + public Integer getScanLimit() { + return scanLimit; + } + + public byte[] getScanStart() { + return scanStart; + } + + public RegionState.State getScanRegionState() { + return scanRegionState; + } + + public TableName getScanTable() { + return scanTable; + } + + public Results getResults() { + final AsyncTable asyncTable = + connection.getTable(TableName.META_TABLE_NAME); + return new Results(asyncTable.getScanner(buildScan())); + } + + @Override + public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("scanStart", scanStart) + .append("scanLimit", scanLimit) + .append("scanTable", scanTable) + .append("scanRegionState", scanRegionState) + .toString(); + } + + private static String resolveName(final HttpServletRequest request) { + return resolveRequestParameter(request, NAME_PARAM); + } + + private Integer resolveScanLimit(final HttpServletRequest request) { + final String requestValueStr = resolveRequestParameter(request, SCAN_LIMIT_PARAM); + if (StringUtils.isBlank(requestValueStr)) { + return null; + } + + final Integer requestValue = tryParseInt(requestValueStr); + if (requestValue == null) { + errorMessages.add(buildScanLimitMalformedErrorMessage(requestValueStr)); + return null; + } + if (requestValue <= 0) { + errorMessages.add(buildScanLimitLTEQZero(requestValue)); + return SCAN_LIMIT_DEFAULT; + } + + final int truncatedValue = Math.min(requestValue, SCAN_LIMIT_MAX); + if (requestValue != truncatedValue) { + errorMessages.add(buildScanLimitExceededErrorMessage(requestValue)); + } + return truncatedValue; + } + + private RegionState.State resolveScanRegionState(final HttpServletRequest request) { + final String requestValueStr = resolveRequestParameter(request, SCAN_REGION_STATE_PARAM); + if (requestValueStr == null) { + return null; + } + final RegionState.State requestValue = tryValueOf(RegionState.State.class, requestValueStr); + if (requestValue == null) { + errorMessages.add(buildScanRegionStateMalformedErrorMessage(requestValueStr)); + return null; + } + return requestValue; + } + + private static byte[] resolveScanStart(final HttpServletRequest request) { + // TODO: handle replicas that fall between the last rowkey 
and pagination limit. + final String requestValue = resolveRequestParameter(request, SCAN_START_PARAM); + if (requestValue == null) { + return null; + } + return Bytes.toBytesBinary(requestValue); + } + + private static TableName resolveScanTable(final HttpServletRequest request) { + final String requestValue = resolveRequestParameter(request, SCAN_TABLE_PARAM); + if (requestValue == null) { + return null; + } + return TableName.valueOf(requestValue); + } + + private static String resolveRequestParameter(final HttpServletRequest request, + final String param) { + if (request == null) { + return null; + } + final String requestValueStrEnc = request.getParameter(param); + if (StringUtils.isBlank(requestValueStrEnc)) { + return null; + } + return urlDecode(requestValueStrEnc); + } + + private static Filter buildTableFilter(final TableName tableName) { + return new PrefixFilter(tableName.toBytes()); + } + + private static Filter buildScanRegionStateFilter(final RegionState.State state) { + return new SingleColumnValueFilter( + HConstants.CATALOG_FAMILY, + HConstants.STATE_QUALIFIER, + CompareOperator.EQUAL, + // use the same serialization strategy as found in MetaTableAccessor#addRegionStateToPut + Bytes.toBytes(state.name())); + } + + private Filter buildScanFilter() { + if (scanTable == null && scanRegionState == null) { + return null; + } + + final List filters = new ArrayList<>(2); + if (scanTable != null) { + filters.add(buildTableFilter(scanTable)); + } + if (scanRegionState != null) { + filters.add(buildScanRegionStateFilter(scanRegionState)); + } + if (filters.size() == 1) { + return filters.get(0); + } + return new FilterList(FilterList.Operator.MUST_PASS_ALL, filters); + } + + private Scan buildScan() { + final Scan metaScan = new Scan() + .addFamily(HConstants.CATALOG_FAMILY) + .readVersions(1) + .setLimit((scanLimit != null ? scanLimit : SCAN_LIMIT_DEFAULT) + 1); + if (scanStart != null) { + metaScan.withStartRow(scanStart, false); + } + final Filter filter = buildScanFilter(); + if (filter != null) { + metaScan.setFilter(filter); + } + return metaScan; + } + + /** + * Adds {@code value} to {@code encoder} under {@code paramName} when {@code value} is non-null. 
+ */ + private void addParam(final QueryStringEncoder encoder, final String paramName, + final Object value) { + if (value != null) { + encoder.addParam(paramName, value.toString()); + } + } + + private QueryStringEncoder buildFirstPageEncoder() { + final QueryStringEncoder encoder = + new QueryStringEncoder(request.getRequestURI()); + addParam(encoder, NAME_PARAM, name); + addParam(encoder, SCAN_LIMIT_PARAM, scanLimit); + addParam(encoder, SCAN_REGION_STATE_PARAM, scanRegionState); + addParam(encoder, SCAN_TABLE_PARAM, scanTable); + return encoder; + } + + public String buildFirstPageUrl() { + return buildFirstPageEncoder().toString(); + } + + static String buildStartParamFrom(final byte[] lastRow) { + if (lastRow == null) { + return null; + } + return urlEncode(Bytes.toStringBinary(lastRow)); + } + + public String buildNextPageUrl(final byte[] lastRow) { + final QueryStringEncoder encoder = buildFirstPageEncoder(); + final String startRow = buildStartParamFrom(lastRow); + addParam(encoder, SCAN_START_PARAM, startRow); + return encoder.toString(); + } + + private static String urlEncode(final String val) { + if (StringUtils.isEmpty(val)) { + return null; + } + try { + return URLEncoder.encode(val, StandardCharsets.UTF_8.toString()); + } catch (UnsupportedEncodingException e) { + return null; + } + } + + private static String urlDecode(final String val) { + if (StringUtils.isEmpty(val)) { + return null; + } + try { + return URLDecoder.decode(val, StandardCharsets.UTF_8.toString()); + } catch (UnsupportedEncodingException e) { + return null; + } + } + + private static Integer tryParseInt(final String val) { + if (StringUtils.isEmpty(val)) { + return null; + } + try { + return Integer.parseInt(val); + } catch (NumberFormatException e) { + return null; + } + } + + private static > T tryValueOf(final Class clazz, + final String value) { + if (clazz == null || value == null) { + return null; + } + try { + return T.valueOf(clazz, value); + } catch (IllegalArgumentException e) { + return null; + } + } + + private static String buildScanLimitExceededErrorMessage(final int requestValue) { + return String.format( + "Requested SCAN_LIMIT value %d exceeds maximum value %d.", requestValue, SCAN_LIMIT_MAX); + } + + private static String buildScanLimitMalformedErrorMessage(final String requestValue) { + return String.format( + "Requested SCAN_LIMIT value '%s' cannot be parsed as an integer.", requestValue); + } + + private static String buildScanLimitLTEQZero(final int requestValue) { + return String.format("Requested SCAN_LIMIT value %d is <= 0.", requestValue); + } + + private static String buildScanRegionStateMalformedErrorMessage(final String requestValue) { + return String.format( + "Requested SCAN_REGION_STATE value '%s' cannot be parsed as a RegionState.", requestValue); + } + + /** + * Encapsulates the results produced by this {@link MetaBrowser} instance. + */ + public final class Results implements AutoCloseable, Iterable { + + private final ResultScanner resultScanner; + private final Iterator sourceIterator; + + private Results(final ResultScanner resultScanner) { + this.resultScanner = resultScanner; + this.sourceIterator = StreamSupport.stream(resultScanner.spliterator(), false) + .map(RegionReplicaInfo::from) + .flatMap(Collection::stream) + .iterator(); + } + + /** + * @return {@code true} when the underlying {@link ResultScanner} is not yet exhausted, + * {@code false} otherwise. 
+ */ + public boolean hasMoreResults() { + return sourceIterator.hasNext(); + } + + @Override + public void close() { + if (resultScanner != null) { + resultScanner.close(); + } + } + + @Override public Iterator iterator() { + return Iterators.limit(sourceIterator, scanLimit != null ? scanLimit : SCAN_LIMIT_DEFAULT); + } + } +} diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/RegionReplicaInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/RegionReplicaInfo.java new file mode 100644 index 00000000000..554d49bfc35 --- /dev/null +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/webapp/RegionReplicaInfo.java @@ -0,0 +1,143 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.webapp; + +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.StreamSupport; +import org.apache.commons.lang3.builder.EqualsBuilder; +import org.apache.commons.lang3.builder.HashCodeBuilder; +import org.apache.commons.lang3.builder.ToStringBuilder; +import org.apache.commons.lang3.builder.ToStringStyle; +import org.apache.hadoop.hbase.HRegionLocation; +import org.apache.hadoop.hbase.MetaTableAccessor; +import org.apache.hadoop.hbase.RegionLocations; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.client.RegionInfo; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.assignment.RegionStateStore; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; + +/** + * A POJO that consolidates the information about a single region replica that's stored in meta. + */ +@InterfaceAudience.Private +public final class RegionReplicaInfo { + private final byte[] row; + private final RegionInfo regionInfo; + private final RegionState.State regionState; + private final ServerName serverName; + + private RegionReplicaInfo(final Result result, final HRegionLocation location) { + this.row = result != null ? result.getRow() : null; + this.regionInfo = location != null ? location.getRegion() : null; + this.regionState = (result != null && regionInfo != null) + ? RegionStateStore.getRegionState(result, regionInfo) + : null; + this.serverName = location != null ? 
location.getServerName() : null; + } + + public static List from(final Result result) { + if (result == null) { + return Collections.singletonList(null); + } + + final RegionLocations locations = MetaTableAccessor.getRegionLocations(result); + if (locations == null) { + return Collections.singletonList(null); + } + + return StreamSupport.stream(locations.spliterator(), false) + .map(location -> new RegionReplicaInfo(result, location)) + .collect(Collectors.toList()); + } + + public byte[] getRow() { + return row; + } + + public RegionInfo getRegionInfo() { + return regionInfo; + } + + public byte[] getRegionName() { + return regionInfo != null ? regionInfo.getRegionName() : null; + } + + public byte[] getStartKey() { + return regionInfo != null ? regionInfo.getStartKey() : null; + } + + public byte[] getEndKey() { + return regionInfo != null ? regionInfo.getEndKey() : null; + } + + public Integer getReplicaId() { + return regionInfo != null ? regionInfo.getReplicaId() : null; + } + + public RegionState.State getRegionState() { + return regionState; + } + + public ServerName getServerName() { + return serverName; + } + + @Override + public boolean equals(Object other) { + if (this == other) { + return true; + } + + if (other == null || getClass() != other.getClass()) { + return false; + } + + RegionReplicaInfo that = (RegionReplicaInfo) other; + + return new EqualsBuilder() + .append(row, that.row) + .append(regionInfo, that.regionInfo) + .append(regionState, that.regionState) + .append(serverName, that.serverName) + .isEquals(); + } + + @Override + public int hashCode() { + return new HashCodeBuilder(17, 37) + .append(row) + .append(regionInfo) + .append(regionState) + .append(serverName) + .toHashCode(); + } + + @Override public String toString() { + return new ToStringBuilder(this, ToStringStyle.SHORT_PREFIX_STYLE) + .append("row", Bytes.toStringBinary(row)) + .append("regionInfo", regionInfo) + .append("regionState", regionState) + .append("serverName", serverName) + .toString(); + } +} diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp index 6a76d995a70..b5c677a9a53 100644 --- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp +++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp @@ -17,76 +17,96 @@ * limitations under the License. 
*/ --%> -<%@page import="java.net.URLEncoder"%> <%@ page contentType="text/html;charset=UTF-8" import="static org.apache.commons.lang3.StringEscapeUtils.escapeXml" + import="java.net.URLEncoder" import="java.util.ArrayList" import="java.util.Collection" import="java.util.HashMap" import="java.util.LinkedHashMap" import="java.util.List" import="java.util.Map" + import="java.util.Optional" import="java.util.TreeMap" - import=" java.util.concurrent.TimeUnit" + import="java.util.concurrent.TimeUnit" import="org.apache.commons.lang3.StringEscapeUtils" import="org.apache.hadoop.conf.Configuration" - import="org.apache.hadoop.hbase.HTableDescriptor" import="org.apache.hadoop.hbase.HColumnDescriptor" import="org.apache.hadoop.hbase.HConstants" import="org.apache.hadoop.hbase.HRegionLocation" + import="org.apache.hadoop.hbase.HTableDescriptor" + import="org.apache.hadoop.hbase.RegionMetrics" + import="org.apache.hadoop.hbase.RegionMetricsBuilder" + import="org.apache.hadoop.hbase.ServerMetrics" import="org.apache.hadoop.hbase.ServerName" + import="org.apache.hadoop.hbase.Size" import="org.apache.hadoop.hbase.TableName" import="org.apache.hadoop.hbase.TableNotFoundException" import="org.apache.hadoop.hbase.client.AsyncAdmin" import="org.apache.hadoop.hbase.client.AsyncConnection" import="org.apache.hadoop.hbase.client.CompactionState" - import="org.apache.hadoop.hbase.client.ConnectionFactory" import="org.apache.hadoop.hbase.client.RegionInfo" import="org.apache.hadoop.hbase.client.RegionInfoBuilder" import="org.apache.hadoop.hbase.client.RegionLocator" import="org.apache.hadoop.hbase.client.RegionReplicaUtil" import="org.apache.hadoop.hbase.client.Table" import="org.apache.hadoop.hbase.master.HMaster" - import="org.apache.hadoop.hbase.master.assignment.RegionStates" import="org.apache.hadoop.hbase.master.RegionState" + import="org.apache.hadoop.hbase.master.assignment.RegionStates" + import="org.apache.hadoop.hbase.master.webapp.MetaBrowser" + import="org.apache.hadoop.hbase.master.webapp.RegionReplicaInfo" import="org.apache.hadoop.hbase.quotas.QuotaSettingsFactory" - import="org.apache.hadoop.hbase.quotas.QuotaTableUtil" - import="org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot" - import="org.apache.hadoop.hbase.quotas.ThrottleSettings" - import="org.apache.hadoop.hbase.util.Bytes" - import="org.apache.hadoop.hbase.util.FSUtils" - import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator" - import="org.apache.hadoop.util.StringUtils" - import="org.apache.hbase.thirdparty.com.google.protobuf.ByteString"%> + import="org.apache.hadoop.hbase.quotas.QuotaTableUtil"%> +<%@ page import="org.apache.hadoop.hbase.quotas.SpaceQuotaSnapshot" %> +<%@ page import="org.apache.hadoop.hbase.quotas.ThrottleSettings" %> +<%@ page import="org.apache.hadoop.hbase.util.Bytes" %> +<%@ page import="org.apache.hadoop.hbase.util.FSUtils" %> +<%@ page import="org.apache.hadoop.hbase.zookeeper.MetaTableLocator" %> +<%@ page import="org.apache.hadoop.util.StringUtils" %> +<%@ page import="org.apache.hbase.thirdparty.com.google.protobuf.ByteString" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.Quotas" %> <%@ page import="org.apache.hadoop.hbase.shaded.protobuf.generated.QuotaProtos.SpaceQuota" %> -<%@ page import="org.apache.hadoop.hbase.ServerMetrics" %> -<%@ page import="org.apache.hadoop.hbase.RegionMetrics" %> 
-<%@ page import="org.apache.hadoop.hbase.Size" %> -<%@ page import="org.apache.hadoop.hbase.RegionMetricsBuilder" %> <%! /** * @return An empty region load stamped with the passed in regionInfo * region name. */ - private RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) { + private static RegionMetrics getEmptyRegionMetrics(final RegionInfo regionInfo) { return RegionMetricsBuilder.toRegionMetrics(ClusterStatusProtos.RegionLoad.newBuilder(). setRegionSpecifier(HBaseProtos.RegionSpecifier.newBuilder(). setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME). setValue(ByteString.copyFrom(regionInfo.getRegionName())).build()).build()); } + + /** + * Given dicey information that may or not be available in meta, render a link to the region on + * its region server. + * @return an anchor tag if one can be built, {@code null} otherwise. + */ + private static String buildRegionServerLink(final ServerName serverName, final int rsInfoPort, + final RegionInfo regionInfo, final RegionState.State regionState) { + if (serverName == null || regionInfo == null) { return null; } + + if (regionState != RegionState.State.OPEN) { + // region is assigned to RS, but RS knows nothing of it. don't bother with a link. + return serverName.getServerName(); + } + + final String socketAddress = serverName.getHostname() + ":" + rsInfoPort; + final String URI = "//" + socketAddress + "/region.jsp" + + "?name=" + regionInfo.getEncodedName(); + return "" + serverName.getServerName() + ""; + } %> <% - final String ZEROKB = "0 KB"; final String ZEROMB = "0 MB"; HMaster master = (HMaster)getServletContext().getAttribute(HMaster.MASTER); Configuration conf = master.getConfiguration(); String fqtn = request.getParameter("name"); final String escaped_fqtn = StringEscapeUtils.escapeHtml4(fqtn); Table table; - String tableHeader; boolean withReplica = false; boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false); boolean readOnly = conf.getBoolean("hbase.master.ui.readonly", false); @@ -127,8 +147,11 @@ pageTitle = "Table: " + escaped_fqtn; } pageContext.setAttribute("pageTitle", pageTitle); - AsyncConnection connection = ConnectionFactory.createAsyncConnection(master.getConfiguration()).get(); - AsyncAdmin admin = connection.getAdminBuilder().setOperationTimeout(5, TimeUnit.SECONDS).build(); + final AsyncConnection connection = master.getAsyncConnection(); + final AsyncAdmin admin = connection.getAdminBuilder() + .setOperationTimeout(5, TimeUnit.SECONDS) + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); %> @@ -351,6 +374,134 @@ if (fqtn != null && master.isInitialized()) { +

+<h2 id="meta-entries">Meta Entries</h2>

+<% + if (!metaBrowser.getErrorMessages().isEmpty()) { + for (final String errorMessage : metaBrowser.getErrorMessages()) { +%> + +<% + } + } +%> + + + + + + + + + +<% + final boolean metaScanHasMore; + byte[] lastRow = null; + try (final MetaBrowser.Results results = metaBrowser.getResults()) { + for (final RegionReplicaInfo regionReplicaInfo : results) { + lastRow = Optional.ofNullable(regionReplicaInfo) + .map(RegionReplicaInfo::getRow) + .orElse(null); + if (regionReplicaInfo == null) { +%> + + + +<% + continue; + } + + final String regionNameDisplay = regionReplicaInfo.getRegionName() != null + ? Bytes.toStringBinary(regionReplicaInfo.getRegionName()) + : ""; + final String startKeyDisplay = regionReplicaInfo.getStartKey() != null + ? Bytes.toStringBinary(regionReplicaInfo.getStartKey()) + : ""; + final String endKeyDisplay = regionReplicaInfo.getEndKey() != null + ? Bytes.toStringBinary(regionReplicaInfo.getEndKey()) + : ""; + final String replicaIdDisplay = regionReplicaInfo.getReplicaId() != null + ? regionReplicaInfo.getReplicaId().toString() + : ""; + final String regionStateDisplay = regionReplicaInfo.getRegionState() != null + ? regionReplicaInfo.getRegionState().toString() + : ""; + + final RegionInfo regionInfo = regionReplicaInfo.getRegionInfo(); + final ServerName serverName = regionReplicaInfo.getServerName(); + final RegionState.State regionState = regionReplicaInfo.getRegionState(); + final int rsPort = master.getRegionServerInfoPort(serverName); +%> + + + + + + + + +<% + } + + metaScanHasMore = results.hasMoreResults(); + } +%> +
+  <table class="table table-striped">
+    <tr>
+      <th>RegionName</th>
+      <th>Start Key</th>
+      <th>End Key</th>
+      <th>Replica ID</th>
+      <th>RegionState</th>
+      <th>ServerName</th>
+    </tr>
+      <tr><td colspan="6">Null result</td></tr>
+      <tr>
+        <td><%= regionNameDisplay %></td>
+        <td><%= startKeyDisplay %></td>
+        <td><%= endKeyDisplay %></td>
+        <td><%= replicaIdDisplay %></td>
+        <td><%= regionStateDisplay %></td>
+        <td><%= buildRegionServerLink(serverName, rsPort, regionInfo, regionState) %></td>
+      </tr>
+  </table>
+  <div class="row">
+    <div class="col-md-4">
+      <ul class="pagination" style="margin: 20px 0">
+        <li>
+          <a href="<%= metaBrowser.buildFirstPageUrl() %>" aria-label="Previous">
+            <span aria-hidden="true">&#x21E4;</span>
+          </a>
+        </li>
+        <li<%= metaScanHasMore ? "" : " class=\"disabled\"" %>>
+          <a<%= metaScanHasMore ? " href=\"" + metaBrowser.buildNextPageUrl(lastRow) + "\"" : "" %>
+             aria-label="Next">
+            <span aria-hidden="true">&raquo;</span>
+          </a>
+        </li>
+      </ul>
+    </div>
+    <div class="col-md-8">
+      <form action="/table.jsp" method="get" class="form-inline pull-right" style="margin: 20px 0">
+        <input type="hidden" name="name" value="<%= TableName.META_TABLE_NAME %>" />
+        <label for="scan-limit">Scan Limit</label>
+        <input type="text" id="scan-limit" name="scan_limit"
+               value="<%= metaBrowser.getScanLimit() != null ? metaBrowser.getScanLimit() : "" %>"
+               aria-describedby="scan-limit" style="display:inline; width:auto" />
+        <label for="scan-filter-table">Table</label>
+        <input type="text" id="scan-filter-table" name="scan_table"
+               value="<%= metaBrowser.getScanTable() != null ? metaBrowser.getScanTable() : "" %>"
+               aria-describedby="scan-filter-table" style="display:inline; width:auto" />
+        <button type="submit" class="btn btn-primary">Filter Results</button>
+      </form>
+    </div>
+  </div>
<%} else { RegionStates states = master.getAssignmentManager().getRegionStates(); Map> regionStates = states.getRegionByStateOfTable(table.getName()); @@ -838,8 +989,6 @@ if (withReplica) { for(StackTraceElement element : ex.getStackTrace()) { %><%= StringEscapeUtils.escapeHtml4(element.toString()) %><% } -} finally { - connection.close(); } } // end else %> diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java new file mode 100644 index 00000000000..6400eb8553b --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ClearUserNamespacesAndTablesRule.java @@ -0,0 +1,167 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.util.List; +import java.util.Objects; +import java.util.StringJoiner; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import org.apache.hadoop.hbase.client.AsyncAdmin; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.rules.ExternalResource; +import org.junit.rules.TestRule; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A {@link TestRule} that clears all user namespaces and tables + * {@link ExternalResource#before() before} the test executes. Can be used in either the + * {@link Rule} or {@link ClassRule} positions. Lazily realizes the provided + * {@link AsyncConnection} so as to avoid initialization races with other {@link Rule Rules}. + * Does not {@link AsyncConnection#close() close()} provided connection instance when + * finished. + *

+ * Use in combination with {@link MiniClusterRule} and {@link ConnectionRule}, for example: + * + *
+ * <pre>{@code
+ *   public class TestMyClass {
+ *     @ClassRule
+ *     public static final MiniClusterRule miniClusterRule = new MiniClusterRule();
+ *
+ *     private final ConnectionRule connectionRule =
+ *       new ConnectionRule(miniClusterRule::createConnection);
+ *     private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule =
+ *       new ClearUserNamespacesAndTablesRule(connectionRule::getConnection);
+ *
+ *     @Rule
+ *     public TestRule rule = RuleChain
+ *       .outerRule(connectionRule)
+ *       .around(clearUserNamespacesAndTablesRule);
+ *   }
+ * }</pre>
+ */ +public class ClearUserNamespacesAndTablesRule extends ExternalResource { + private static final Logger logger = + LoggerFactory.getLogger(ClearUserNamespacesAndTablesRule.class); + + private final Supplier connectionSupplier; + private AsyncAdmin admin; + + public ClearUserNamespacesAndTablesRule(final Supplier connectionSupplier) { + this.connectionSupplier = connectionSupplier; + } + + @Override + protected void before() throws Throwable { + final AsyncConnection connection = Objects.requireNonNull(connectionSupplier.get()); + admin = connection.getAdmin(); + + clearTablesAndNamespaces().join(); + } + + private CompletableFuture clearTablesAndNamespaces() { + return deleteUserTables().thenCompose(_void -> deleteUserNamespaces()); + } + + private CompletableFuture deleteUserTables() { + return listTableNames() + .thenApply(tableNames -> tableNames.stream() + .map(tableName -> disableIfEnabled(tableName).thenCompose(_void -> deleteTable(tableName))) + .toArray(CompletableFuture[]::new)) + .thenCompose(CompletableFuture::allOf); + } + + private CompletableFuture> listTableNames() { + return CompletableFuture + .runAsync(() -> logger.trace("listing tables")) + .thenCompose(_void -> admin.listTableNames(false)) + .thenApply(tableNames -> { + if (logger.isTraceEnabled()) { + final StringJoiner joiner = new StringJoiner(", ", "[", "]"); + tableNames.stream().map(TableName::getNameAsString).forEach(joiner::add); + logger.trace("found existing tables {}", joiner.toString()); + } + return tableNames; + }); + } + + private CompletableFuture isTableEnabled(final TableName tableName) { + return admin.isTableEnabled(tableName) + .thenApply(isEnabled -> { + logger.trace("table {} is enabled.", tableName); + return isEnabled; + }); + } + + private CompletableFuture disableIfEnabled(final TableName tableName) { + return isTableEnabled(tableName) + .thenCompose(isEnabled -> isEnabled + ? 
disableTable(tableName) + : CompletableFuture.completedFuture(null)); + } + + private CompletableFuture disableTable(final TableName tableName) { + return CompletableFuture + .runAsync(() -> logger.trace("disabling enabled table {}", tableName)) + .thenCompose(_void -> admin.disableTable(tableName)); + } + + private CompletableFuture deleteTable(final TableName tableName) { + return CompletableFuture + .runAsync(() -> logger.trace("deleting disabled table {}", tableName)) + .thenCompose(_void -> admin.deleteTable(tableName)); + } + + private CompletableFuture> listUserNamespaces() { + return CompletableFuture + .runAsync(() -> logger.trace("listing namespaces")) + .thenCompose(_void -> admin.listNamespaceDescriptors()) + .thenApply(namespaceDescriptors -> { + final StringJoiner joiner = new StringJoiner(", ", "[", "]"); + final List names = namespaceDescriptors.stream() + .map(NamespaceDescriptor::getName) + .peek(joiner::add) + .collect(Collectors.toList()); + logger.trace("found existing namespaces {}", joiner); + return names; + }) + .thenApply(namespaces -> namespaces.stream() + .filter(namespace -> !Objects.equals( + namespace, NamespaceDescriptor.SYSTEM_NAMESPACE.getName())) + .filter(namespace -> !Objects.equals( + namespace, NamespaceDescriptor.DEFAULT_NAMESPACE.getName())) + .collect(Collectors.toList())); + } + + private CompletableFuture deleteNamespace(final String namespace) { + return CompletableFuture + .runAsync(() -> logger.trace("deleting namespace {}", namespace)) + .thenCompose(_void -> admin.deleteNamespace(namespace)); + } + + private CompletableFuture deleteUserNamespaces() { + return listUserNamespaces() + .thenCompose(namespaces -> CompletableFuture.allOf(namespaces.stream() + .map(this::deleteNamespace) + .toArray(CompletableFuture[]::new))); + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java new file mode 100644 index 00000000000..bf4c5aa020e --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ConnectionRule.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase; + +import java.io.IOException; +import java.util.concurrent.CompletableFuture; +import java.util.function.Supplier; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.rules.ExternalResource; + +/** + * A {@link Rule} that manages the lifecycle of an instance of {@link AsyncConnection}. Can be used + * in either the {@link Rule} or {@link ClassRule} positions. + *

+ * Use in combination with {@link MiniClusterRule}, for example: + * + *
+ * <pre>{@code
+ *   public class TestMyClass {
+ *     private static final MiniClusterRule miniClusterRule = new MiniClusterRule();
+ *     private static final ConnectionRule connectionRule =
+ *       new ConnectionRule(miniClusterRule::createConnection);
+ *
+ *     @ClassRule
+ *     public static final TestRule rule = RuleChain
+ *       .outerRule(miniClusterRule)
+ *       .around(connectionRule);
+ *   }
+ * }</pre>
+ */
+public class ConnectionRule extends ExternalResource {
+
+  private final Supplier<CompletableFuture<AsyncConnection>> connectionSupplier;
+  private AsyncConnection connection;
+
+  public ConnectionRule(final Supplier<CompletableFuture<AsyncConnection>> connectionSupplier) {
+    this.connectionSupplier = connectionSupplier;
+  }
+
+  public AsyncConnection getConnection() {
+    return connection;
+  }
+
+  @Override
+  protected void before() throws Throwable {
+    this.connection = connectionSupplier.get().join();
+  }
+
+  @Override
+  protected void after() {
+    if (this.connection != null) {
+      try {
+        connection.close();
+      } catch (IOException e) {
+        throw new RuntimeException(e);
+      }
+    }
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index f1e91debafd..79160cdd0ac 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -1283,10 +1283,9 @@ public class HBaseTestingUtility extends HBaseZKTestingUtility {
   /**
    * Stops mini hbase, zk, and hdfs clusters.
-   * @throws IOException
    * @see #startMiniCluster(int)
    */
-  public void shutdownMiniCluster() throws Exception {
+  public void shutdownMiniCluster() throws IOException {
     LOG.info("Shutting down minicluster");
     shutdownMiniHBaseCluster();
     shutdownMiniDFSCluster();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java
new file mode 100644
index 00000000000..6ac4838275d
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/MiniClusterRule.java
@@ -0,0 +1,92 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import java.io.IOException;
+import java.util.concurrent.CompletableFuture;
+import org.apache.hadoop.hbase.client.AsyncConnection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.rules.ExternalResource;
+import org.junit.rules.TestRule;
+
+/**
+ * A {@link TestRule} that manages an instance of the {@link MiniHBaseCluster}. Can be used in
+ * either the {@link Rule} or {@link ClassRule} positions. Built on top of an instance of
+ * {@link HBaseTestingUtility}, so be wary of intermixing direct use of that class with this Rule.
+ *

+ * Use in combination with {@link ConnectionRule}, for example: + * + *
+ * <pre>{@code
+ *   public class TestMyClass {
+ *     @ClassRule
+ *     public static final MiniClusterRule miniClusterRule = new MiniClusterRule();
+ *
+ *     @Rule
+ *     public final ConnectionRule connectionRule =
+ *       new ConnectionRule(miniClusterRule::createConnection);
+ *   }
+ * }</pre>
+ */ +public class MiniClusterRule extends ExternalResource { + private final HBaseTestingUtility testingUtility; + private final StartMiniClusterOption miniClusterOptions; + + private MiniHBaseCluster miniCluster; + + /** + * Create an instance over the default options provided by {@link StartMiniClusterOption}. + */ + public MiniClusterRule() { + this(StartMiniClusterOption.builder().build()); + } + + /** + * Create an instance using the provided {@link StartMiniClusterOption}. + */ + public MiniClusterRule(final StartMiniClusterOption miniClusterOptions) { + this.testingUtility = new HBaseTestingUtility(); + this.miniClusterOptions = miniClusterOptions; + } + + /** + * Create a {@link AsyncConnection} to the managed {@link MiniHBaseCluster}. It's up to the caller + * to {@link AsyncConnection#close() close()} the connection when finished. + */ + public CompletableFuture createConnection() { + if (miniCluster == null) { + throw new IllegalStateException("test cluster not initialized"); + } + return ConnectionFactory.createAsyncConnection(miniCluster.getConf()); + } + + @Override + protected void before() throws Throwable { + miniCluster = testingUtility.startMiniCluster(miniClusterOptions); + } + + @Override + protected void after() { + try { + testingUtility.shutdownMiniCluster(); + } catch (IOException e) { + throw new RuntimeException(e); + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java new file mode 100644 index 00000000000..581ac9fa394 --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/client/hamcrest/BytesMatchers.java @@ -0,0 +1,56 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.client.hamcrest; + +import static org.hamcrest.core.Is.is; +import org.apache.hadoop.hbase.util.Bytes; +import org.hamcrest.Description; +import org.hamcrest.Matcher; +import org.hamcrest.TypeSafeDiagnosingMatcher; + +/** + * Helper methods for matching against values passed through the helper methods of {@link Bytes}. 
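+ * <p>
+ * For example, a sketch of a typical assertion (assuming static imports of hamcrest's
+ * {@code assertThat} and {@code startsWith}, and of {@code bytesAsStringBinary}):
+ * <pre>{@code
+ *   assertThat(Bytes.toBytes("foo,,1"), bytesAsStringBinary(startsWith("foo,,")));
+ * }</pre>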
+ */
+public final class BytesMatchers {
+
+  private BytesMatchers() {}
+
+  public static Matcher<byte[]> bytesAsStringBinary(final String binary) {
+    return bytesAsStringBinary(is(binary));
+  }
+
+  public static Matcher<byte[]> bytesAsStringBinary(final Matcher<String> matcher) {
+    return new TypeSafeDiagnosingMatcher<byte[]>() {
+      @Override protected boolean matchesSafely(byte[] item, Description mismatchDescription) {
+        final String binary = Bytes.toStringBinary(item);
+        if (matcher.matches(binary)) {
+          return true;
+        }
+        mismatchDescription.appendText("was a byte[] with a Bytes.toStringBinary value ");
+        matcher.describeMismatch(binary, mismatchDescription);
+        return false;
+      }
+
+      @Override public void describeTo(Description description) {
+        description
+          .appendText("has a byte[] with a Bytes.toStringBinary value that ")
+          .appendDescriptionOf(matcher);
+      }
+    };
+  }
+}
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowser.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowser.java
new file mode 100644
index 00000000000..86230ef2d64
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowser.java
@@ -0,0 +1,360 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.apache.hadoop.hbase.master.webapp; + +import static org.apache.hadoop.hbase.client.hamcrest.BytesMatchers.bytesAsStringBinary; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.allOf; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.equalTo; +import static org.hamcrest.Matchers.hasProperty; +import static org.hamcrest.Matchers.startsWith; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; +import java.util.List; +import java.util.concurrent.CompletableFuture; +import javax.servlet.http.HttpServletRequest; +import org.apache.hadoop.hbase.ClearUserNamespacesAndTablesRule; +import org.apache.hadoop.hbase.ConnectionRule; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.MiniClusterRule; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.AsyncAdmin; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor; +import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder; +import org.apache.hadoop.hbase.client.TableDescriptor; +import org.apache.hadoop.hbase.client.TableDescriptorBuilder; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.MediumTests; +import org.apache.hadoop.hbase.util.RegionSplitter; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.junit.rules.RuleChain; +import org.junit.rules.TestName; +import org.junit.rules.TestRule; +import org.apache.hbase.thirdparty.org.apache.commons.collections4.IterableUtils; + +/** + * Cluster-backed correctness tests for the functionality provided by {@link MetaBrowser}. 
+ */ +@Category({ MasterTests.class, MediumTests.class}) +public class TestMetaBrowser { + + @ClassRule + public static final HBaseClassTestRule testRule = + HBaseClassTestRule.forClass(TestMetaBrowser.class); + @ClassRule + public static final MiniClusterRule miniClusterRule = new MiniClusterRule(); + + private final ConnectionRule connectionRule = + new ConnectionRule(miniClusterRule::createConnection); + private final ClearUserNamespacesAndTablesRule clearUserNamespacesAndTablesRule = + new ClearUserNamespacesAndTablesRule(connectionRule::getConnection); + + @Rule + public TestRule rule = RuleChain.outerRule(connectionRule) + .around(clearUserNamespacesAndTablesRule); + + @Rule + public TestName testNameRule = new TestName(); + + private AsyncConnection connection; + private AsyncAdmin admin; + + @Before + public void before() { + connection = connectionRule.getConnection(); + admin = connection.getAdmin(); + } + + @Test + public void noFilters() { + final String namespaceName = testNameRule.getMethodName(); + final TableName a = TableName.valueOf("a"); + final TableName b = TableName.valueOf(namespaceName, "b"); + + CompletableFuture.allOf( + createTable(a), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))) + .join(); + + final HttpServletRequest request = new MockRequestBuilder().build(); + final List rows; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { + rows = IterableUtils.toList(results); + } + assertThat(rows, contains( + hasProperty("row", bytesAsStringBinary(startsWith(a + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); + } + + @Test + public void limit() { + final String tableName = testNameRule.getMethodName(); + createTable(TableName.valueOf(tableName), 8).join(); + + final HttpServletRequest request = new MockRequestBuilder() + .setLimit(5) + .build(); + final List rows; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { + rows = IterableUtils.toList(results); + } + assertThat(rows, contains( + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",20000000"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",40000000"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",60000000"))), + hasProperty("row", bytesAsStringBinary(startsWith(tableName + ",80000000"))))); + } + + @Test + public void regionStateFilter() { + final String namespaceName = testNameRule.getMethodName(); + final TableName foo = TableName.valueOf(namespaceName, "foo"); + final TableName bar = TableName.valueOf(namespaceName, "bar"); + + createNamespace(namespaceName) + .thenCompose(_void1 -> CompletableFuture.allOf( + createTable(foo, 2).thenCompose(_void2 -> admin.disableTable(foo)), + createTable(bar, 2))) + .join(); + + final HttpServletRequest request = new MockRequestBuilder() + .setLimit(10_000) + .setRegionState(RegionState.State.OPEN) + .setTable(namespaceName) + .build(); + final List rows; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { + rows = IterableUtils.toList(results); + } + assertThat(rows, contains( + hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(bar.toString() + ",80000000"))))); + } + + @Test + public void scanTableFilter() { + 
final String namespaceName = testNameRule.getMethodName(); + final TableName a = TableName.valueOf("a"); + final TableName b = TableName.valueOf(namespaceName, "b"); + + CompletableFuture.allOf( + createTable(a), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))) + .join(); + + final HttpServletRequest request = new MockRequestBuilder() + .setTable(namespaceName) + .build(); + final List rows; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request).getResults()) { + rows = IterableUtils.toList(results); + } + assertThat(rows, contains( + hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); + } + + @Test + public void paginateWithReplicas() { + final String namespaceName = testNameRule.getMethodName(); + final TableName a = TableName.valueOf("a"); + final TableName b = TableName.valueOf(namespaceName, "b"); + + CompletableFuture.allOf( + createTableWithReplicas(a, 2), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 2))) + .join(); + + final HttpServletRequest request1 = new MockRequestBuilder() + .setLimit(2) + .build(); + final List rows1; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request1).getResults()) { + rows1 = IterableUtils.toList(results); + } + assertThat(rows1, contains( + allOf( + hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))), + hasProperty("replicaId", equalTo(0))), + allOf( + hasProperty("regionName", bytesAsStringBinary(startsWith(a + ",,"))), + hasProperty("replicaId", equalTo(1))))); + + final HttpServletRequest request2 = new MockRequestBuilder() + .setLimit(2) + .setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow())) + .build(); + final List rows2; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request2).getResults()) { + rows2 = IterableUtils.toList(results); + } + assertThat(rows2, contains( + hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",80000000"))))); + } + + @Test + public void paginateWithTableFilter() { + final String namespaceName = testNameRule.getMethodName(); + final TableName a = TableName.valueOf("a"); + final TableName b = TableName.valueOf(namespaceName, "b"); + + CompletableFuture.allOf( + createTable(a), + createNamespace(namespaceName).thenCompose(_void -> createTable(b, 5))) + .join(); + + final HttpServletRequest request1 = new MockRequestBuilder() + .setLimit(2) + .setTable(namespaceName) + .build(); + final List rows1; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request1).getResults()) { + rows1 = IterableUtils.toList(results); + } + assertThat(rows1, contains( + hasProperty("row", bytesAsStringBinary(startsWith(b + ",,"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",33333333"))))); + + final HttpServletRequest request2 = new MockRequestBuilder() + .setLimit(2) + .setTable(namespaceName) + .setStart(MetaBrowser.buildStartParamFrom(rows1.get(rows1.size() - 1).getRow())) + .build(); + final List rows2; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request2).getResults()) { + rows2 = IterableUtils.toList(results); + } + assertThat(rows2, contains( + hasProperty("row", bytesAsStringBinary(startsWith(b + ",66666666"))), + hasProperty("row", bytesAsStringBinary(startsWith(b + ",99999999"))))); + + final HttpServletRequest request3 = new MockRequestBuilder() + 
.setLimit(2) + .setTable(namespaceName) + .setStart(MetaBrowser.buildStartParamFrom(rows2.get(rows2.size() - 1).getRow())) + .build(); + final List<RegionReplicaInfo> rows3; + try (final MetaBrowser.Results results = new MetaBrowser(connection, request3).getResults()) { + rows3 = IterableUtils.toList(results); + } + assertThat(rows3, contains( + hasProperty("row", bytesAsStringBinary(startsWith(b + ",cccccccc"))))); + } + + private ColumnFamilyDescriptor columnFamilyDescriptor() { + return ColumnFamilyDescriptorBuilder.of("f1"); + } + + private TableDescriptor tableDescriptor(final TableName tableName) { + return TableDescriptorBuilder.newBuilder(tableName) + .setColumnFamily(columnFamilyDescriptor()) + .build(); + } + + private TableDescriptor tableDescriptor(final TableName tableName, final int replicaCount) { + return TableDescriptorBuilder.newBuilder(tableName) + .setRegionReplication(replicaCount) + .setColumnFamily(columnFamilyDescriptor()) + .build(); + } + + private CompletableFuture<Void> createTable(final TableName tableName) { + return admin.createTable(tableDescriptor(tableName)); + } + + private CompletableFuture<Void> createTable(final TableName tableName, final int splitCount) { + return admin.createTable( + tableDescriptor(tableName), + new RegionSplitter.HexStringSplit().split(splitCount)); + } + + private CompletableFuture<Void> createTableWithReplicas(final TableName tableName, + final int replicaCount) { + return admin.createTable(tableDescriptor(tableName, replicaCount)); + } + + private CompletableFuture<Void> createNamespace(final String namespace) { + final NamespaceDescriptor descriptor = NamespaceDescriptor.create(namespace).build(); + return admin.createNamespace(descriptor); + } + + /** + * Helper for mocking an {@link HttpServletRequest} relevant to the test. + */ + static class MockRequestBuilder { + + private String limit = null; + private String regionState = null; + private String start = null; + private String table = null; + + public MockRequestBuilder setLimit(final int value) { + this.limit = Integer.toString(value); + return this; + } + + public MockRequestBuilder setLimit(final String value) { + this.limit = value; + return this; + } + + public MockRequestBuilder setRegionState(final RegionState.State value) { + this.regionState = value.toString(); + return this; + } + + public MockRequestBuilder setRegionState(final String value) { + this.regionState = value; + return this; + } + + public MockRequestBuilder setStart(final String value) { + this.start = value; + return this; + } + + public MockRequestBuilder setTable(final String value) { + this.table = value; + return this; + } + + public HttpServletRequest build() { + final HttpServletRequest request = mock(HttpServletRequest.class); + when(request.getRequestURI()).thenReturn("/table.jsp"); + when(request.getParameter("name")).thenReturn("hbase%3Ameta"); + + when(request.getParameter("scan_limit")).thenReturn(limit); + when(request.getParameter("scan_region_state")).thenReturn(regionState); + when(request.getParameter("scan_start")).thenReturn(start); + when(request.getParameter("scan_table")).thenReturn(table); + + return request; + } + } +} diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowserNoCluster.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowserNoCluster.java new file mode 100644 index 00000000000..ebb1227463b --- /dev/null +++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/webapp/TestMetaBrowserNoCluster.java @@ -0,0 +1,168 @@ +/* + * Licensed
to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hbase.master.webapp; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; +import javax.servlet.http.HttpServletRequest; +import org.apache.hadoop.hbase.HBaseClassTestRule; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.AsyncConnection; +import org.apache.hadoop.hbase.master.RegionState; +import org.apache.hadoop.hbase.master.webapp.TestMetaBrowser.MockRequestBuilder; +import org.apache.hadoop.hbase.testclassification.MasterTests; +import org.apache.hadoop.hbase.testclassification.SmallTests; +import org.junit.Before; +import org.junit.ClassRule; +import org.junit.Test; +import org.junit.experimental.categories.Category; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +/** + * Cluster-independent tests for the functionality provided by {@link MetaBrowser}, run against a mocked connection.
+ */ +@Category({ MasterTests.class, SmallTests.class}) +public class TestMetaBrowserNoCluster { + + @ClassRule + public static final HBaseClassTestRule testRule = + HBaseClassTestRule.forClass(TestMetaBrowserNoCluster.class); + + @Mock + private AsyncConnection connection; + + @Before + public void before() { + MockitoAnnotations.initMocks(this); + } + + @Test + public void buildFirstPageQueryStringNoParams() { + final HttpServletRequest request = new MockRequestBuilder().build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + + assertEquals("hbase:meta", metaBrowser.getName()); + assertNull(metaBrowser.getScanLimit()); + assertNull(metaBrowser.getScanRegionState()); + assertNull(metaBrowser.getScanStart()); + assertNull(metaBrowser.getScanTable()); + assertEquals("/table.jsp?name=hbase%3Ameta", metaBrowser.buildFirstPageUrl()); + } + + @Test + public void buildFirstPageQueryStringNonNullParams() { + final HttpServletRequest request = new MockRequestBuilder() + .setLimit(50) + .setRegionState(RegionState.State.ABNORMALLY_CLOSED) + .setTable("foo%3Abar") + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + + assertEquals(50, metaBrowser.getScanLimit().intValue()); + assertEquals(RegionState.State.ABNORMALLY_CLOSED, metaBrowser.getScanRegionState()); + assertEquals(TableName.valueOf("foo", "bar"), metaBrowser.getScanTable()); + assertEquals( + "/table.jsp?name=hbase%3Ameta" + + "&scan_limit=50" + + "&scan_region_state=ABNORMALLY_CLOSED" + + "&scan_table=foo%3Abar", + metaBrowser.buildNextPageUrl(null)); + } + + @Test + public void buildNextPageQueryString() { + final HttpServletRequest request = new MockRequestBuilder().build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + + assertEquals( + "/table.jsp?name=hbase%3Ameta&scan_start=%255Cx80%255Cx00%255Cx7F", + metaBrowser.buildNextPageUrl(new byte[] { Byte.MIN_VALUE, (byte) 0, Byte.MAX_VALUE })); + } + + @Test + public void unparseableLimitParam() { + final HttpServletRequest request = new MockRequestBuilder() + .setLimit("foo") + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + assertNull(metaBrowser.getScanLimit()); + assertThat(metaBrowser.getErrorMessages(), contains( + "Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.")); + } + + @Test + public void zeroLimitParam() { + final HttpServletRequest request = new MockRequestBuilder() + .setLimit(0) + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + assertEquals(MetaBrowser.SCAN_LIMIT_DEFAULT, metaBrowser.getScanLimit().intValue()); + assertThat(metaBrowser.getErrorMessages(), contains( + "Requested SCAN_LIMIT value 0 is <= 0.")); + } + + @Test + public void negativeLimitParam() { + final HttpServletRequest request = new MockRequestBuilder() + .setLimit(-10) + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + assertEquals(MetaBrowser.SCAN_LIMIT_DEFAULT, metaBrowser.getScanLimit().intValue()); + assertThat(metaBrowser.getErrorMessages(), contains( + "Requested SCAN_LIMIT value -10 is <= 0.")); + } + + @Test + public void excessiveLimitParam() { + final HttpServletRequest request = new MockRequestBuilder() + .setLimit(10_001) + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + assertEquals(MetaBrowser.SCAN_LIMIT_MAX, metaBrowser.getScanLimit().intValue()); + assertThat(metaBrowser.getErrorMessages(), contains( + "Requested SCAN_LIMIT value 10001 exceeds 
maximum value 10000.")); + } + + @Test + public void invalidRegionStateParam() { + final HttpServletRequest request = new MockRequestBuilder() + .setRegionState("foo") + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + assertNull(metaBrowser.getScanRegionState()); + assertThat(metaBrowser.getErrorMessages(), contains( + "Requested SCAN_REGION_STATE value 'foo' cannot be parsed as a RegionState.")); + } + + @Test + public void multipleErrorMessages() { + final HttpServletRequest request = new MockRequestBuilder() + .setLimit("foo") + .setRegionState("bar") + .build(); + final MetaBrowser metaBrowser = new MetaBrowser(connection, request); + assertThat(metaBrowser.getErrorMessages(), containsInAnyOrder( + "Requested SCAN_LIMIT value 'foo' cannot be parsed as an integer.", + "Requested SCAN_REGION_STATE value 'bar' cannot be parsed as a RegionState." + )); + } +} diff --git a/pom.xml b/pom.xml index 275673aab8b..0e9351f6f53 100755 --- a/pom.xml +++ b/pom.xml @@ -2152,6 +2152,12 @@ <version>${hamcrest.version}</version> <scope>test</scope> </dependency>
+ <dependency> + <groupId>org.hamcrest</groupId> + <artifactId>hamcrest-library</artifactId> + <version>${hamcrest.version}</version> + <scope>test</scope> + </dependency> <dependency> <groupId>org.mockito</groupId> <artifactId>mockito-core</artifactId>
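
For reviewers, a minimal sketch of how table.jsp is expected to drive the new class, assembled from the API exercised by the tests above. It assumes an open AsyncConnection named `connection` and reuses the MockRequestBuilder test helper; `lastRow` is a placeholder for the row key of the last result rendered on the current page:

  // Build a request carrying the supported query parameters.
  HttpServletRequest request = new MockRequestBuilder()
      .setLimit(50)                            // scan_limit
      .setRegionState(RegionState.State.OPEN)  // scan_region_state
      .setTable("foo")                         // scan_table: restrict to a table or namespace
      .build();
  MetaBrowser browser = new MetaBrowser(connection, request);
  // Results is used in try-with-resources by the tests, so it is closeable,
  // and it iterates one RegionReplicaInfo per region replica in hbase:meta.
  try (MetaBrowser.Results results = browser.getResults()) {
    for (RegionReplicaInfo info : results) {
      // render one display row per region replica
    }
  }
  // Paging: fold the last row seen into the next page's scan_start parameter.
  String nextPageUrl = browser.buildNextPageUrl(lastRow);

Invalid parameter values do not fail the page; as the NoCluster tests show, they fall back to defaults and surface through getErrorMessages().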