diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java index 414d4ee7b49..813c060b520 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/AsyncMetaTableAccessor.java @@ -101,11 +101,7 @@ public class AsyncMetaTableAccessor { return future; } - /** - * Returns the HRegionLocation from meta for the given region n * @param regionName region we're - * looking for - * @return HRegionLocation for the given region - */ + /** Returns the HRegionLocation from meta for the given region */ public static CompletableFuture> getRegionLocation(AsyncTable metaTable, byte[] regionName) { CompletableFuture> future = new CompletableFuture<>(); @@ -127,11 +123,7 @@ public class AsyncMetaTableAccessor { return future; } - /** - * Returns the HRegionLocation from meta for the given encoded region name n * @param - * encodedRegionName region we're looking for - * @return HRegionLocation for the given region - */ + /** Returns the HRegionLocation from meta for the given encoded region name */ public static CompletableFuture> getRegionLocationWithEncodedName(AsyncTable metaTable, byte[] encodedRegionName) { CompletableFuture> future = new CompletableFuture<>(); @@ -176,8 +168,9 @@ public class AsyncMetaTableAccessor { } /** - * Used to get all region locations for the specific table. n * @param tableName table we're - * looking for, can be null for getting all regions + * Used to get all region locations for the specific table + * @param metaTable scanner over meta table + * @param tableName table we're looking for, can be null for getting all regions * @return the list of region locations. The return value will be wrapped by a * {@link CompletableFuture}. */ @@ -200,8 +193,9 @@ public class AsyncMetaTableAccessor { } /** - * Used to get table regions' info and server. n * @param tableName table we're looking for, can - * be null for getting all regions + * Used to get table regions' info and server. + * @param metaTable scanner over meta table + * @param tableName table we're looking for, can be null for getting all regions * @param excludeOfflinedSplitParents don't return split parents * @return the list of regioninfos and server. The return value will be wrapped by a * {@link CompletableFuture}. @@ -259,9 +253,11 @@ public class AsyncMetaTableAccessor { } /** - * Performs a scan of META table for given table. n * @param tableName table withing we scan - * @param type scanned part of meta - * @param visitor Visitor invoked against each row + * Performs a scan of META table for given table. + * @param metaTable scanner over meta table + * @param tableName table within we scan + * @param type scanned part of meta + * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, TableName tableName, QueryType type, final Visitor visitor) { @@ -270,11 +266,13 @@ public class AsyncMetaTableAccessor { } /** - * Performs a scan of META table for given table. n * @param startRow Where to start the scan - * @param stopRow Where to stop the scan - * @param type scanned part of meta - * @param maxRows maximum rows to return - * @param visitor Visitor invoked against each row + * Performs a scan of META table for given table. 
+ * @param metaTable scanner over meta table + * @param startRow Where to start the scan + * @param stopRow Where to stop the scan + * @param type scanned part of meta + * @param maxRows maximum rows to return + * @param visitor Visitor invoked against each row */ private static CompletableFuture scanMeta(AsyncTable metaTable, byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) { @@ -410,9 +408,13 @@ public class AsyncMetaTableAccessor { * can't deserialize the result. */ private static Optional getRegionLocations(final Result r) { - if (r == null) return Optional.empty(); + if (r == null) { + return Optional.empty(); + } Optional regionInfo = getHRegionInfo(r, getRegionInfoColumn()); - if (!regionInfo.isPresent()) return Optional.empty(); + if (!regionInfo.isPresent()) { + return Optional.empty(); + } List locations = new ArrayList(1); NavigableMap> familyMap = r.getNoVersionMap(); @@ -420,15 +422,18 @@ public class AsyncMetaTableAccessor { locations.add(getRegionLocation(r, regionInfo.get(), 0)); NavigableMap infoMap = familyMap.get(getCatalogFamily()); - if (infoMap == null) return Optional.of(new RegionLocations(locations)); + if (infoMap == null) { + return Optional.of(new RegionLocations(locations)); + } // iterate until all serverName columns are seen int replicaId = 0; byte[] serverColumn = getServerColumn(replicaId); - SortedMap serverMap = null; - serverMap = infoMap.tailMap(serverColumn, false); + SortedMap serverMap = infoMap.tailMap(serverColumn, false); - if (serverMap.isEmpty()) return Optional.of(new RegionLocations(locations)); + if (serverMap.isEmpty()) { + return Optional.of(new RegionLocations(locations)); + } for (Map.Entry entry : serverMap.entrySet()) { replicaId = parseReplicaIdFromServerColumn(entry.getKey()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java index 9b7a5de19bd..8c675c4522e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java @@ -51,6 +51,7 @@ public class ClusterId { } /** + * Parse the serialized representation of the {@link ClusterId} * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix * @return An instance of {@link ClusterId} made from bytes n * @see #toByteArray() */ diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java index 7ef8a208611..630e3620a67 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java @@ -67,11 +67,11 @@ public final class ClusterMetricsBuilder { .collect(Collectors.toList())) .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream() .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder() - .setTableName(ProtobufUtil.toProtoTableName((status.getKey()))) + .setTableName(ProtobufUtil.toProtoTableName(status.getKey())) .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build()) .collect(Collectors.toList())); if (metrics.getMasterName() != null) { - builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName()))); + builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName())); } if (metrics.getMasterTasks() != null) { 
builder.addAllMasterTasks(metrics.getMasterTasks().stream() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java index e218437694b..e3b1a8ab662 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java @@ -186,6 +186,7 @@ public class ClusterStatus implements ClusterMetrics { } /** Returns the HBase version string as reported by the HMaster */ + @Override public String getHBaseVersion() { return metrics.getHBaseVersion(); } @@ -279,6 +280,7 @@ public class ClusterStatus implements ClusterMetrics { return serverMetrics == null ? null : new ServerLoad(serverMetrics); } + @Override public String getClusterId() { return metrics.getClusterId(); } @@ -289,6 +291,7 @@ public class ClusterStatus implements ClusterMetrics { } /** + * Get the list of master coprocessor names. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link #getMasterCoprocessorNames} instead. */ @@ -299,6 +302,7 @@ public class ClusterStatus implements ClusterMetrics { } /** + * Get the last major compaction time for a given table. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link #getLastMajorCompactionTimestamp(TableName)} instead. */ @@ -308,6 +312,7 @@ public class ClusterStatus implements ClusterMetrics { } /** + * Get the last major compaction time for a given region. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link #getLastMajorCompactionTimestamp(byte[])} instead. */ @@ -317,6 +322,7 @@ public class ClusterStatus implements ClusterMetrics { } /** + * Returns true if the balancer is enabled. * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 No flag in 2.0 */ @Deprecated diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java index 94909f1c14e..32e06d61024 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java @@ -44,8 +44,8 @@ public interface CoprocessorEnvironment { int getLoadSequence(); /** - * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to - * set a configuration. + * Returns a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to + * set a configuration. 
*/ Configuration getConfiguration(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java index 3484995c1bf..47a86f9492f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java @@ -44,10 +44,7 @@ public class HBaseServerException extends HBaseIOException { this.serverOverloaded = serverOverloaded; } - /** - * @param t throwable to check for server overloaded state - * @return True if the server was considered overloaded when the exception was thrown - */ + /** Returns True if the server was considered overloaded when the exception was thrown */ public static boolean isServerOverloaded(Throwable t) { if (t instanceof HBaseServerException) { return ((HBaseServerException) t).isServerOverloaded(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java index d55b417d482..43640858ccc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java @@ -176,6 +176,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparableb * @throws IllegalArgumentException If not null and not a legitimate family name: i.e. 'printable' @@ -205,19 +206,12 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, ComparableHBASE-13655). Use * {@link #getCompressionType()}. @@ -267,7 +248,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, ComparableHBASE-13655). Use * {@link #getCompactionCompressionType()}. @@ -283,6 +264,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparablebytes n * @see * #toByteArray() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java index 5e9abd31a7e..2f4d6377888 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java @@ -221,7 +221,7 @@ public class HRegionInfo implements RegionInfo { /** * Construct HRegionInfo with explicit parameters - * @param tableName the table descriptor + * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @param split true if this region has split and we have daughter regions regions that may or @@ -234,37 +234,37 @@ public class HRegionInfo implements RegionInfo { /** * Construct HRegionInfo with explicit parameters - * @param tableName the table descriptor + * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @param split true if this region has split and we have daughter regions regions that may or * may not hold references to this region. - * @param regionid Region id to use. n + * @param regionId Region id to use. 
*/ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split, final long regionid) throws IllegalArgumentException { - this(tableName, startKey, endKey, split, regionid, DEFAULT_REPLICA_ID); + final boolean split, final long regionId) throws IllegalArgumentException { + this(tableName, startKey, endKey, split, regionId, DEFAULT_REPLICA_ID); } /** * Construct HRegionInfo with explicit parameters - * @param tableName the table descriptor + * @param tableName the table name * @param startKey first key in region * @param endKey end of key range * @param split true if this region has split and we have daughter regions regions that may or * may not hold references to this region. - * @param regionid Region id to use. - * @param replicaId the replicaId to use n + * @param regionId Region id to use. + * @param replicaId the replicaId to use */ public HRegionInfo(final TableName tableName, final byte[] startKey, final byte[] endKey, - final boolean split, final long regionid, final int replicaId) throws IllegalArgumentException { + final boolean split, final long regionId, final int replicaId) throws IllegalArgumentException { super(); if (tableName == null) { throw new IllegalArgumentException("TableName cannot be null"); } this.tableName = tableName; this.offLine = false; - this.regionId = regionid; + this.regionId = regionId; this.replicaId = replicaId; if (this.replicaId > MAX_REPLICA_ID) { throw new IllegalArgumentException("ReplicaId cannot be greater than" + MAX_REPLICA_ID); @@ -280,7 +280,7 @@ public class HRegionInfo implements RegionInfo { } /** - * Costruct a copy of another HRegionInfo n + * Construct a copy of another HRegionInfo */ public HRegionInfo(RegionInfo other) { super(); @@ -303,8 +303,10 @@ public class HRegionInfo implements RegionInfo { } /** - * Make a region name of passed parameters. n * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null + * @param regionId Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format (such that it contains its * encoded name?). * @return Region name made of passed tableName, startKey and id @@ -314,12 +316,14 @@ public class HRegionInfo implements RegionInfo { @Deprecated @InterfaceAudience.Private public static byte[] createRegionName(final TableName tableName, final byte[] startKey, - final long regionid, boolean newFormat) { - return RegionInfo.createRegionName(tableName, startKey, Long.toString(regionid), newFormat); + final long regionId, boolean newFormat) { + return RegionInfo.createRegionName(tableName, startKey, Long.toString(regionId), newFormat); } /** - * Make a region name of passed parameters. n * @param startKey Can be null + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format (such that it contains its * encoded name?). @@ -335,10 +339,12 @@ public class HRegionInfo implements RegionInfo { } /** - * Make a region name of passed parameters. n * @param startKey Can be null - * @param regionid Region id (Usually timestamp from when region was created). 
n * @param - * newFormat should we create the region name in the new format (such that it - * contains its encoded name?). + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null + * @param regionId Region id (Usually timestamp from when region was created). + * @param newFormat should we create the region name in the new format (such that it contains its + * encoded name?). * @return Region name made of passed tableName, startKey, id and replicaId * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link RegionInfo#createRegionName(TableName, byte[], long, int, boolean)}. @@ -346,13 +352,15 @@ public class HRegionInfo implements RegionInfo { @Deprecated @InterfaceAudience.Private public static byte[] createRegionName(final TableName tableName, final byte[] startKey, - final long regionid, int replicaId, boolean newFormat) { - return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionid)), + final long regionId, int replicaId, boolean newFormat) { + return RegionInfo.createRegionName(tableName, startKey, Bytes.toBytes(Long.toString(regionId)), replicaId, newFormat); } /** - * Make a region name of passed parameters. n * @param startKey Can be null + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null * @param id Region id (Usually timestamp from when region was created). * @param newFormat should we create the region name in the new format (such that it contains its * encoded name?). @@ -368,9 +376,11 @@ public class HRegionInfo implements RegionInfo { } /** - * Make a region name of passed parameters. n * @param startKey Can be null - * @param id Region id (Usually timestamp from when region was created). n * @param newFormat - * should we create the region name in the new format + * Make a region name of passed parameters. + * @param tableName the table name + * @param startKey Can be null + * @param id Region id (Usually timestamp from when region was created) + * @param newFormat should we create the region name in the new format * @return Region name made of passed tableName, startKey, id and replicaId * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use * {@link RegionInfo#createRegionName(TableName, byte[], byte[], int, boolean)}. @@ -546,6 +556,7 @@ public class HRegionInfo implements RegionInfo { } /** + * Set or clear the split status flag. * @param split set split status */ public void setSplit(boolean split) { @@ -684,6 +695,7 @@ public class HRegionInfo implements RegionInfo { } /** + * Serialize a {@link HRegionInfo} into a byte array. * @return This instance serialized as protobuf w/ a magic pb prefix. * @see #parseFrom(byte[]) * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use @@ -695,6 +707,7 @@ public class HRegionInfo implements RegionInfo { } /** + * Parse a serialized representation of a {@link HRegionInfo}. * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes * null * @see #toByteArray() @@ -708,6 +721,7 @@ public class HRegionInfo implements RegionInfo { } /** + * Parse a serialized representation of a {@link HRegionInfo}. * @return A deserialized {@link HRegionInfo} or null if we failed deserialize or passed bytes * null * @see #toByteArray() @@ -725,6 +739,7 @@ public class HRegionInfo implements RegionInfo { } /** + * Parse a serialized representation of a {@link HRegionInfo}. 
* @param bytes A pb RegionInfo serialized with a pb magic prefix. * @return A deserialized {@link HRegionInfo} n * @see #toByteArray() * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0 Use @@ -736,6 +751,7 @@ public class HRegionInfo implements RegionInfo { } /** + * Parse a serialized representation of a {@link HRegionInfo}. * @param bytes A pb RegionInfo serialized with a pb magic prefix. * @param offset starting point in the byte array * @param len length to read on the byte array diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java index 6ae93bb3954..a7720b2734e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java @@ -110,8 +110,8 @@ public class HRegionLocation implements Comparable { } /** - * @return String made of hostname and port formatted as per - * {@link Addressing#createHostAndPortStr(String, int)} + * Returns String made of hostname and port formatted as per + * {@link Addressing#createHostAndPortStr(String, int)} */ public String getHostnamePort() { return Addressing.createHostAndPortStr(this.getHostname(), this.getPort()); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java index 6c8ea810d03..f5448e61737 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HTableDescriptor.java @@ -801,6 +801,7 @@ public class HTableDescriptor implements TableDescriptor, ComparableHBASE-6188 */ @@ -811,6 +812,7 @@ public class HTableDescriptor implements TableDescriptor, ComparableHBASE-6188 */ @@ -822,6 +824,7 @@ public class HTableDescriptor implements TableDescriptor, ComparableHBASE-6188 */ @@ -832,14 +835,14 @@ public class HTableDescriptor implements TableDescriptor, Comparablebytes nn * @see * #toByteArray() diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java index 01b5f49a205..48476e4bb3a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MetaTableAccessor.java @@ -38,7 +38,6 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.stream.Collectors; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.Cell.Type; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.Consistency; import org.apache.hadoop.hbase.client.Delete; @@ -155,6 +154,7 @@ public class MetaTableAccessor { private static final byte SEPARATED_BYTE = 0x00; @InterfaceAudience.Private + @SuppressWarnings("ImmutableEnumChecker") public enum QueryType { ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY), REGION(HConstants.CATALOG_FAMILY), @@ -364,8 +364,8 @@ public class MetaTableAccessor { } /** - * @return Return all regioninfos listed in the 'info:merge*' columns of the - * regionName row. + * Returns Return all regioninfos listed in the 'info:merge*' columns of the + * regionName row. 
*/ @Nullable public static List getMergeRegions(Connection connection, byte[] regionName) @@ -381,8 +381,8 @@ public class MetaTableAccessor { } /** - * @return Deserialized values of <qualifier,regioninfo> pairs taken from column values that - * match the regex 'info:merge.*' in array of cells. + * Returns Deserialized values of <qualifier,regioninfo> pairs taken from column values that + * match the regex 'info:merge.*' in array of cells. */ @Nullable public static Map getMergeRegionsWithName(Cell[] cells) { @@ -408,8 +408,8 @@ public class MetaTableAccessor { } /** - * @return Deserialized regioninfo values taken from column values that match the regex - * 'info:merge.*' in array of cells. + * Returns Deserialized regioninfo values taken from column values that match the regex + * 'info:merge.*' in array of cells. */ @Nullable public static List getMergeRegions(Cell[] cells) { @@ -418,8 +418,8 @@ public class MetaTableAccessor { } /** - * @return True if any merge regions present in cells; i.e. the column in - * cell matches the regex 'info:merge.*'. + * Returns True if any merge regions present in cells; i.e. the column in + * cell matches the regex 'info:merge.*'. */ public static boolean hasMergeRegions(Cell[] cells) { for (Cell cell : cells) { @@ -483,6 +483,7 @@ public class MetaTableAccessor { return getListOfRegionInfos(result); } + @SuppressWarnings("MixedMutabilityReturnType") private static List getListOfRegionInfos(final List> pairs) { if (pairs == null || pairs.isEmpty()) { @@ -496,8 +497,7 @@ public class MetaTableAccessor { } /** - * @param tableName table we're working with - * @return start row for scanning META according to query type + * Returns start row for scanning META according to query type */ public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) { if (tableName == null) { @@ -518,8 +518,7 @@ public class MetaTableAccessor { } /** - * @param tableName table we're working with - * @return stop row for scanning META according to query type + * Returns stop row for scanning META according to query type */ public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) { if (tableName == null) { @@ -641,9 +640,10 @@ public class MetaTableAccessor { } /** + * Get the user regions a given server is hosting. * @param connection connection we're using * @param serverName server whose regions we're interested in - * @return List of user regions installed on this server (does not include catalog regions). n + * @return List of user regions installed on this server (does not include catalog regions). 
*/ public static NavigableMap getServerUserRegions(Connection connection, final ServerName serverName) throws IOException { @@ -1284,7 +1284,7 @@ public class MetaTableAccessor { if (info == null) { return true; } - if (!(info.getTable().equals(tableName))) { + if (!info.getTable().equals(tableName)) { return false; } return super.visit(rowResult); @@ -1321,14 +1321,14 @@ public class MetaTableAccessor { if (splitA != null) { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITA_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitA)) - .build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(RegionInfo.toByteArray(splitA)).build()); } if (splitB != null) { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(HConstants.SPLITB_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(splitB)) - .build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(RegionInfo.toByteArray(splitB)).build()); } return put; } @@ -1509,8 +1509,8 @@ public class MetaTableAccessor { String qualifier = String.format(HConstants.MERGE_QUALIFIER_PREFIX_STR + "%04d", counter++); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(Bytes.toBytes(qualifier)) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(RegionInfo.toByteArray(ri)) - .build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put) + .setValue(RegionInfo.toByteArray(ri)).build()); } return put; } @@ -1852,7 +1852,7 @@ public class MetaTableAccessor { public static Put addRegionInfo(final Put p, final RegionInfo hri) throws IOException { p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) .setFamily(getCatalogFamily()).setQualifier(HConstants.REGIONINFO_QUALIFIER) - .setTimestamp(p.getTimestamp()).setType(Type.Put) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put) // Serialize the Default Replica HRI otherwise scan of hbase:meta // shows an info:regioninfo value with encoded name and region // name that differs from that of the hbase;meta row. 
@@ -1872,8 +1872,8 @@ public class MetaTableAccessor { .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Cell.Type.Put).setValue(Bytes.toBytes(sn.getStartcode())).build()) .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) - .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()).setType(Type.Put) - .setValue(Bytes.toBytes(openSeqNum)).build()); + .setQualifier(getSeqNumColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).setValue(Bytes.toBytes(openSeqNum)).build()); } private static void writeRegionName(ByteArrayOutputStream out, byte[] regionName) { @@ -1922,7 +1922,7 @@ public class MetaTableAccessor { byte[] value = getParentsBytes(parents); put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(REPLICATION_PARENT_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(value).build()); + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(value).build()); } public static Put makePutForReplicationBarrier(RegionInfo regionInfo, long openSeqNum, long ts) @@ -1938,7 +1938,7 @@ public class MetaTableAccessor { public static void addReplicationBarrier(Put put, long openSeqNum) throws IOException { put.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(put.getRow()) .setFamily(HConstants.REPLICATION_BARRIER_FAMILY).setQualifier(HConstants.SEQNUM_QUALIFIER) - .setTimestamp(put.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .setTimestamp(put.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(openSeqNum)) .build()); } @@ -1946,8 +1946,8 @@ public class MetaTableAccessor { CellBuilder builder = CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY); return p .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) - .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()).setType(Type.Put) - .build()) + .setQualifier(getServerColumn(replicaId)).setTimestamp(p.getTimestamp()) + .setType(Cell.Type.Put).build()) .add(builder.clear().setRow(p.getRow()).setFamily(getCatalogFamily()) .setQualifier(getStartCodeColumn(replicaId)).setTimestamp(p.getTimestamp()) .setType(Cell.Type.Put).build()) @@ -2096,7 +2096,7 @@ public class MetaTableAccessor { private static Put addSequenceNum(Put p, long openSeqNum, int replicaId) throws IOException { return p.add(CellBuilderFactory.create(CellBuilderType.SHALLOW_COPY).setRow(p.getRow()) .setFamily(HConstants.CATALOG_FAMILY).setQualifier(getSeqNumColumn(replicaId)) - .setTimestamp(p.getTimestamp()).setType(Type.Put).setValue(Bytes.toBytes(openSeqNum)) + .setTimestamp(p.getTimestamp()).setType(Cell.Type.Put).setValue(Bytes.toBytes(openSeqNum)) .build()); } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java index 0a762bf78a6..b45171e6495 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java @@ -354,6 +354,7 @@ public class RegionLoad implements RegionMetrics { } /** Returns the reference count for the stores of this region */ + @Override public int getStoreRefCount() { return metrics.getStoreRefCount(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java index 
4d6dd6d43fa..4c0390c6c3b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java @@ -208,6 +208,7 @@ public class RegionLocations implements Iterable { * @param other the locations to merge with * @return an RegionLocations object with merged locations or the same object if nothing is merged */ + @SuppressWarnings("ReferenceEquality") public RegionLocations mergeLocations(RegionLocations other) { assert other != null; @@ -280,6 +281,7 @@ public class RegionLocations implements Iterable { * @return an RegionLocations object with updated locations or the same object if nothing is * updated */ + @SuppressWarnings("ReferenceEquality") public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals, boolean force) { assert location != null; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java index 645a31a8552..d915e7a32ca 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java @@ -107,8 +107,8 @@ public interface RegionMetrics { int getStoreRefCount(); /** - * @return the max reference count for any store file among all compacted stores files of this - * region + * Returns the max reference count for any store file among all compacted stores files of this + * region */ int getMaxCompactedStoreFileRefCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 2320c8e908e..714a1412553 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -79,16 +79,17 @@ public class ServerLoad implements ServerMetrics { for (RegionMetrics rl : metrics.getRegionMetrics().values()) { stores += rl.getStoreCount(); storefiles += rl.getStoreFileCount(); - storeUncompressedSizeMB += rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); - storefileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE); - memstoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE); + storeUncompressedSizeMB += (int) rl.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storefileSizeMB += (int) rl.getStoreFileSize().get(Size.Unit.MEGABYTE); + memstoreSizeMB += (int) rl.getMemStoreSize().get(Size.Unit.MEGABYTE); readRequestsCount += rl.getReadRequestCount(); filteredReadRequestsCount += rl.getFilteredReadRequestCount(); writeRequestsCount += rl.getWriteRequestCount(); - storefileIndexSizeKB += rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE); - rootIndexSizeKB += rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); - totalStaticIndexSizeKB += rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); - totalStaticBloomSizeKB += rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); + storefileIndexSizeKB += (long) rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE); + rootIndexSizeKB += (int) rl.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); + totalStaticIndexSizeKB += + (int) rl.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + totalStaticBloomSizeKB += (int) rl.getBloomFilterSize().get(Size.Unit.KILOBYTE); totalCompactingKVs += rl.getCompactingCellCount(); currentCompactedKVs += rl.getCompactedCellCount(); } diff --git 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java index 72f78220c1a..1e57857db69 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java @@ -44,10 +44,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos; @InterfaceAudience.Private public final class ServerMetricsBuilder { - /** - * @param sn the server name - * @return a empty metrics - */ public static ServerMetrics of(ServerName sn) { return newBuilder(sn).build(); } @@ -280,6 +276,7 @@ public final class ServerMetricsBuilder { return versionNumber; } + @Override public String getVersion() { return version; } @@ -383,15 +380,17 @@ public final class ServerMetricsBuilder { int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount(); maxCompactedStoreFileRefCount = Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount); - uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); - storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE); - memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE); - storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); + uncompressedStoreFileSizeMB += + (long) r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE); + storeFileSizeMB += (long) r.getStoreFileSize().get(Size.Unit.MEGABYTE); + memStoreSizeMB += (long) r.getMemStoreSize().get(Size.Unit.MEGABYTE); + storefileIndexSizeKB += + (long) r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE); readRequestsCount += r.getReadRequestCount(); writeRequestsCount += r.getWriteRequestCount(); filteredReadRequestsCount += r.getFilteredReadRequestCount(); - rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); - bloomFilterSizeMB += r.getBloomFilterSize().get(Size.Unit.MEGABYTE); + rootLevelIndexSizeKB += (long) r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE); + bloomFilterSizeMB += (long) r.getBloomFilterSize().get(Size.Unit.MEGABYTE); compactedCellCount += r.getCompactedCellCount(); compactingCellCount += r.getCompactingCellCount(); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java index 05108c70e74..681b1f416c7 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java @@ -50,8 +50,8 @@ public interface UserMetrics { long getWriteRequestCount(); /** - * @return the number of write requests and read requests and coprocessor service requests made by - * the user + * Returns the number of write requests and read requests and coprocessor service requests made by + * the user */ default long getRequestCount() { return getReadRequestCount() + getWriteRequestCount(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java index ab63f19fec8..4a66283146d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hbase; +import java.nio.charset.StandardCharsets; import java.util.HashMap; import 
java.util.Map; import org.apache.hadoop.hbase.util.Strings; @@ -30,7 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos; public final class UserMetricsBuilder { public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) { - UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes()); + UserMetricsBuilder builder = + UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes(StandardCharsets.UTF_8)); userLoad.getClientMetricsList().stream() .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(), clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(), diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java index bb44defbac6..b0a33eda402 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java @@ -27,11 +27,9 @@ abstract class AbstractResponse { public enum ResponseType { - SINGLE(0), - MULTI(1); + SINGLE, + MULTI; - ResponseType(int value) { - } } public abstract ResponseType type(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java index bc1febe3803..4e97dcab24d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java @@ -135,7 +135,7 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry * Typically, you can use lambda expression to implement this interface as * *
    * <pre>
-   * (c, s, d) -> s.xxx(c, your request here, d)
+   * (c, s, d) -&gt; s.xxx(c, your request here, d)
    * </pre>
*/ @FunctionalInterface diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 6a0913c23c7..a2f34ebbd8e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -116,6 +116,7 @@ public interface Admin extends Abortable, Closeable { Connection getConnection(); /** + * Check if a table exists. * @param tableName Table to check. * @return true if table exists already. * @throws IOException if a remote or network exception occurs @@ -267,8 +268,8 @@ public interface Admin extends Abortable, Closeable { * Get a table descriptor. * @param tableName as a {@link TableName} * @return the read-only tableDescriptor - * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws TableNotFoundException if the table was not found + * @throws IOException if a remote or network exception occurs * @deprecated since 2.0 version and will be removed in 3.0 version. Use * {@link #getDescriptor(TableName)}. */ @@ -280,8 +281,8 @@ public interface Admin extends Abortable, Closeable { * Get a table descriptor. * @param tableName as a {@link TableName} * @return the tableDescriptor - * @throws org.apache.hadoop.hbase.TableNotFoundException - * @throws IOException if a remote or network exception occurs + * @throws TableNotFoundException if the table was not found + * @throws IOException if a remote or network exception occurs */ TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException; @@ -290,7 +291,7 @@ public interface Admin extends Abortable, Closeable { * @param desc table descriptor for table * @throws IllegalArgumentException if the table name is reserved * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * @throws TableExistsException if table already exists (If * concurrent threads, the table may * have been created between * test-for-existence and @@ -316,7 +317,7 @@ public interface Admin extends Abortable, Closeable { * @throws IOException if a remote or network exception * occurs * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * @throws TableExistsException if table already exists (If * concurrent threads, the table may * have been created between * test-for-existence and @@ -335,7 +336,7 @@ public interface Admin extends Abortable, Closeable { * split keys are repeated and if the * split key has empty byte array. * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running - * @throws org.apache.hadoop.hbase.TableExistsException if table already exists (If + * @throws TableExistsException if table already exists (If * concurrent threads, the table may * have been created between * test-for-existence and @@ -574,6 +575,7 @@ public interface Admin extends Abortable, Closeable { HTableDescriptor[] disableTables(Pattern pattern) throws IOException; /** + * Check if a table is enabled. 
* @param tableName name of table to check * @return true if table is on-line * @throws IOException if a remote or network exception occurs @@ -581,6 +583,7 @@ public interface Admin extends Abortable, Closeable { boolean isTableEnabled(TableName tableName) throws IOException; /** + * Check if a table is disabled. * @param tableName name of table to check * @return true if table is off-line * @throws IOException if a remote or network exception occurs @@ -588,6 +591,7 @@ public interface Admin extends Abortable, Closeable { boolean isTableDisabled(TableName tableName) throws IOException; /** + * Check if a table is available. * @param tableName name of table to check * @return true if all regions of the table are available * @throws IOException if a remote or network exception occurs @@ -1646,6 +1650,7 @@ public interface Admin extends Abortable, Closeable { ClusterMetrics getClusterMetrics(EnumSet