diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
index d714c47359b..c2385084a3e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CatalogFamilyFormat.java
@@ -349,8 +349,8 @@ public class CatalogFamilyFormat {
   }
 
   /**
-   * @return Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
-   *         match the regex 'info:merge.*' in array of cells.
+   * Returns Deserialized values of &lt;qualifier,regioninfo&gt; pairs taken from column values that
+   * match the regex 'info:merge.*' in array of cells.
    */
   @Nullable
   public static Map<String, RegionInfo> getMergeRegionsWithName(Cell[] cells) {
@@ -376,8 +376,8 @@ public class CatalogFamilyFormat {
   }
 
   /**
-   * @return Deserialized regioninfo values taken from column values that match the regex
-   *         'info:merge.*' in array of cells.
+   * Returns Deserialized regioninfo values taken from column values that match the regex
+   * 'info:merge.*' in array of cells.
    */
   @Nullable
   public static List<RegionInfo> getMergeRegions(Cell[] cells) {
@@ -386,8 +386,8 @@ public class CatalogFamilyFormat {
   }
 
   /**
-   * @return True if any merge regions present in <code>cells</code>; i.e. the column in
-   *         <code>cell</code> matches the regex 'info:merge.*'.
+   * Returns True if any merge regions present in <code>cells</code>; i.e. the column in
+   * <code>cell</code> matches the regex 'info:merge.*'.
    */
   public static boolean hasMergeRegions(Cell[] cells) {
     for (Cell cell : cells) {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
index b75398dd1cf..42bfd757e0d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClientMetaTableAccessor.java
@@ -59,6 +59,7 @@ public final class ClientMetaTableAccessor {
   }
 
   @InterfaceAudience.Private
+  @SuppressWarnings("ImmutableEnumChecker")
   public enum QueryType {
     ALL(HConstants.TABLE_FAMILY, HConstants.CATALOG_FAMILY),
     REGION(HConstants.CATALOG_FAMILY),
@@ -100,11 +101,7 @@ public final class ClientMetaTableAccessor {
     return future;
   }
 
-  /**
-   * Returns the HRegionLocation from meta for the given region n * @param regionName region we're
-   *           looking for
-   * @return HRegionLocation for the given region
-   */
+  /** Returns the HRegionLocation from meta for the given region */
   public static CompletableFuture<Optional<HRegionLocation>>
     getRegionLocation(AsyncTable<?> metaTable, byte[] regionName) {
     CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
@@ -126,11 +123,7 @@ public final class ClientMetaTableAccessor {
     return future;
   }
 
-  /**
-   * Returns the HRegionLocation from meta for the given encoded region name n * @param
-   *           encodedRegionName region we're looking for
-   * @return HRegionLocation for the given region
-   */
+  /** Returns the HRegionLocation from meta for the given encoded region name */
   public static CompletableFuture<Optional<HRegionLocation>>
     getRegionLocationWithEncodedName(AsyncTable<?> metaTable, byte[] encodedRegionName) {
     CompletableFuture<Optional<HRegionLocation>> future = new CompletableFuture<>();
@@ -167,8 +160,9 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Used to get all region locations for the specific table. n * @param tableName table we're
-   *           looking for, can be null for getting all regions
+   * Used to get all region locations for the specific table.
+   * @param metaTable scanner over meta table
+   * @param tableName table we're looking for, can be null for getting all regions
    * @return the list of region locations. The return value will be wrapped by a
    *         {@link CompletableFuture}.
    */
@@ -191,8 +185,9 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Used to get table regions' info and server. n * @param tableName table we're looking for, can
-   *           be null for getting all regions
+   * Used to get table regions' info and server.
+   * @param metaTable scanner over meta table
+   * @param tableName table we're looking for, can be null for getting all regions
    * @param excludeOfflinedSplitParents don't return split parents
    * @return the list of regioninfos and server. The return value will be wrapped by a
    *         {@link CompletableFuture}.
@@ -221,9 +216,11 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Performs a scan of META table for given table. n * @param tableName table withing we scan
-   * @param type    scanned part of meta
-   * @param visitor Visitor invoked against each row
+   * Performs a scan of META table for given table.
+   * @param metaTable scanner over meta table
+   * @param tableName table within we scan
+   * @param type      scanned part of meta
+   * @param visitor   Visitor invoked against each row
    */
   private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
     TableName tableName, QueryType type, final Visitor visitor) {
@@ -232,11 +229,13 @@ public final class ClientMetaTableAccessor {
   }
 
   /**
-   * Performs a scan of META table for given table. n * @param startRow Where to start the scan
-   * @param stopRow Where to stop the scan
-   * @param type    scanned part of meta
-   * @param maxRows maximum rows to return
-   * @param visitor Visitor invoked against each row
+   * Performs a scan of META table for given table.
+   * @param metaTable scanner over meta table
+   * @param startRow  Where to start the scan
+   * @param stopRow   Where to stop the scan
+   * @param type      scanned part of meta
+   * @param maxRows   maximum rows to return
+   * @param visitor   Visitor invoked against each row
    */
   private static CompletableFuture<Void> scanMeta(AsyncTable<AdvancedScanResultConsumer> metaTable,
     byte[] startRow, byte[] stopRow, QueryType type, int maxRows, final Visitor visitor) {
@@ -456,19 +455,12 @@ public final class ClientMetaTableAccessor {
     return scan;
   }
 
-  /**
-   * Returns an HRegionLocationList extracted from the result.
-   * @return an HRegionLocationList containing all locations for the region range or null if we
-   *         can't deserialize the result.
-   */
+  /** Returns an HRegionLocationList extracted from the result. */
   private static Optional<RegionLocations> getRegionLocations(Result r) {
     return Optional.ofNullable(CatalogFamilyFormat.getRegionLocations(r));
   }
 
-  /**
-   * @param tableName table we're working with
-   * @return start row for scanning META according to query type
-   */
+  /** Returns start row for scanning META according to query type */
   public static byte[] getTableStartRowForMeta(TableName tableName, QueryType type) {
     if (tableName == null) {
       return null;
@@ -490,10 +482,7 @@ public final class ClientMetaTableAccessor {
     }
   }
 
-  /**
-   * @param tableName table we're working with
-   * @return stop row for scanning META according to query type
-   */
+  /** Returns stop row for scanning META according to query type */
   public static byte[] getTableStopRowForMeta(TableName tableName, QueryType type) {
     if (tableName == null) {
       return null;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
index 9b7a5de19bd..8c675c4522e 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterId.java
@@ -51,6 +51,7 @@ public class ClusterId {
   }
 
   /**
+   * Parse the serialized representation of the {@link ClusterId}
    * @param bytes A pb serialized {@link ClusterId} instance with pb magic prefix
    * @return An instance of {@link ClusterId} made from <code>bytes</code> n * @see #toByteArray()
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
index 5695f5b65ad..7254209487b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterMetricsBuilder.java
@@ -67,13 +67,13 @@ public final class ClusterMetricsBuilder {
         .collect(Collectors.toList()))
       .addAllTableRegionStatesCount(metrics.getTableRegionStatesCount().entrySet().stream()
         .map(status -> ClusterStatusProtos.TableRegionStatesCount.newBuilder()
-          .setTableName(ProtobufUtil.toProtoTableName((status.getKey())))
+          .setTableName(ProtobufUtil.toProtoTableName(status.getKey()))
           .setRegionStatesCount(ProtobufUtil.toTableRegionStatesCount(status.getValue())).build())
         .collect(Collectors.toList()))
       .addAllDecommissionedServers(metrics.getDecommissionedServerNames().stream()
         .map(ProtobufUtil::toServerName).collect(Collectors.toList()));
     if (metrics.getMasterName() != null) {
-      builder.setMaster(ProtobufUtil.toServerName((metrics.getMasterName())));
+      builder.setMaster(ProtobufUtil.toServerName(metrics.getMasterName()));
     }
     if (metrics.getMasterTasks() != null) {
       builder.addAllMasterTasks(metrics.getMasterTasks().stream()
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
index 94909f1c14e..32e06d61024 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/CoprocessorEnvironment.java
@@ -44,8 +44,8 @@ public interface CoprocessorEnvironment<C extends Coprocessor> {
   int getLoadSequence();
 
   /**
-   * @return a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
-   *         set a configuration.
+   * Returns a Read-only Configuration; throws {@link UnsupportedOperationException} if you try to
+   * set a configuration.
    */
   Configuration getConfiguration();
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
index 3484995c1bf..47a86f9492f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HBaseServerException.java
@@ -44,10 +44,7 @@ public class HBaseServerException extends HBaseIOException {
     this.serverOverloaded = serverOverloaded;
   }
 
-  /**
-   * @param t throwable to check for server overloaded state
-   * @return True if the server was considered overloaded when the exception was thrown
-   */
+  /** Returns True if the server was considered overloaded when the exception was thrown */
   public static boolean isServerOverloaded(Throwable t) {
     if (t instanceof HBaseServerException) {
       return ((HBaseServerException) t).isServerOverloaded();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
index 0decb58bc20..ebf6d919374 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionLocation.java
@@ -100,8 +100,8 @@ public class HRegionLocation implements Comparable<HRegionLocation> {
   }
 
   /**
-   * @return String made of hostname and port formatted as per
-   *         {@link Addressing#createHostAndPortStr(String, int)}
+   * Returns String made of hostname and port formatted as per
+   * {@link Addressing#createHostAndPortStr(String, int)}
    */
   public String getHostnamePort() {
     return Addressing.createHostAndPortStr(this.getHostname(), this.getPort());
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
index 4d6dd6d43fa..4c0390c6c3b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
@@ -208,6 +208,7 @@ public class RegionLocations implements Iterable<HRegionLocation> {
    * @param other the locations to merge with
    * @return an RegionLocations object with merged locations or the same object if nothing is
    *         merged
    */
+  @SuppressWarnings("ReferenceEquality")
   public RegionLocations mergeLocations(RegionLocations other) {
     assert other != null;
@@ -280,6 +281,7 @@ public class RegionLocations implements Iterable<HRegionLocation> {
    * @return an RegionLocations object with updated locations or the same object if nothing is
    *         updated
    */
+  @SuppressWarnings("ReferenceEquality")
   public RegionLocations updateLocation(HRegionLocation location, boolean checkForEquals,
     boolean force) {
     assert location != null;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
index 88527e86442..47b36a7a151 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -53,8 +53,8 @@ public interface RegionMetrics {
   public long getCpRequestCount();
 
   /**
-   * @return the number of write requests and read requests and coprocessor service requests made to
-   *         region
+   * Returns the number of write requests and read requests and coprocessor service requests made to
+   * region
    */
   default long getRequestCount() {
     return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount();
@@ -113,8 +113,8 @@ public interface RegionMetrics {
   int getStoreRefCount();
 
   /**
-   * @return the max reference count for any store file among all compacted stores files of this
-   *         region
+   * Returns the max reference count for any store file among all compacted stores files of this
+   * region
    */
   int getMaxCompactedStoreFileRefCount();
 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
index 99f8520aa36..7a0312f22fd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerMetricsBuilder.java
@@ -44,10 +44,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.HBaseProtos;
 @InterfaceAudience.Private
 public final class ServerMetricsBuilder {
 
-  /**
-   * @param sn the server name
-   * @return a empty metrics
-   */
   public static ServerMetrics of(ServerName sn) {
     return newBuilder(sn).build();
   }
@@ -300,6 +296,7 @@ public final class ServerMetricsBuilder {
       return versionNumber;
     }
 
+    @Override
    public String getVersion() {
       return version;
     }
@@ -414,16 +411,18 @@ public final class ServerMetricsBuilder {
         int currentMaxCompactedStoreFileRefCount = r.getMaxCompactedStoreFileRefCount();
         maxCompactedStoreFileRefCount =
           Math.max(maxCompactedStoreFileRefCount, currentMaxCompactedStoreFileRefCount);
-        uncompressedStoreFileSizeMB += r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
-        storeFileSizeMB += r.getStoreFileSize().get(Size.Unit.MEGABYTE);
-        memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE);
-        storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
+        uncompressedStoreFileSizeMB +=
+          (long) r.getUncompressedStoreFileSize().get(Size.Unit.MEGABYTE);
+        storeFileSizeMB += (long) r.getStoreFileSize().get(Size.Unit.MEGABYTE);
+        memStoreSizeMB += (long) r.getMemStoreSize().get(Size.Unit.MEGABYTE);
+        storefileIndexSizeKB +=
+          (long) r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
         readRequestsCount += r.getReadRequestCount();
         cpRequestsCount += r.getCpRequestCount();
         writeRequestsCount += r.getWriteRequestCount();
         filteredReadRequestsCount += r.getFilteredReadRequestCount();
-        rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
-        bloomFilterSizeMB += r.getBloomFilterSize().get(Size.Unit.MEGABYTE);
+        rootLevelIndexSizeKB += (long) r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
+        bloomFilterSizeMB += (long) r.getBloomFilterSize().get(Size.Unit.MEGABYTE);
         compactedCellCount += r.getCompactedCellCount();
         compactingCellCount += r.getCompactingCellCount();
       }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java
index 05108c70e74..681b1f416c7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetrics.java
@@ -50,8 +50,8 @@ public interface UserMetrics {
   long getWriteRequestCount();
 
   /**
-   * @return the number of write requests and read requests and coprocessor service requests made by
-   *         the user
+   * Returns the number of write requests and read requests and coprocessor service requests made by
+   * the user
    */
   default long getRequestCount() {
     return getReadRequestCount() + getWriteRequestCount();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java
index ab63f19fec8..4a66283146d 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/UserMetricsBuilder.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hbase;
 
+import java.nio.charset.StandardCharsets;
 import java.util.HashMap;
 import java.util.Map;
 import org.apache.hadoop.hbase.util.Strings;
@@ -30,7 +31,8 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.ClusterStatusProtos;
 public final class UserMetricsBuilder {
 
   public static UserMetrics toUserMetrics(ClusterStatusProtos.UserLoad userLoad) {
-    UserMetricsBuilder builder = UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes());
+    UserMetricsBuilder builder =
+      UserMetricsBuilder.newBuilder(userLoad.getUserName().getBytes(StandardCharsets.UTF_8));
     userLoad.getClientMetricsList().stream()
       .map(clientMetrics -> new ClientMetricsImpl(clientMetrics.getHostName(),
         clientMetrics.getReadRequestsCount(), clientMetrics.getWriteRequestsCount(),
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
index bb44defbac6..b0a33eda402 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractResponse.java
@@ -27,11 +27,9 @@ abstract class AbstractResponse {
 
   public enum ResponseType {
 
-    SINGLE(0),
-    MULTI(1);
+    SINGLE,
+    MULTI;
 
-    ResponseType(int value) {
-    }
   }
 
   public abstract ResponseType type();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java
index 2380335e56b..6dd14a520ee 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AbstractRpcBasedConnectionRegistry.java
@@ -135,7 +135,7 @@ abstract class AbstractRpcBasedConnectionRegistry implements ConnectionRegistry
    * Typically, you can use lambda expression to implement this interface as
    *
    * <pre>
-   * (c, s, d) -> s.xxx(c, your request here, d)
+   * (c, s, d) -&gt; s.xxx(c, your request here, d)
    * </pre>
    */
  @FunctionalInterface
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
index 96923ae8462..bdb96ef3c14 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java
@@ -115,6 +115,7 @@ public interface Admin extends Abortable, Closeable {
   Connection getConnection();
 
   /**
+   * Check if a table exists.
    * @param tableName Table to check.
    * @return true if table exists already.
    * @throws IOException if a remote or network exception occurs
@@ -187,8 +188,8 @@ public interface Admin extends Abortable, Closeable {
    * Get a table descriptor.
    * @param tableName as a {@link TableName}
    * @return the tableDescriptor
-   * @throws org.apache.hadoop.hbase.TableNotFoundException
-   * @throws IOException if a remote or network exception occurs
+   * @throws TableNotFoundException if the table was not found
+   * @throws IOException            if a remote or network exception occurs
    */
   TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException;
 
@@ -197,7 +198,7 @@ public interface Admin extends Abortable, Closeable {
    * @param desc table descriptor for table
    * @throws IllegalArgumentException                          if the table name is reserved
    * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
-   * @throws org.apache.hadoop.hbase.TableExistsException      if table already exists (If
+   * @throws TableExistsException                              if table already exists (If
    *                                                           concurrent threads, the table may
    *                                                           have been created between
    *                                                           test-for-existence and
@@ -223,7 +224,7 @@ public interface Admin extends Abortable, Closeable {
    *                                                           occurs
    * @throws IllegalArgumentException                          if the table name is reserved
    * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
-   * @throws org.apache.hadoop.hbase.TableExistsException      if table already exists (If
+   * @throws TableExistsException                              if table already exists (If
    *                                                           concurrent threads, the table may
    *                                                           have been created between
    *                                                           test-for-existence and
@@ -242,7 +243,7 @@ public interface Admin extends Abortable, Closeable {
    *                                                           split keys are repeated and if the
    *                                                           split key has empty byte array.
    * @throws org.apache.hadoop.hbase.MasterNotRunningException if master is not running
-   * @throws org.apache.hadoop.hbase.TableExistsException      if table already exists (If
+   * @throws TableExistsException                              if table already exists (If
    *                                                           concurrent threads, the table may
    *                                                           have been created between
    *                                                           test-for-existence and
@@ -381,6 +382,7 @@ public interface Admin extends Abortable, Closeable {
   }
 
   /**
+   * Check if a table is enabled.
    * @param tableName name of table to check
    * @return true if table is on-line
    * @throws IOException if a remote or network exception occurs
@@ -388,6 +390,7 @@ public interface Admin extends Abortable, Closeable {
   boolean isTableEnabled(TableName tableName) throws IOException;
 
   /**
+   * Check if a table is disabled.
    * @param tableName name of table to check
    * @return true if table is off-line
    * @throws IOException if a remote or network exception occurs
@@ -395,6 +398,7 @@ public interface Admin extends Abortable, Closeable {
   boolean isTableDisabled(TableName tableName) throws IOException;
 
   /**
+   * Check if a table is available.
    * @param tableName name of table to check
    * @return true if all regions of the table are available
    * @throws IOException if a remote or network exception occurs
@@ -1100,6 +1104,7 @@ public interface Admin extends Abortable, Closeable {
   ClusterMetrics getClusterMetrics(EnumSet
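
--
A note on the two mechanical fixes that recur throughout this patch: the (long)
casts in ServerMetricsBuilder and the explicit charset in UserMetricsBuilder.
The sketch below is a hypothetical class illustrating both; it is not part of
the patch:

    import java.nio.charset.StandardCharsets;

    class PatchNotesSketch {
      // Size#get(Size.Unit) returns a double. A compound assignment such as
      // "longTotal += someDouble" narrows the double back to long implicitly,
      // which error-prone's NarrowingCompoundAssignment check flags. The cast
      // keeps the behavior but makes the truncation explicit:
      static long addMegabytes(long totalMb, double sizeMb) {
        return totalMb + (long) sizeMb; // explicit, intentional narrowing
      }

      // String#getBytes() with no argument uses the platform default charset,
      // so the same user name could serialize differently on different JVMs.
      // Pinning UTF-8 makes the bytes deterministic:
      static byte[] userNameBytes(String userName) {
        return userName.getBytes(StandardCharsets.UTF_8);
      }
    }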