From 2043c337d8db2db6692e25f87d5f85bbbab43813 Mon Sep 17 00:00:00 2001
From: Peter Somogyi
Date: Wed, 13 Dec 2017 11:44:58 +0100
Subject: [PATCH] HBASE-19498 Fix findbugs and error-prone warnings in
 hbase-client (branch-2)

Signed-off-by: Michael Stack
Signed-off-by: Apekshit Sharma
---
 .../apache/hadoop/hbase/ClusterStatus.java    |  12 +-
 .../hadoop/hbase/HColumnDescriptor.java       |   1 +
 .../org/apache/hadoop/hbase/HRegionInfo.java  |  42 +++-
 .../InvalidFamilyOperationException.java      |   2 +-
 .../hbase/MasterNotRunningException.java      |   2 +-
 .../hbase/NotServingRegionException.java      |   2 +-
 .../apache/hadoop/hbase/RegionLocations.java  |   2 +-
 .../hadoop/hbase/TableExistsException.java    |   2 +-
 .../hbase/TableNotDisabledException.java      |   2 +-
 .../hbase/ZooKeeperConnectionException.java   |   2 +-
 .../apache/hadoop/hbase/client/Append.java    |   2 +
 .../AsyncAdminRequestRetryingCaller.java      |   1 +
 .../AsyncMasterRequestRpcRetryingCaller.java  |   1 +
 .../hadoop/hbase/client/AsyncProcess.java     |   6 +-
 .../AsyncServerRequestRpcRetryingCaller.java  |   1 +
 .../hbase/client/BufferedMutatorParams.java   |   1 +
 .../client/ColumnFamilyDescriptorBuilder.java |   1 +
 .../client/FastFailInterceptorContext.java    |   3 +
 .../org/apache/hadoop/hbase/client/Get.java   |   1 +
 .../hadoop/hbase/client/HBaseAdmin.java       |  80 +++----
 .../hbase/client/HTableMultiplexer.java       |   2 +-
 .../apache/hadoop/hbase/client/Increment.java |   4 +-
 .../client/NoServerForRegionException.java    |   2 +-
 .../client/PerClientRandomNonceGenerator.java |   2 +
 .../hbase/client/RegionInfoBuilder.java       |   2 +-
 .../hbase/client/RegionServerCallable.java    |   5 +
 .../org/apache/hadoop/hbase/client/Scan.java  |   1 +
 .../coprocessor/LongColumnInterpreter.java    |   3 +-
 .../client/security/SecurityCapability.java   |   2 +-
 .../hadoop/hbase/filter/BinaryComparator.java |   2 +
 .../hbase/filter/BinaryPrefixComparator.java  |   2 +
 .../hadoop/hbase/filter/BitComparator.java    |   2 +
 .../hbase/filter/ColumnCountGetFilter.java    |   2 +
 .../hbase/filter/ColumnPaginationFilter.java  |   2 +
 .../hbase/filter/ColumnPrefixFilter.java      |   2 +
 .../hbase/filter/ColumnRangeFilter.java       |   5 +-
 .../hadoop/hbase/filter/CompareFilter.java    |   1 +
 .../hbase/filter/DependentColumnFilter.java   |   2 +
 .../hadoop/hbase/filter/FamilyFilter.java     |   2 +
 .../hadoop/hbase/filter/FilterBase.java       |   5 +
 .../hadoop/hbase/filter/FilterList.java       |   2 +
 .../hbase/filter/FirstKeyOnlyFilter.java      |   3 +
 ...FirstKeyValueMatchingQualifiersFilter.java |   2 +
 .../hadoop/hbase/filter/FuzzyRowFilter.java   |  12 ++
 .../hbase/filter/InclusiveStopFilter.java     |   4 +
 .../hadoop/hbase/filter/KeyOnlyFilter.java    |   2 +
 .../hbase/filter/MultiRowRangeFilter.java     |   2 +
 .../filter/MultipleColumnPrefixFilter.java    |   2 +
 .../hadoop/hbase/filter/NullComparator.java   |   2 +
 .../hadoop/hbase/filter/PageFilter.java       |  26 ++-
 .../hadoop/hbase/filter/ParseFilter.java      |   7 +-
 .../hadoop/hbase/filter/PrefixFilter.java     |   6 +
 .../hadoop/hbase/filter/QualifierFilter.java  |   2 +
 .../hadoop/hbase/filter/RandomRowFilter.java  |   5 +-
 .../apache/hadoop/hbase/filter/RowFilter.java |   2 +
 .../SingleColumnValueExcludeFilter.java       |   3 +
 .../hbase/filter/SingleColumnValueFilter.java |   6 +
 .../hadoop/hbase/filter/SkipFilter.java       |   5 +
 .../hbase/filter/SubstringComparator.java     |   2 +
 .../hadoop/hbase/filter/TimestampsFilter.java |   3 +
 .../hadoop/hbase/filter/ValueFilter.java      |   3 +-
 .../hadoop/hbase/filter/WhileMatchFilter.java |   4 +
 .../hadoop/hbase/ipc/AbstractRpcClient.java   |   4 +-
 .../hadoop/hbase/ipc/BlockingRpcClient.java   |   1 +
 .../apache/hadoop/hbase/ipc/ConnectionId.java |  10 +-
 .../hadoop/hbase/quotas/QuotaRetriever.java   |   1 +
 .../RegionServerRunningException.java         |   2 +-
 .../hadoop/hbase/security/SaslUtil.java       |   7 +-
 .../hbase/security/access/Permission.java     |   2 +-
 .../security/visibility/VisibilityClient.java | 197 +++++++++---------
 .../shaded/protobuf/ResponseConverter.java    |   2 +-
 .../org/apache/hadoop/hbase/util/PoolMap.java |   4 +-
 .../hadoop/hbase/TestHColumnDescriptor.java   |  21 +-
 .../hadoop/hbase/TestHTableDescriptor.java    |   2 +-
 .../TestInterfaceAudienceAnnotations.java     |  10 +-
 .../hadoop/hbase/client/TestAsyncProcess.java | 118 ++++++-----
 .../client/TestBufferedMutatorParams.java     |  20 +-
 .../client/TestClientExponentialBackoff.java  |   8 +-
 .../hbase/client/TestClientScanner.java       |  27 ++-
 .../TestColumnFamilyDescriptorBuilder.java    |  26 ++-
 .../hbase/client/TestDelayingRunner.java      |   5 +-
 .../hadoop/hbase/client/TestOperation.java    |  22 +-
 .../hbase/client/TestRegionInfoDisplay.java   |  38 ++--
 .../client/TestSimpleRequestController.java   |  33 +--
 .../hbase/client/TestSnapshotFromAdmin.java   |   2 +-
 .../client/TestTableDescriptorBuilder.java    |   2 +-
 .../security/TestHBaseSaslRpcClient.java      |  21 +-
 .../hadoop/hbase/security/TestSaslUtil.java   |  10 +-
 .../org/apache/hadoop/hbase/ServerName.java   |  23 +-
 .../replication/ReplicationTrackerZKImpl.java |   5 +
 .../hadoop/hbase/filter/FilterWrapper.java    |   2 +
 .../hadoop/hbase/zookeeper/EmptyWatcher.java  |   1 +
 .../hadoop/hbase/zookeeper/HQuorumPeer.java   |   3 +-
 .../hbase/zookeeper/ZKLeaderManager.java      |  11 +-
 .../apache/hadoop/hbase/zookeeper/ZKUtil.java |   9 +-
 .../hadoop/hbase/zookeeper/TestZKUtil.java    |   8 +-
 96 files changed, 617 insertions(+), 361 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
index 693b4186a57..f06d9b9f7a3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ClusterStatus.java
@@ -185,7 +185,7 @@ public class ClusterStatus {
     int count = 0;
     if (liveServers != null && !liveServers.isEmpty()) {
       for (Map.Entry<ServerName, ServerLoad> e: this.liveServers.entrySet()) {
-        count += e.getValue().getNumberOfRegions();
+        count = count + e.getValue().getNumberOfRegions();
       }
     }
     return count;
@@ -217,9 +217,7 @@ public class ClusterStatus {
     return hbaseVersion;
   }
 
-  /**
-   * @see java.lang.Object#equals(java.lang.Object)
-   */
+  @Override
   public boolean equals(Object o) {
     if (this == o) {
       return true;
@@ -238,16 +236,13 @@ public class ClusterStatus {
       getMasterInfoPort() == other.getMasterInfoPort();
   }
 
-  /**
-   * @see java.lang.Object#hashCode()
-   */
+  @Override
   public int hashCode() {
     return Objects.hashCode(hbaseVersion, liveServers, deadServers, master, backupMasters,
       clusterId, masterInfoPort);
   }
 
   /**
-   *
    * @return the object version number
    * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
    */
@@ -352,6 +347,7 @@ public class ClusterStatus {
     return masterInfoPort;
   }
 
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder(1024);
     sb.append("Master: " + master);
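Most of the one-line additions in this patch, starting with the ClusterStatus hunks above, simply add @Override. The annotation costs nothing at runtime, but it turns a mismatched signature into a compile error instead of a silently unused overload. A minimal sketch of the bug class it guards against (hypothetical class, not from this patch):

public class BrokenEquals {
  private final int id;

  public BrokenEquals(int id) {
    this.id = id;
  }

  // This overloads Object.equals(Object) instead of overriding it, so
  // HashSet/HashMap never call it. Annotating it with @Override would
  // have made the compiler reject the mismatched signature.
  public boolean equals(BrokenEquals other) {
    return other != null && other.id == this.id;
  }
}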
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
index be59f8ca67c..70392087513 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HColumnDescriptor.java
@@ -428,6 +428,7 @@ public class HColumnDescriptor implements ColumnFamilyDescriptor, Comparable<HColumnDescriptor> {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/HRegionInfo.java
@@ ... @@
   /**
    * @return Return a short, printable name for this region (usually encoded name) for us logging.
    */
+  @Override
   public String getShortNameToLog() {
     return prettyPrint(this.getEncodedName());
   }
@@ -189,7 +190,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   private void setHashCode() {
     int result = Arrays.hashCode(this.regionName);
-    result ^= this.regionId;
+    result = (int) (result ^ this.regionId);
     result ^= Arrays.hashCode(this.startKey);
     result ^= Arrays.hashCode(this.endKey);
     result ^= Boolean.valueOf(this.offLine).hashCode();
@@ -473,6 +474,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   }
 
   /** @return the regionId */
+  @Override
   public long getRegionId(){
     return regionId;
   }
@@ -481,6 +483,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
    * @return the regionName as an array of bytes.
    * @see #getRegionNameAsString()
    */
+  @Override
   public byte [] getRegionName(){
     return regionName;
   }
@@ -488,6 +491,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   /**
    * @return Region name as a String for use in logging, etc.
    */
+  @Override
   public String getRegionNameAsString() {
     if (RegionInfo.hasEncodedName(this.regionName)) {
       // new format region names already have their encoded name.
@@ -500,7 +504,10 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
     return Bytes.toStringBinary(this.regionName) + "." + this.getEncodedName();
   }
 
-  /** @return the encoded region name */
+  /**
+   * @return the encoded region name
+   */
+  @Override
   public synchronized String getEncodedName() {
     if (this.encodedName == null) {
       this.encodedName = RegionInfo.encodeRegionName(this.regionName);
@@ -508,6 +515,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
     return this.encodedName;
   }
 
+  @Override
   public synchronized byte [] getEncodedNameAsBytes() {
     if (this.encodedNameAsBytes == null) {
       this.encodedNameAsBytes = Bytes.toBytes(getEncodedName());
@@ -515,12 +523,18 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
     return this.encodedNameAsBytes;
   }
 
-  /** @return the startKey */
+  /**
+   * @return the startKey
+   */
+  @Override
   public byte [] getStartKey(){
     return startKey;
   }
 
-  /** @return the endKey */
+  /**
+   * @return the endKey
+   */
+  @Override
   public byte [] getEndKey(){
     return endKey;
   }
@@ -529,6 +543,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   /**
    * Get current table name of the region
    * @return TableName
    */
+  @Override
   public TableName getTable() {
     // This method name should be getTableName but there was already a method getTableName
     // that returned a byte array.  It is unfortunate given everywhere else, getTableName returns
@@ -546,6 +561,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
    * ["b","z"] it will return false.
    * @throws IllegalArgumentException if the range passed is invalid (ie. end < start)
    */
+  @Override
   public boolean containsRange(byte[] rangeStartKey, byte[] rangeEndKey) {
     if (Bytes.compareTo(rangeStartKey, rangeEndKey) > 0) {
       throw new IllegalArgumentException(
@@ -561,8 +577,9 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   }
 
   /**
-   * Return true if the given row falls in this region.
+   * @return true if the given row falls in this region.
    */
+  @Override
   public boolean containsRow(byte[] row) {
     return Bytes.compareTo(row, startKey) >= 0 &&
       (Bytes.compareTo(row, endKey) < 0 ||
@@ -576,7 +593,10 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
     return isMetaRegion();
   }
 
-  /** @return true if this region is a meta region */
+  /**
+   * @return true if this region is a meta region
+   */
+  @Override
   public boolean isMetaRegion() {
     return tableName.equals(HRegionInfo.FIRST_META_REGIONINFO.getTable());
   }
@@ -589,8 +609,9 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   }
 
   /**
-   * @return True if has been split and has daughters.
+   * @return true if has been split and has daughters.
    */
+  @Override
   public boolean isSplit() {
     return this.split;
   }
@@ -603,8 +624,9 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   }
 
   /**
-   * @return True if this region is offline.
+   * @return true if this region is offline.
    */
+  @Override
   public boolean isOffline() {
     return this.offLine;
   }
@@ -619,8 +641,9 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   }
 
   /**
-   * @return True if this is a split parent region.
+   * @return true if this is a split parent region.
    */
+  @Override
   public boolean isSplitParent() {
     if (!isSplit()) return false;
     if (!isOffline()) {
@@ -633,6 +656,7 @@ public class HRegionInfo implements RegionInfo, Comparable<HRegionInfo> {
   /**
    * Returns the region replica id
    * @return returns region replica id
    */
+  @Override
   public int getReplicaId() {
     return replicaId;
   }
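The setHashCode() change above is the shape of most arithmetic fixes in this patch: `result ^= this.regionId` compiles even though regionId is a long, because a compound assignment inserts a hidden (int) cast. Spelling the cast out keeps the exact behavior while making the truncation visible — the pattern error-prone flags as a narrowing compound assignment. A standalone illustration, assuming nothing from the patch:

public class NarrowingDemo {
  public static void main(String[] args) {
    int result = 17;
    long regionId = 0x1_0000_0001L;
    // result ^= regionId;  // legal, but secretly means result = (int) (result ^ regionId)
    result = (int) (result ^ regionId); // same math, cast made explicit
    System.out.println(result); // only the low 32 bits of regionId participate
  }
}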
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java
index e9f00f34520..63c26e2c393 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java
@@ -26,7 +26,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class InvalidFamilyOperationException extends DoNotRetryIOException {
-  private static final long serialVersionUID = 1L << 22 - 1L;
+  private static final long serialVersionUID = (1L << 22) - 1L;
   /** default constructor */
   public InvalidFamilyOperationException() {
     super();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java
index ae47995be31..1ff17ac12cc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/MasterNotRunningException.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class MasterNotRunningException extends IOException {
-  private static final long serialVersionUID = 1L << 23 - 1L;
+  private static final long serialVersionUID = (1L << 23) - 1L;
   /** default constructor */
   public MasterNotRunningException() {
     super();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java
index 8b43886b680..6d3ae0ce3f1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/NotServingRegionException.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 @InterfaceAudience.Public
 public class NotServingRegionException extends IOException {
-  private static final long serialVersionUID = 1L << 17 - 1L;
+  private static final long serialVersionUID = (1L << 17) - 1L;
 
   /** default constructor */
   public NotServingRegionException() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
index 8b3fbb4e590..8889dc2baa3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLocations.java
@@ -21,9 +21,9 @@ package org.apache.hadoop.hbase;
 import java.util.Collection;
 
 import org.apache.hadoop.hbase.client.RegionInfo;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Container for holding a list of {@link HRegionLocation}'s that correspond to the
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java
index 3e5bc8b703a..69929d83d17 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableExistsException.java
@@ -25,7 +25,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class TableExistsException extends DoNotRetryIOException {
-  private static final long serialVersionUID = 1L << 7 - 1L;
+  private static final long serialVersionUID = (1L << 7) - 1L;
   /** default constructor */
   public TableExistsException() {
     super();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java
index b0d396d7573..813c4e9334b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/TableNotDisabledException.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hbase.util.Bytes;
  */
 @InterfaceAudience.Public
 public class TableNotDisabledException extends DoNotRetryIOException {
-  private static final long serialVersionUID = 1L << 19 - 1L;
+  private static final long serialVersionUID = (1L << 19) - 1L;
   /** default constructor */
   public TableNotDisabledException() {
     super();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java
index 60776dac245..6c614682461 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ZooKeeperConnectionException.java
@@ -27,7 +27,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class ZooKeeperConnectionException extends IOException {
-  private static final long serialVersionUID = 1L << 23 - 1L;
+  private static final long serialVersionUID = (1L << 23) - 1L;
   /** default constructor */
   public ZooKeeperConnectionException() {
     super();
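All of the serialVersionUID hunks fix one precedence trap: in Java the additive operators bind tighter than shifts, so `1L << 22 - 1L` parses as `1L << (22 - 1L)`, not as a shifted value minus one. The parenthesized form matches what the code visually suggests; note it also changes the constant's value, which presumably was judged acceptable for these exception classes. A quick standalone check:

public class ShiftPrecedenceDemo {
  public static void main(String[] args) {
    System.out.println(1L << 22 - 1L);   // 2097152, i.e. 1L << 21
    System.out.println((1L << 22) - 1L); // 4194303, shift first, then subtract
  }
}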
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
index da07ea6ed08..24e951200e4 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Append.java
@@ -86,6 +86,7 @@ public class Append extends Mutation {
    * A client that is not interested in the result can save network
    * bandwidth setting this to false.
    */
+  @Override
   public Append setReturnResults(boolean returnResults) {
     super.setReturnResults(returnResults);
     return this;
@@ -95,6 +96,7 @@ public class Append extends Mutation {
    * @return current setting for returnResults
    */
   // This method makes public the superclasses's protected method.
+  @Override
   public boolean isReturnResults() {
     return super.isReturnResults();
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java
index f168cbf391e..a320c665151 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncAdminRequestRetryingCaller.java
@@ -70,6 +70,7 @@ public class AsyncAdminRequestRetryingCaller<T> extends AsyncRpcRetryingCaller<T>
   }
 
+  @Override
   public CompletableFuture<T> call() {
     doCall();
     return future;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
index 94220d485d2..c6a2335ff06 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncMasterRequestRpcRetryingCaller.java
@@ -67,6 +67,7 @@ public class AsyncMasterRequestRpcRetryingCaller<T> extends AsyncRpcRetryingCaller<T>
     });
   }
 
+  @Override
   public CompletableFuture<T> call() {
     doCall();
     return future;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
index 5e0da594567..f6e7739047a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncProcess.java
@@ -153,7 +153,7 @@ class AsyncProcess {
   final long pauseForCQTBE;// pause for CallQueueTooBigException, if specified
   final int numTries;
   @VisibleForTesting
-  int serverTrackerTimeout;
+  long serverTrackerTimeout;
   final long primaryCallTimeoutMicroseconds;
   /** Whether to log details for batch errors */
   final boolean logBatchErrorDetails;
@@ -204,9 +204,9 @@ class AsyncProcess {
     // If we keep hitting one server, the net effect will be the incremental backoff, and
     // essentially the same number of retries as planned. If we have to do faster retries,
     // we will do more retries in aggregate, but the user will be none the wiser.
-    this.serverTrackerTimeout = 0;
+    this.serverTrackerTimeout = 0L;
     for (int i = 0; i < this.numTries; ++i) {
-      serverTrackerTimeout += ConnectionUtils.getPauseTime(this.pause, i);
+      serverTrackerTimeout = serverTrackerTimeout + ConnectionUtils.getPauseTime(this.pause, i);
     }
 
     this.rpcCallerFactory = rpcCaller;
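Widening serverTrackerTimeout to long matters because the field accumulates one backoff pause per retry: summing long pause times into an int (the old `+=`) silently narrowed each addition. A standalone sketch of the failure mode, with a crude stand-in for ConnectionUtils.getPauseTime:

public class AccumulationDemo {
  public static void main(String[] args) {
    int narrow = 0;
    long wide = 0L;
    for (int i = 0; i < 40; i++) {
      long pauseMs = 100L << Math.min(i, 24); // stand-in for an exponential, capped pause
      narrow += pauseMs;       // compiles; hidden (int) cast can wrap negative
      wide = wide + pauseMs;   // the patch's shape: all arithmetic stays 64-bit
    }
    System.out.println(narrow + " vs " + wide);
  }
}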
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java
index 07c9a0b1385..dbbe999c165 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncServerRequestRpcRetryingCaller.java
@@ -72,6 +72,7 @@ public class AsyncServerRequestRpcRetryingCaller<T> extends AsyncRpcRetryingCaller<T>
     });
   }
 
+  @Override
   CompletableFuture<T> call() {
     doCall();
     return future;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
index fdb1a4a6f9d..0648501b2de 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/BufferedMutatorParams.java
@@ -151,6 +151,7 @@ public class BufferedMutatorParams implements Cloneable {
    */
   @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="CN_IDIOM_NO_SUPER_CALL",
       justification="The clone below is complete")
+  @Override
   public BufferedMutatorParams clone() {
     BufferedMutatorParams clone = new BufferedMutatorParams(this.tableName);
     clone.writeBufferSize = this.writeBufferSize;
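The BufferedMutatorParams hunk shows the copy-style clone() that findbugs' CN_IDIOM_NO_SUPER_CALL warns about: the method builds a fresh instance instead of calling super.clone(), so the warning is suppressed with a justification and @Override is added on top. The idiom, reduced to a hypothetical value class:

public class Params implements Cloneable {
  private final String tableName;
  private int writeBufferSize;

  public Params(String tableName) {
    this.tableName = tableName;
  }

  // Deliberate field-by-field copy rather than super.clone(); findbugs
  // flags the missing super call, hence the suppression in the patch.
  @Override
  public Params clone() {
    Params copy = new Params(this.tableName);
    copy.writeBufferSize = this.writeBufferSize;
    return copy;
  }
}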
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
index f3786e706a7..6a30de5f894 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ColumnFamilyDescriptorBuilder.java
@@ -895,6 +895,7 @@ public class ColumnFamilyDescriptorBuilder {
      * will mask a later Put with lower ts. Set this to true to enable new semantics of versions.
      * We will also consider mvcc in versions. See HBASE-15968 for details.
      */
+    @Override
     public boolean isNewVersionBehavior() {
       return getStringOrDefault(NEW_VERSION_BEHAVIOR_BYTES,
           Boolean::parseBoolean, DEFAULT_NEW_VERSION_BEHAVIOR);
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java
index 0ea165eb4b5..6b0e79096fc 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/FastFailInterceptorContext.java
@@ -107,6 +107,7 @@ class FastFailInterceptorContext extends RetryingCallerInterceptorContext {
     this.tries = tries;
   }
 
+  @Override
   public void clear() {
     server = null;
     fInfo = null;
@@ -117,10 +118,12 @@ class FastFailInterceptorContext extends RetryingCallerInterceptorContext {
     tries = 0;
   }
 
+  @Override
   public FastFailInterceptorContext prepare(RetryingCallable callable) {
     return prepare(callable, 0);
   }
 
+  @Override
   public FastFailInterceptorContext prepare(RetryingCallable callable, int tries) {
     if (callable instanceof RegionServerCallable) {
       RegionServerCallable retryingCallable = (RegionServerCallable) callable;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
index adce567c107..059a5fd1eaa 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Get.java
@@ -296,6 +296,7 @@ public class Get extends Query
     return this;
   }
 
+  @Override
   public Get setLoadColumnFamiliesOnDemand(boolean value) {
     return (Get) super.setLoadColumnFamiliesOnDemand(value);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
index cd5e60ee3f7..2ea7c74f173 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java
@@ -1,5 +1,4 @@
 /*
- *
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -25,6 +24,7 @@ import com.google.protobuf.RpcController;
 import java.io.Closeable;
 import java.io.IOException;
 import java.io.InterruptedIOException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
@@ -313,7 +313,8 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables) throws IOException {
+  public List<TableDescriptor> listTableDescriptors(Pattern pattern, boolean includeSysTables)
+      throws IOException {
     return executeCallable(new MasterCallable<List<TableDescriptor>>(getConnection(),
         getRpcControllerFactory()) {
       @Override
@@ -327,7 +328,8 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public TableDescriptor getDescriptor(TableName tableName) throws TableNotFoundException, IOException {
+  public TableDescriptor getDescriptor(TableName tableName)
+      throws TableNotFoundException, IOException {
     return getTableDescriptor(tableName, getConnection(), rpcCallerFactory, rpcControllerFactory,
        operationTimeout, rpcTimeout);
   }
@@ -377,7 +379,8 @@ public class HBaseAdmin implements Admin {
     protected List<TableDescriptor> rpcCall() throws Exception {
       GetTableDescriptorsRequest req =
           RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-      return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), req));
+      return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(),
+        req));
     }
   });
 }
@@ -547,20 +550,23 @@ public class HBaseAdmin implements Admin {
   static HTableDescriptor getHTableDescriptor(final TableName tableName, Connection connection,
       RpcRetryingCallerFactory rpcCallerFactory, final RpcControllerFactory rpcControllerFactory,
       int operationTimeout, int rpcTimeout) throws IOException {
-    if (tableName == null) return null;
+    if (tableName == null) {
+      return null;
+    }
     HTableDescriptor htd =
         executeCallable(new MasterCallable<HTableDescriptor>(connection, rpcControllerFactory) {
-      @Override
-      protected HTableDescriptor rpcCall() throws Exception {
-        GetTableDescriptorsRequest req =
-            RequestConverter.buildGetTableDescriptorsRequest(tableName);
-        GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
-        if (!htds.getTableSchemaList().isEmpty()) {
-          return new ImmutableHTableDescriptor(ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
-        }
-        return null;
-      }
-    }, rpcCallerFactory, operationTimeout, rpcTimeout);
+          @Override
+          protected HTableDescriptor rpcCall() throws Exception {
+            GetTableDescriptorsRequest req =
+                RequestConverter.buildGetTableDescriptorsRequest(tableName);
+            GetTableDescriptorsResponse htds = master.getTableDescriptors(getRpcController(), req);
+            if (!htds.getTableSchemaList().isEmpty()) {
+              return new ImmutableHTableDescriptor(
+                  ProtobufUtil.toTableDescriptor(htds.getTableSchemaList().get(0)));
+            }
+            return null;
+          }
+        }, rpcCallerFactory, operationTimeout, rpcTimeout);
     if (htd != null) {
       return new ImmutableHTableDescriptor(htd);
     }
@@ -1146,7 +1152,6 @@ public class HBaseAdmin implements Admin {
   }
 
   /**
-   *
    * @param sn
    * @return List of {@link HRegionInfo}.
    * @throws IOException
@@ -1573,9 +1578,8 @@ public class HBaseAdmin implements Admin {
   public boolean cleanerChoreSwitch(final boolean on) throws IOException {
     return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
       @Override public Boolean rpcCall() throws Exception {
-        return master.setCleanerChoreRunning(getRpcController(), RequestConverter
-            .buildSetCleanerChoreRunningRequest(
-            on)).getPrevValue();
+        return master.setCleanerChoreRunning(getRpcController(),
+          RequestConverter.buildSetCleanerChoreRunningRequest(on)).getPrevValue();
       }
     });
   }
@@ -1584,10 +1588,8 @@ public class HBaseAdmin implements Admin {
   public boolean runCleanerChore() throws IOException {
     return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
       @Override public Boolean rpcCall() throws Exception {
-        return master
-            .runCleanerChore(getRpcController(), RequestConverter
-                .buildRunCleanerChoreRequest())
-            .getCleanerChoreRan();
+        return master.runCleanerChore(getRpcController(),
+          RequestConverter.buildRunCleanerChoreRequest()).getCleanerChoreRan();
       }
     });
   }
@@ -1597,8 +1599,7 @@ public class HBaseAdmin implements Admin {
     return executeCallable(new MasterCallable<Boolean>(getConnection(), getRpcControllerFactory()) {
       @Override public Boolean rpcCall() throws Exception {
         return master.isCleanerChoreEnabled(getRpcController(),
-            RequestConverter.buildIsCleanerChoreEnabledRequest())
-            .getValue();
+          RequestConverter.buildIsCleanerChoreEnabledRequest()).getValue();
       }
     });
   }
@@ -1676,7 +1677,8 @@ public class HBaseAdmin implements Admin {
     byte[][] encodedNameofRegionsToMerge = new byte[nameofRegionsToMerge.length][];
     for(int i = 0; i < nameofRegionsToMerge.length; i++) {
       encodedNameofRegionsToMerge[i] = HRegionInfo.isEncodedRegionName(nameofRegionsToMerge[i]) ?
-        nameofRegionsToMerge[i] : HRegionInfo.encodeRegionName(nameofRegionsToMerge[i]).getBytes();
+        nameofRegionsToMerge[i] : HRegionInfo.encodeRegionName(nameofRegionsToMerge[i])
+          .getBytes(StandardCharsets.UTF_8);
     }
 
     TableName tableName = null;
@@ -1774,7 +1776,7 @@ public class HBaseAdmin implements Admin {
   public Future<Void> splitRegionAsync(byte[] regionName, byte[] splitPoint) throws IOException {
     byte[] encodedNameofRegionToSplit = HRegionInfo.isEncodedRegionName(regionName) ?
-        regionName : HRegionInfo.encodeRegionName(regionName).getBytes();
+        regionName : HRegionInfo.encodeRegionName(regionName).getBytes(StandardCharsets.UTF_8);
     Pair pair = getRegion(regionName);
     if (pair != null) {
       if (pair.getFirst() != null &&
@@ -2355,10 +2357,9 @@ public class HBaseAdmin implements Admin {
     protected HTableDescriptor[] rpcCall() throws Exception {
       GetTableDescriptorsRequest req =
           RequestConverter.buildGetTableDescriptorsRequest(tableNames);
-      return ProtobufUtil.toTableDescriptorList(master.getTableDescriptors(getRpcController(), req))
-          .stream()
-          .map(ImmutableHTableDescriptor::new)
-          .toArray(HTableDescriptor[]::new);
+      return ProtobufUtil
+          .toTableDescriptorList(master.getTableDescriptors(getRpcController(), req)).stream()
+          .map(ImmutableHTableDescriptor::new).toArray(HTableDescriptor[]::new);
     }
   });
 }
@@ -2746,8 +2747,8 @@ public class HBaseAdmin implements Admin {
   }
 
   @Override
-  public byte[] execProcedureWithReturn(String signature, String instance, Map<String, String> props)
-      throws IOException {
+  public byte[] execProcedureWithReturn(String signature, String instance, Map<String,
+      String> props) throws IOException {
     ProcedureDescription desc = ProtobufUtil.buildProcedureDescription(signature, instance,
       props);
     final ExecProcedureRequest request =
         ExecProcedureRequest.newBuilder().setProcedure(desc).build();
@@ -2833,7 +2834,8 @@ public class HBaseAdmin implements Admin {
   private Future<Void> internalRestoreSnapshotAsync(final String snapshotName,
       final TableName tableName, final boolean restoreAcl)
       throws IOException, RestoreSnapshotException {
-    final SnapshotProtos.SnapshotDescription snapshot = SnapshotProtos.SnapshotDescription.newBuilder()
+    final SnapshotProtos.SnapshotDescription snapshot =
+        SnapshotProtos.SnapshotDescription.newBuilder()
         .setName(snapshotName).setTable(tableName.getNameAsString()).build();
 
     // actually restore the snapshot
@@ -2977,9 +2979,8 @@ public class HBaseAdmin implements Admin {
       try {
         internalDeleteSnapshot(snapshot);
       } catch (IOException ex) {
-        LOG.info(
-          "Failed to delete snapshot " + snapshot.getName() + " for table " + snapshot.getTableNameAsString(),
-          ex);
+        LOG.info("Failed to delete snapshot " + snapshot.getName() + " for table "
+                + snapshot.getTableNameAsString(), ex);
       }
     }
   }
@@ -3991,7 +3992,8 @@ public class HBaseAdmin implements Admin {
       getRpcControllerFactory()) {
     @Override
     public List<ServerName> rpcCall() throws ServiceException {
-      ListDecommissionedRegionServersRequest req = ListDecommissionedRegionServersRequest.newBuilder().build();
+      ListDecommissionedRegionServersRequest req =
+          ListDecommissionedRegionServersRequest.newBuilder().build();
       List<ServerName> servers = new ArrayList<>();
       for (HBaseProtos.ServerName server : master
           .listDecommissionedRegionServers(getRpcController(), req).getServerNameList()) {
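The getBytes() changes in HBaseAdmin are default-charset fixes: String.getBytes() and new String(byte[]) use the JVM's platform encoding, so the same encoded region name could produce different bytes on differently configured machines. Passing StandardCharsets.UTF_8 makes the conversion deterministic. A sketch, with an illustrative region-name-like string:

import java.nio.charset.StandardCharsets;

public class CharsetDemo {
  public static void main(String[] args) {
    String encodedName = "1588230740"; // illustrative encoded-name-style value
    byte[] platform = encodedName.getBytes();                   // depends on file.encoding
    byte[] utf8 = encodedName.getBytes(StandardCharsets.UTF_8); // identical everywhere
    System.out.println(platform.length + " " + utf8.length);
    System.out.println(new String(utf8, StandardCharsets.UTF_8)); // decode explicitly too
  }
}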
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
index 3a6e3b41f8c..77d4fb2923f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HTableMultiplexer.java
@@ -468,7 +468,7 @@ public class HTableMultiplexer {
   }
 
   public long getTotalBufferedCount() {
-    return queue.size() + currentProcessingCount.get();
+    return (long) queue.size() + currentProcessingCount.get();
   }
 
   public AtomicAverageCounter getAverageLatencyCounter() {
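getTotalBufferedCount() returns long, but both operands are apparently ints, so the old code did the addition in 32-bit arithmetic and widened only the (possibly wrapped) result. Casting one operand first forces the sum itself into 64 bits:

public class WidenFirstDemo {
  public static void main(String[] args) {
    int queued = 2_000_000_000;
    int processing = 2_000_000_000;
    long wrong = queued + processing;        // int overflow happens before widening
    long right = (long) queued + processing; // widen first, then add in 64 bits
    System.out.println(wrong + " vs " + right); // -294967296 vs 4000000000
  }
}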
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
index 52c0c598970..27cdafeb23b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Increment.java
@@ -47,7 +47,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class Increment extends Mutation implements Comparable<Row> {
-  private static final long HEAP_OVERHEAD = ClassSize.REFERENCE + ClassSize.TIMERANGE;
+  private static final int HEAP_OVERHEAD = ClassSize.REFERENCE + ClassSize.TIMERANGE;
   private TimeRange tr = new TimeRange();
 
   /**
@@ -164,6 +164,7 @@ public class Increment extends Mutation implements Comparable<Row> {
    * client that is not interested in the result can save network bandwidth setting this
    * to false.
    */
+  @Override
   public Increment setReturnResults(boolean returnResults) {
     super.setReturnResults(returnResults);
     return this;
@@ -173,6 +174,7 @@ public class Increment extends Mutation implements Comparable<Row> {
    * @return current setting for returnResults
    */
   // This method makes public the superclasses's protected method.
+  @Override
   public boolean isReturnResults() {
     return super.isReturnResults();
   }
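HEAP_OVERHEAD is built from int fields of ClassSize, and the sum is computed in int arithmetic no matter what the left-hand side declares, so the long type only disguised the actual math. Making the constant an int keeps the declaration honest; call sites needing 64 bits can widen explicitly. A sketch with stand-in values:

public class ConstantTypeDemo {
  // Stand-ins for ClassSize.REFERENCE and ClassSize.TIMERANGE, which are ints.
  static final int REFERENCE = 8;
  static final int TIMERANGE = 40;
  static final int HEAP_OVERHEAD = REFERENCE + TIMERANGE; // int math either way

  public static void main(String[] args) {
    long scaled = (long) HEAP_OVERHEAD * 100_000_000; // widen where long range is needed
    System.out.println(scaled);
  }
}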
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java
index 2f69d120cf3..184f0c0bc0f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java
@@ -25,7 +25,7 @@ import org.apache.yetus.audience.InterfaceAudience;
  */
 @InterfaceAudience.Public
 public class NoServerForRegionException extends DoNotRetryRegionException {
-  private static final long serialVersionUID = 1L << 11 - 1L;
+  private static final long serialVersionUID = (1L << 11) - 1L;
 
   /** default constructor */
   public NoServerForRegionException() {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java
index ae75d74985e..c492282e2a7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/PerClientRandomNonceGenerator.java
@@ -41,10 +41,12 @@ public final class PerClientRandomNonceGenerator implements NonceGenerator {
     this.clientId = (((long) Arrays.hashCode(clientIdBase)) << 32) + rdm.nextInt();
   }
 
+  @Override
   public long getNonceGroup() {
     return this.clientId;
   }
 
+  @Override
   public long newNonce() {
     long result = HConstants.NO_NONCE;
     do {
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
index 29e146d7035..e17e307205c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionInfoBuilder.java
@@ -168,7 +168,7 @@ public class RegionInfoBuilder {
       final byte[] endKey, final long regionId,
       final int replicaId, boolean offLine, byte[] regionName) {
     int result = Arrays.hashCode(regionName);
-    result ^= regionId;
+    result = (int) (result ^ regionId);
     result ^= Arrays.hashCode(checkStartKey(startKey));
     result ^= Arrays.hashCode(checkEndKey(endKey));
     result ^= Boolean.valueOf(offLine).hashCode();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
index 1c238b9a50b..9c0f553a8e3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionServerCallable.java
@@ -106,6 +106,7 @@ public abstract class RegionServerCallable implements RetryingCallable
    * Override that changes call Exception from {@link Exception} to {@link IOException}.
    * Also does set up of the rpcController.
    */
+  @Override
   public T call(int callTimeout) throws IOException {
     try {
       // Iff non-null and an instance of a SHADED rpcController, do config! Unshaded -- i.e.
@@ -183,6 +184,7 @@ public abstract class RegionServerCallable implements RetryingCallable
   protected int getPriority() { return this.priority;}
 
+  @Override
   public void throwable(Throwable t, boolean retrying) {
     if (location != null) {
       getConnection().updateCachedLocations(tableName, location.getRegionInfo().getRegionName(),
@@ -190,10 +192,12 @@ public abstract class RegionServerCallable implements RetryingCallable
     }
   }
 
+  @Override
   public String getExceptionMessageAdditionalDetail() {
     return "row '" + Bytes.toString(row) + "' on table '" + tableName + "' at " + location;
   }
 
+  @Override
   public long sleep(long pause, int tries) {
     return ConnectionUtils.getPauseTime(pause, tries);
   }
@@ -208,6 +212,7 @@ public abstract class RegionServerCallable implements RetryingCallable
     return this.location.getRegionInfo();
   }
 
+  @Override
   public void prepare(final boolean reload) throws IOException {
     // check table state if this is a retry
     if (reload && tableName != null && !tableName.equals(TableName.META_TABLE_NAME)
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
index 6357d6d09c5..266785854ed 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Scan.java
@@ -894,6 +894,7 @@ public class Scan extends Query {
     return allowPartialResults;
   }
 
+  @Override
   public Scan setLoadColumnFamiliesOnDemand(boolean value) {
     return (Scan) super.setLoadColumnFamiliesOnDemand(value);
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java
index 18b3c5b7859..2cc4bb21e70 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/LongColumnInterpreter.java
@@ -43,6 +43,7 @@ public class LongColumnInterpreter extends ColumnInterpreter {
 
+  @Override
   public Long getValue(byte[] colFamily, byte[] colQualifier, Cell kv)
       throws IOException {
     if (kv == null || kv.getValueLength() != Bytes.SIZEOF_LONG)
@@ -50,7 +51,7 @@ public class LongColumnInterpreter extends ColumnInterpreter
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/FuzzyRowFilter.java
@@ ... @@
     for (Pair fuzzyData : fuzzyKeysData) {
@@ -457,45 +458,55 @@ public class FuzzyRowFilter extends FilterBase {
   /** Abstracts directional comparisons based on scan direction. */
   private enum Order {
     ASC {
+      @Override
       public boolean lt(int lhs, int rhs) {
         return lhs < rhs;
       }
 
+      @Override
       public boolean gt(int lhs, int rhs) {
         return lhs > rhs;
       }
 
+      @Override
       public byte inc(byte val) {
         // TODO: what about over/underflow?
         return (byte) (val + 1);
       }
 
+      @Override
       public boolean isMax(byte val) {
         return val == (byte) 0xff;
       }
 
+      @Override
       public byte min() {
         return 0;
       }
     },
     DESC {
+      @Override
       public boolean lt(int lhs, int rhs) {
         return lhs > rhs;
       }
 
+      @Override
       public boolean gt(int lhs, int rhs) {
         return lhs < rhs;
       }
 
+      @Override
       public byte inc(byte val) {
         // TODO: what about over/underflow?
         return (byte) (val - 1);
       }
 
+      @Override
       public boolean isMax(byte val) {
         return val == 0;
       }
 
+      @Override
       public byte min() {
         return (byte) 0xFF;
       }
@@ -618,6 +629,7 @@ public class FuzzyRowFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized are equal to the
    *         corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FuzzyRowFilter)) return false;
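The FuzzyRowFilter hunks extend the @Override sweep into enum constant bodies: ASC and DESC each implement the enum's abstract comparison methods, and the annotation makes the compiler check those signatures too. The pattern in miniature (standalone sketch, not the patch's enum):

public enum Direction {
  ASC {
    @Override
    public boolean lt(int lhs, int rhs) {
      return lhs < rhs;
    }
  },
  DESC {
    @Override
    public boolean lt(int lhs, int rhs) {
      return lhs > rhs; // reversed comparison for descending scans
    }
  };

  // Every constant body must implement this; @Override catches typos there.
  public abstract boolean lt(int lhs, int rhs);
}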
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
index 6e21ba40b0c..5969ba73c18 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/InclusiveStopFilter.java
@@ -63,6 +63,7 @@ public class InclusiveStopFilter extends FilterBase {
     return ReturnCode.INCLUDE;
   }
 
+  @Override
   public boolean filterRowKey(Cell firstRowCell) {
     // if stopRowKey is <= buffer, then true, filter row.
     if (filterAllRemaining()) return true;
@@ -71,6 +72,7 @@ public class InclusiveStopFilter extends FilterBase {
     return done;
   }
 
+  @Override
   public boolean filterAllRemaining() {
     return done;
   }
@@ -85,6 +87,7 @@ public class InclusiveStopFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.InclusiveStopFilter.Builder builder =
       FilterProtos.InclusiveStopFilter.newBuilder();
@@ -115,6 +118,7 @@ public class InclusiveStopFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof InclusiveStopFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
index b23677b4573..606728eb8ac 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/KeyOnlyFilter.java
@@ -91,6 +91,7 @@ public class KeyOnlyFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.KeyOnlyFilter.Builder builder =
       FilterProtos.KeyOnlyFilter.newBuilder();
@@ -120,6 +121,7 @@ public class KeyOnlyFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof KeyOnlyFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
index d0253490ed9..0911d148c72 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultiRowRangeFilter.java
@@ -146,6 +146,7 @@ public class MultiRowRangeFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte[] toByteArray() {
     FilterProtos.MultiRowRangeFilter.Builder builder = FilterProtos.MultiRowRangeFilter
         .newBuilder();
@@ -194,6 +195,7 @@ public class MultiRowRangeFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized are equal to the
    *         corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this)
       return true;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
index 88af4f4c640..90e97c47ede 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/MultipleColumnPrefixFilter.java
@@ -118,6 +118,7 @@ public class MultipleColumnPrefixFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.MultipleColumnPrefixFilter.Builder builder =
       FilterProtos.MultipleColumnPrefixFilter.newBuilder();
@@ -155,6 +156,7 @@ public class MultipleColumnPrefixFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof MultipleColumnPrefixFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
index 6a0f234b7a2..08f37c9c019 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/NullComparator.java
@@ -67,6 +67,7 @@ public class NullComparator extends ByteArrayComparable {
   /**
    * @return The comparator serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     ComparatorProtos.NullComparator.Builder builder =
       ComparatorProtos.NullComparator.newBuilder();
@@ -95,6 +96,7 @@ public class NullComparator extends ByteArrayComparable {
    * @return true if and only if the fields of the comparator that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(ByteArrayComparable other) {
     if (other == this) return true;
     if (!(other instanceof NullComparator)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
index 89498962561..91c071ec0d6 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PageFilter.java
@@ -22,12 +22,13 @@ import java.io.IOException;
 import java.util.ArrayList;
 
 import org.apache.hadoop.hbase.Cell;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.DeserializationException;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
+import org.apache.hadoop.hbase.shaded.protobuf.generated.FilterProtos;
+
 /**
  * Implementation of Filter interface that limits results to a specific page
  * size. It terminates scanning once the number of filter-passed rows is >
@@ -75,16 +76,19 @@ public class PageFilter extends FilterBase {
   public ReturnCode filterCell(final Cell ignored) throws IOException {
     return ReturnCode.INCLUDE;
   }
-
+
+  @Override
   public boolean filterAllRemaining() {
     return this.rowsAccepted >= this.pageSize;
   }
 
+  @Override
   public boolean filterRow() {
     this.rowsAccepted++;
     return this.rowsAccepted > this.pageSize;
   }
 
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -99,6 +103,7 @@ public class PageFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.PageFilter.Builder builder =
       FilterProtos.PageFilter.newBuilder();
@@ -124,13 +129,18 @@ public class PageFilter extends FilterBase {
   }
 
   /**
-   * @param other
-   * @return true if and only if the fields of the filter that are serialized
-   * are equal to the corresponding fields in other. Used for testing.
+   * @param o other Filter to compare with
+   * @return true if and only if the fields of the filter that are serialized are equal to the
+   *         corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
-    if (o == this) return true;
-    if (!(o instanceof PageFilter)) return false;
+    if (o == this) {
+      return true;
+    }
+    if (!(o instanceof PageFilter)) {
+      return false;
+    }
     PageFilter other = (PageFilter)o;
     return this.getPageSize() == other.getPageSize();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
index 7d86baac280..6ebe2fe9116 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ParseFilter.java
@@ -22,6 +22,7 @@ import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.nio.ByteBuffer;
 import java.nio.charset.CharacterCodingException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Collections;
 import java.util.EmptyStackException;
@@ -261,7 +262,7 @@ public class ParseFilter {
       e.printStackTrace();
     }
     throw new IllegalArgumentException("Incorrect filter string " +
-        new String(filterStringAsByteArray));
+        new String(filterStringAsByteArray, StandardCharsets.UTF_8));
   }
 
   /**
@@ -837,9 +838,9 @@ public class ParseFilter {
     else if (Bytes.equals(comparatorType, ParseConstants.binaryPrefixType))
       return new BinaryPrefixComparator(comparatorValue);
     else if (Bytes.equals(comparatorType, ParseConstants.regexStringType))
-      return new RegexStringComparator(new String(comparatorValue));
+      return new RegexStringComparator(new String(comparatorValue, StandardCharsets.UTF_8));
     else if (Bytes.equals(comparatorType, ParseConstants.substringType))
-      return new SubstringComparator(new String(comparatorValue));
+      return new SubstringComparator(new String(comparatorValue, StandardCharsets.UTF_8));
     else
       throw new IllegalArgumentException("Incorrect comparatorType");
   }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
index 4fb23709307..161c1a5f8ea 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/PrefixFilter.java
@@ -50,6 +50,7 @@ public class PrefixFilter extends FilterBase {
     return prefix;
   }
 
+  @Override
   public boolean filterRowKey(Cell firstRowCell) {
     if (firstRowCell == null || this.prefix == null)
       return true;
@@ -87,14 +88,17 @@ public class PrefixFilter extends FilterBase {
     return ReturnCode.INCLUDE;
   }
 
+  @Override
   public boolean filterRow() {
     return filterRow;
   }
 
+  @Override
   public void reset() {
     filterRow = true;
   }
 
+  @Override
   public boolean filterAllRemaining() {
     return passedPrefix;
   }
@@ -109,6 +113,7 @@ public class PrefixFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.PrefixFilter.Builder builder =
       FilterProtos.PrefixFilter.newBuilder();
@@ -138,6 +143,7 @@ public class PrefixFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof PrefixFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
index 8f3c859e7e2..3d38dc580dd 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java
@@ -97,6 +97,7 @@ public class QualifierFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.QualifierFilter.Builder builder =
       FilterProtos.QualifierFilter.newBuilder();
@@ -135,6 +136,7 @@ public class QualifierFilter extends CompareFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof QualifierFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
index 1a1248ef950..7fea6e55d5b 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RandomRowFilter.java
@@ -87,7 +87,8 @@ public class RandomRowFilter extends FilterBase {
   public boolean filterRow() {
     return filterOutRow;
   }
-
+
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -115,6 +116,7 @@ public class RandomRowFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.RandomRowFilter.Builder builder =
       FilterProtos.RandomRowFilter.newBuilder();
@@ -144,6 +146,7 @@ public class RandomRowFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof RandomRowFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
index bfd88b9e8a9..88ff6c12919 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java
@@ -114,6 +114,7 @@ public class RowFilter extends CompareFilter {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.RowFilter.Builder builder =
       FilterProtos.RowFilter.newBuilder();
@@ -152,6 +153,7 @@ public class RowFilter extends CompareFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof RowFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
index 677cff9098c..f39834a975f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueExcludeFilter.java
@@ -156,6 +156,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   }
 
   // We cleaned result row in FilterRow to be consistent with scanning process.
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
@@ -190,6 +191,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     FilterProtos.SingleColumnValueExcludeFilter.Builder builder =
       FilterProtos.SingleColumnValueExcludeFilter.newBuilder();
@@ -232,6 +234,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof SingleColumnValueExcludeFilter)) return false;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
index 498b58ffa0f..d95320a06b1 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java
@@ -278,16 +278,19 @@ public class SingleColumnValueFilter extends FilterBase {
     return CompareFilter.compare(this.op, compareResult);
   }
 
+  @Override
   public boolean filterRow() {
     // If column was found, return false if it was matched, true if it was not
     // If column not found, return true if we filter if missing, false if not
     return this.foundColumn? !this.matchedColumn: this.filterIfMissing;
   }
 
+  @Override
   public boolean hasFilterRow() {
     return true;
   }
 
+  @Override
   public void reset() {
     foundColumn = false;
     matchedColumn = false;
@@ -387,6 +390,7 @@ public class SingleColumnValueFilter extends FilterBase {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte [] toByteArray() {
     return convert().toByteArray();
   }
@@ -425,6 +429,7 @@ public class SingleColumnValueFilter extends FilterBase {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other. Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof SingleColumnValueFilter)) return false;
@@ -443,6 +448,7 @@ public class SingleColumnValueFilter extends FilterBase {
    * column in whole scan. If filterIfMissing == false, all families are essential,
    * because of possibility of skipping the rows without any data in filtered CF.
    */
*/ + @Override public boolean isFamilyEssential(byte[] name) { return !this.filterIfMissing || Bytes.equals(name, this.columnFamily); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java index adfe1c13509..d099e325f32 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SkipFilter.java @@ -96,10 +96,12 @@ public class SkipFilter extends FilterBase { return filter.transformCell(v); } + @Override public boolean filterRow() { return filterRow; } + @Override public boolean hasFilterRow() { return true; } @@ -107,6 +109,7 @@ public class SkipFilter extends FilterBase { /** * @return The filter serialized using pb */ + @Override public byte[] toByteArray() throws IOException { FilterProtos.SkipFilter.Builder builder = FilterProtos.SkipFilter.newBuilder(); @@ -140,6 +143,7 @@ public class SkipFilter extends FilterBase { * @return true if and only if the fields of the filter that are serialized * are equal to the corresponding fields in other. Used for testing. */ + @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof SkipFilter)) return false; @@ -148,6 +152,7 @@ public class SkipFilter extends FilterBase { return getFilter().areSerializedFieldsEqual(other.getFilter()); } + @Override public boolean isFamilyEssential(byte[] name) throws IOException { return filter.isFamilyEssential(name); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java index 69d933f2648..3a33116ff66 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SubstringComparator.java @@ -70,6 +70,7 @@ public class SubstringComparator extends ByteArrayComparable { /** * @return The comparator serialized using pb */ + @Override public byte [] toByteArray() { ComparatorProtos.SubstringComparator.Builder builder = ComparatorProtos.SubstringComparator.newBuilder(); @@ -99,6 +100,7 @@ public class SubstringComparator extends ByteArrayComparable { * @return true if and only if the fields of the comparator that are serialized * are equal to the corresponding fields in other. Used for testing. */ + @Override boolean areSerializedFieldsEqual(ByteArrayComparable other) { if (other == this) return true; if (!(other instanceof SubstringComparator)) return false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java index 2742c7fce82..e0ec83a30a5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/TimestampsFilter.java @@ -135,6 +135,7 @@ public class TimestampsFilter extends FilterBase { * * @throws IOException This will never happen. 
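[Note] The filter hunks above and below consist almost entirely of @Override annotations added to methods that already override FilterBase or ByteArrayComparable; error-prone's MissingOverride check flags these because the annotation makes the compiler verify the override relationship. A minimal sketch of what it guards against, using hypothetical classes that are not part of HBase:

    abstract class Base {
      boolean areSerializedFieldsEqual(Base other) {
        return true;
      }
    }

    class Child extends Base {
      // With @Override, accidentally changing the parameter type to Child
      // (which creates an overload, not an override) becomes a compile error
      // instead of a silent bug where Base's version keeps being called
      // through Base references.
      @Override
      boolean areSerializedFieldsEqual(Base other) {
        return other instanceof Child;
      }
    }
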
*/ + @Override public Cell getNextCellHint(Cell currentCell) throws IOException { if (!canHint) { return null; @@ -172,6 +173,7 @@ public class TimestampsFilter extends FilterBase { /** * @return The filter serialized using pb */ + @Override public byte[] toByteArray() { FilterProtos.TimestampsFilter.Builder builder = FilterProtos.TimestampsFilter.newBuilder(); @@ -203,6 +205,7 @@ public class TimestampsFilter extends FilterBase { * @return true if and only if the fields of the filter that are serialized * are equal to the corresponding fields in other. Used for testing. */ + @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof TimestampsFilter)) return false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java index 17de4ff2c52..805d7dbb082 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/ValueFilter.java @@ -93,6 +93,7 @@ public class ValueFilter extends CompareFilter { /** * @return The filter serialized using pb */ + @Override public byte [] toByteArray() { FilterProtos.ValueFilter.Builder builder = FilterProtos.ValueFilter.newBuilder(); @@ -128,10 +129,10 @@ public class ValueFilter extends CompareFilter { } /** - * @return true if and only if the fields of the filter that are serialized * are equal to the corresponding fields in other. Used for testing. */ + @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof ValueFilter)) return false; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java index ab6200f4fc7..a6fa27ac0db 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/WhileMatchFilter.java @@ -49,6 +49,7 @@ public class WhileMatchFilter extends FilterBase { return filter; } + @Override public void reset() throws IOException { this.filter.reset(); } @@ -110,6 +111,7 @@ public class WhileMatchFilter extends FilterBase { /** * @return The filter serialized using pb */ + @Override public byte[] toByteArray() throws IOException { FilterProtos.WhileMatchFilter.Builder builder = FilterProtos.WhileMatchFilter.newBuilder(); @@ -143,6 +145,7 @@ public class WhileMatchFilter extends FilterBase { * @return true if and only if the fields of the filter that are serialized * are equal to the corresponding fields in other. Used for testing. 
*/ + @Override boolean areSerializedFieldsEqual(Filter o) { if (o == this) return true; if (!(o instanceof WhileMatchFilter)) return false; @@ -151,6 +154,7 @@ public class WhileMatchFilter extends FilterBase { return getFilter().areSerializedFieldsEqual(other.getFilter()); } + @Override public boolean isFamilyEssential(byte[] name) throws IOException { return filter.isFamilyEssential(name); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java index 22da05a57fd..0e7f376ff6b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/AbstractRpcClient.java @@ -243,7 +243,7 @@ public abstract class AbstractRpcClient implements RpcC return null; } try { - return (Codec) Class.forName(className).newInstance(); + return (Codec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new RuntimeException("Failed getting codec " + className, e); } @@ -271,7 +271,7 @@ public abstract class AbstractRpcClient implements RpcC return null; } try { - return (CompressionCodec) Class.forName(className).newInstance(); + return (CompressionCodec) Class.forName(className).getDeclaredConstructor().newInstance(); } catch (Exception e) { throw new RuntimeException("Failed getting compressor " + className, e); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java index 2512d7b36ef..8ab20ffed05 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcClient.java @@ -67,6 +67,7 @@ public class BlockingRpcClient extends AbstractRpcClient * Creates a connection. Can be overridden by a subclass for testing. * @param remoteId - the ConnectionId to use for the connection creation. */ + @Override protected BlockingRpcConnection createConnection(ConnectionId remoteId) throws IOException { return new BlockingRpcConnection(this, remoteId); } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java index b50d4237ff7..1396f1e7abc 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ConnectionId.java @@ -1,4 +1,4 @@ -/** +/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -18,9 +18,10 @@ package org.apache.hadoop.hbase.ipc; import java.net.InetSocketAddress; +import java.util.Objects; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.security.User; +import org.apache.yetus.audience.InterfaceAudience; /** * This class holds the address and the user ticket, etc. 
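[Note] For context on the AbstractRpcClient hunk just above: Class.newInstance() propagates any exception the no-arg constructor throws, including checked exceptions the caller never declared, which is why error-prone flags it and Java 9 later deprecated it. Reflecting through getDeclaredConstructor() wraps constructor failures in InvocationTargetException instead. A self-contained sketch of the replacement pattern, keeping the patch's catch-and-wrap handling:

    import java.lang.reflect.InvocationTargetException;

    final class ReflectiveFactory {
      static Object create(String className) {
        try {
          // A throwing constructor now surfaces as InvocationTargetException
          // rather than an undeclared checked exception.
          return Class.forName(className).getDeclaredConstructor().newInstance();
        } catch (ClassNotFoundException | NoSuchMethodException
            | InstantiationException | IllegalAccessException
            | InvocationTargetException e) {
          throw new RuntimeException("Failed instantiating " + className, e);
        }
      }
    }
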
The client connections @@ -62,8 +63,7 @@ class ConnectionId { ConnectionId id = (ConnectionId) obj; return address.equals(id.address) && ((ticket != null && ticket.equals(id.ticket)) || - (ticket == id.ticket)) && - this.serviceName == id.serviceName; + (ticket == id.ticket)) && Objects.equals(this.serviceName, id.serviceName); } return false; } @@ -73,7 +73,7 @@ class ConnectionId { return hashCode(ticket,serviceName,address); } - public static int hashCode(User ticket, String serviceName, InetSocketAddress address){ + public static int hashCode(User ticket, String serviceName, InetSocketAddress address) { return (address.hashCode() + PRIME * (PRIME * serviceName.hashCode() ^ (ticket == null ? 0 : ticket.hashCode()))); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java index 3cd0b4cb9da..5d1634a392b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/quotas/QuotaRetriever.java @@ -85,6 +85,7 @@ public class QuotaRetriever implements Closeable, Iterable { } } + @Override public void close() throws IOException { if (this.table != null) { this.table.close(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java index b3a4cd3c00b..6ff3e0d8978 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/regionserver/RegionServerRunningException.java @@ -28,7 +28,7 @@ import org.apache.yetus.audience.InterfaceAudience; */ @InterfaceAudience.Public public class RegionServerRunningException extends IOException { - private static final long serialVersionUID = 1L << 31 - 1L; + private static final long serialVersionUID = (1L << 31) - 1L; /** Default Constructor */ public RegionServerRunningException() { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java index b367874f249..b30715a1e2d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/SaslUtil.java @@ -18,6 +18,7 @@ */ package org.apache.hadoop.hbase.security; +import java.nio.charset.StandardCharsets; import java.util.Map; import java.util.TreeMap; @@ -68,15 +69,15 @@ public class SaslUtil { } static String encodeIdentifier(byte[] identifier) { - return new String(Base64.encodeBase64(identifier)); + return new String(Base64.encodeBase64(identifier), StandardCharsets.UTF_8); } static byte[] decodeIdentifier(String identifier) { - return Base64.decodeBase64(identifier.getBytes()); + return Base64.decodeBase64(identifier.getBytes(StandardCharsets.UTF_8)); } static char[] encodePassword(byte[] password) { - return new String(Base64.encodeBase64(password)).toCharArray(); + return new String(Base64.encodeBase64(password), StandardCharsets.UTF_8).toCharArray(); } /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java index c9c587ef869..7ff311ef560 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/access/Permission.java @@ -46,7 +46,7 @@ public class Permission extends VersionedWritable { public enum Action { READ('R'), WRITE('W'), EXEC('X'), CREATE('C'), ADMIN('A'); - private byte code; + private final byte code; Action(char code) { this.code = (byte)code; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java index ba1f1f2a0d1..35564d626e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityClient.java @@ -19,13 +19,15 @@ package org.apache.hadoop.hbase.security.visibility; import static org.apache.hadoop.hbase.security.visibility.VisibilityConstants.LABELS_TABLE_NAME; +import com.google.protobuf.ByteString; +import com.google.protobuf.ServiceException; + import java.io.IOException; import java.util.Map; import java.util.regex.Pattern; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.yetus.audience.InterfaceAudience; import org.apache.hadoop.hbase.client.Connection; import org.apache.hadoop.hbase.client.ConnectionFactory; import org.apache.hadoop.hbase.client.Table; @@ -44,9 +46,8 @@ import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.Visibil import org.apache.hadoop.hbase.protobuf.generated.VisibilityLabelsProtos.VisibilityLabelsService; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.yetus.audience.InterfaceAudience; -import com.google.protobuf.ByteString; -import com.google.protobuf.ServiceException; /** * Utility client for doing visibility labels admin operations. 
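[Note] Two small correctness fixes sit in the hunks above. In ConnectionId, this.serviceName == id.serviceName compared String references, so equal service names held in distinct String objects would report unequal; Objects.equals compares by value and is null-safe. In RegionServerRunningException, 1L << 31 - 1L parses as 1L << 30 because binary minus binds tighter than shift, so the added parentheses restore the intended value. A sketch demonstrating both (the strings are illustrative only):

    import java.util.Objects;

    public class PrecedenceAndEquals {
      public static void main(String[] args) {
        String a = "ClientService";
        String b = new String("ClientService");   // same value, different reference
        System.out.println(a == b);                // false: identity comparison
        System.out.println(Objects.equals(a, b));  // true: null-safe value comparison

        System.out.println(1L << 31 - 1L);    // 1073741824, i.e. 1L << 30
        System.out.println((1L << 31) - 1L);  // 2147483647, the intended constant
      }
    }
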
@@ -122,35 +123,34 @@ public class VisibilityClient { */ public static VisibilityLabelsResponse addLabels(Connection connection, final String[] labels) throws Throwable { - try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) - throws IOException { - VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); - for (String label : labels) { - if (label.length() > 0) { - VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); - newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); - builder.addVisLabel(newBuilder.build()); - } - } - service.addLabels(controller, builder.build(), rpcCallback); - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + VisibilityLabelsRequest.Builder builder = VisibilityLabelsRequest.newBuilder(); + for (String label : labels) { + if (label.length() > 0) { + VisibilityLabel.Builder newBuilder = VisibilityLabel.newBuilder(); + newBuilder.setLabel(ByteStringer.wrap(Bytes.toBytes(label))); + builder.addVisLabel(newBuilder.build()); } - }; + } + service.addLabels(controller, builder.build(), rpcCallback); + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; Map result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. 
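[Note] The SaslUtil hunk earlier, and many of the test hunks further down, attach an explicit charset to every String/byte[] conversion. The findbugs default-encoding warnings exist because String.getBytes() and new String(byte[]) use the platform default charset, so the same data can round-trip differently across machines. A sketch of the pinned-charset pattern; it uses java.util.Base64 for self-containment where the patch itself uses commons-codec:

    import java.nio.charset.StandardCharsets;
    import java.util.Base64;

    public class CharsetPinned {
      static String encodeIdentifier(byte[] identifier) {
        // Base64 output is plain ASCII, so UTF-8 renders it identically everywhere.
        return new String(Base64.getEncoder().encode(identifier), StandardCharsets.UTF_8);
      }

      static byte[] decodeIdentifier(String identifier) {
        return Base64.getDecoder().decode(identifier.getBytes(StandardCharsets.UTF_8));
      }

      public static void main(String[] args) {
        String encoded = encodeIdentifier("user-1".getBytes(StandardCharsets.UTF_8));
        System.out.println(encoded + " -> " + decodeIdentifier(encoded).length + " bytes");
      }
    }
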
} @@ -208,30 +208,31 @@ public class VisibilityClient { */ public static GetAuthsResponse getAuths(Connection connection, final String user) throws Throwable { - try (Table table = connection.getTable(LABELS_TABLE_NAME)) { - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { - GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); - getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); - GetAuthsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; + @Override + public GetAuthsResponse call(VisibilityLabelsService service) throws IOException { + GetAuthsRequest.Builder getAuthReqBuilder = GetAuthsRequest.newBuilder(); + getAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + service.getAuths(controller, getAuthReqBuilder.build(), rpcCallback); + GetAuthsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); } - }; - Map result = - table.coprocessorService(VisibilityLabelsService.class, - HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. - } + return response; + } + }; + Map result = + table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, + HConstants.EMPTY_BYTE_ARRAY, callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. + } } /** @@ -262,28 +263,29 @@ public class VisibilityClient { try (Table table = connection.getTable(LABELS_TABLE_NAME)) { Batch.Call callable = new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { - ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); - if (regex != null) { - // Compile the regex here to catch any regex exception earlier. 
- Pattern pattern = Pattern.compile(regex); - listAuthLabelsReqBuilder.setRegex(pattern.toString()); - } - service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); - ListLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; - } - }; + @Override + public ListLabelsResponse call(VisibilityLabelsService service) throws IOException { + ListLabelsRequest.Builder listAuthLabelsReqBuilder = ListLabelsRequest.newBuilder(); + if (regex != null) { + // Compile the regex here to catch any regex exception earlier. + Pattern pattern = Pattern.compile(regex); + listAuthLabelsReqBuilder.setRegex(pattern.toString()); + } + service.listLabels(controller, listAuthLabelsReqBuilder.build(), rpcCallback); + ListLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; Map result = table.coprocessorService(VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, - HConstants.EMPTY_BYTE_ARRAY, callable); + HConstants.EMPTY_BYTE_ARRAY, callable); return result.values().iterator().next(); // There will be exactly one region for labels // table and so one entry in result Map. } @@ -321,40 +323,41 @@ public class VisibilityClient { private static VisibilityLabelsResponse setOrClearAuths(Connection connection, final String[] auths, final String user, final boolean setOrClear) - throws IOException, ServiceException, Throwable { + throws IOException, ServiceException, Throwable { - try (Table table = connection.getTable(LABELS_TABLE_NAME)) { - Batch.Call callable = - new Batch.Call() { - ServerRpcController controller = new ServerRpcController(); - CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = - new CoprocessorRpcUtils.BlockingRpcCallback<>(); + try (Table table = connection.getTable(LABELS_TABLE_NAME)) { + Batch.Call callable = + new Batch.Call() { + ServerRpcController controller = new ServerRpcController(); + CoprocessorRpcUtils.BlockingRpcCallback rpcCallback = + new CoprocessorRpcUtils.BlockingRpcCallback<>(); - public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { - SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); - setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); - for (String auth : auths) { - if (auth.length() > 0) { - setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); - } + @Override + public VisibilityLabelsResponse call(VisibilityLabelsService service) throws IOException { + SetAuthsRequest.Builder setAuthReqBuilder = SetAuthsRequest.newBuilder(); + setAuthReqBuilder.setUser(ByteStringer.wrap(Bytes.toBytes(user))); + for (String auth : auths) { + if (auth.length() > 0) { + setAuthReqBuilder.addAuth((ByteString.copyFromUtf8(auth))); } - if (setOrClear) { - service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } else { - service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); - } - VisibilityLabelsResponse response = rpcCallback.get(); - if (controller.failedOnException()) { - throw controller.getFailedOn(); - } - return response; } - }; - Map result = table.coprocessorService( - VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, - callable); - return result.values().iterator().next(); // There will be exactly one region for labels - // table and so one entry in result Map. 
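[Note] Each VisibilityClient method keeps the same shape through this re-indentation: open the labels table in try-with-resources, issue one coprocessor call through an anonymous Batch.Call whose call method now carries @Override, and return the single per-region result. Nothing behavioral changes. A stripped-down sketch of that resource-handling shape, with a hypothetical Table standing in for the HBase one:

    import java.io.IOException;

    public class WithResources {
      interface Table extends AutoCloseable {
        String invoke(String request) throws IOException;
        @Override
        void close() throws IOException;
      }

      static String runLabelsOp(Table table, String request) throws IOException {
        // try-with-resources closes the table on success and failure alike,
        // so neither the return nor a thrown RPC error can leak it.
        try (Table t = table) {
          return t.invoke(request);
        }
      }
    }
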
- } + if (setOrClear) { + service.setAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } else { + service.clearAuths(controller, setAuthReqBuilder.build(), rpcCallback); + } + VisibilityLabelsResponse response = rpcCallback.get(); + if (controller.failedOnException()) { + throw controller.getFailedOn(); + } + return response; + } + }; + Map result = table.coprocessorService( + VisibilityLabelsService.class, HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, + callable); + return result.values().iterator().next(); // There will be exactly one region for labels + // table and so one entry in result Map. + } } } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java index 1d938c2f348..305ec4d6e40 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/shaded/protobuf/ResponseConverter.java @@ -417,7 +417,7 @@ public final class ResponseConverter { public static Map getScanMetrics(ScanResponse response) { Map metricMap = new HashMap<>(); - if (response == null || !response.hasScanMetrics() || response.getScanMetrics() == null) { + if (response == null || !response.hasScanMetrics()) { return metricMap; } diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index d7eb2bca351..f174c964ade 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -298,7 +298,7 @@ public class PoolMap implements Map { * the type of the resource */ @SuppressWarnings("serial") - public class ReusablePool extends ConcurrentLinkedQueue implements Pool { + public static class ReusablePool extends ConcurrentLinkedQueue implements Pool { private int maxSize; public ReusablePool(int maxSize) { @@ -342,7 +342,7 @@ public class PoolMap implements Map { * */ @SuppressWarnings("serial") - class RoundRobinPool extends CopyOnWriteArrayList implements Pool { + static class RoundRobinPool extends CopyOnWriteArrayList implements Pool { private int maxSize; private int nextResource = 0; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java index cfbfccb1c28..976dfad4a35 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHColumnDescriptor.java @@ -20,6 +20,8 @@ package org.apache.hadoop.hbase; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import java.nio.charset.StandardCharsets; + import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.exceptions.HBaseException; import org.apache.hadoop.hbase.io.compress.Compression; @@ -32,13 +34,17 @@ import org.apache.hadoop.hbase.testclassification.MiscTests; import org.apache.hadoop.hbase.testclassification.SmallTests; import org.apache.hadoop.hbase.util.BuilderStyleTest; import org.junit.Assert; +import org.junit.Rule; import org.junit.Test; import org.junit.experimental.categories.Category; +import org.junit.rules.ExpectedException; /** Tests the HColumnDescriptor with appropriate arguments */ @Category({MiscTests.class, SmallTests.class}) 
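[Note] The PoolMap hunk above makes ReusablePool and RoundRobinPool static, and the test-filter classes below get the same treatment. A non-static inner class carries a hidden reference to its enclosing instance; findbugs (SIC_INNER_SHOULD_BE_STATIC) flags ones that never use it, since the reference enlarges every instance and can keep the outer object reachable longer than intended. A sketch of the difference, using hypothetical names:

    public class Outer {
      class Inner { }          // holds a hidden Outer.this reference

      static class Nested { }  // no implicit reference to any Outer

      public static void main(String[] args) {
        Outer outer = new Outer();
        Inner inner = outer.new Inner();  // requires an enclosing instance
        Nested nested = new Nested();     // constructed independently
        System.out.println(inner.getClass() + " / " + nested.getClass());
      }
    }
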
@Deprecated public class TestHColumnDescriptor { + @Rule + public ExpectedException expectedEx = ExpectedException.none(); @Test public void testPb() throws DeserializationException { HColumnDescriptor hcd = new HColumnDescriptor( @@ -87,15 +93,14 @@ public class TestHColumnDescriptor { assertEquals(v, deserializedHcd.getDFSReplication()); } + /** + * Tests HColumnDescriptor with empty familyName + */ @Test - /** Tests HColumnDescriptor with empty familyName*/ - public void testHColumnDescriptorShouldThrowIAEWhenFamiliyNameEmpty() - throws Exception { - try { - new HColumnDescriptor("".getBytes()); - } catch (IllegalArgumentException e) { - assertEquals("Column Family name can not be empty", e.getLocalizedMessage()); - } + public void testHColumnDescriptorShouldThrowIAEWhenFamilyNameEmpty() throws Exception { + expectedEx.expect(IllegalArgumentException.class); + expectedEx.expectMessage("Column Family name can not be empty"); + new HColumnDescriptor("".getBytes(StandardCharsets.UTF_8)); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java index 9bbdf5053af..c8a39574de4 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestHTableDescriptor.java @@ -108,7 +108,7 @@ public class TestHTableDescriptor { assertEquals(v, deserializedHtd.getMaxFileSize()); assertTrue(deserializedHtd.isReadOnly()); assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability()); - assertEquals(deserializedHtd.getRegionReplication(), 2); + assertEquals(2, deserializedHtd.getRegionReplication()); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java index 4d92404c92a..7b5aa5cb756 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/TestInterfaceAudienceAnnotations.java @@ -75,14 +75,14 @@ public class TestInterfaceAudienceAnnotations { private static final Log LOG = LogFactory.getLog(TestInterfaceAudienceAnnotations.class); /** Selects classes with generated in their package name */ - class GeneratedClassFilter implements ClassFinder.ClassFilter { + static class GeneratedClassFilter implements ClassFinder.ClassFilter { @Override public boolean isCandidateClass(Class c) { return c.getPackage().getName().contains("generated"); } } - class ShadedProtobufClassFilter implements ClassFinder.ClassFilter { + static class ShadedProtobufClassFilter implements ClassFinder.ClassFilter { @Override public boolean isCandidateClass(Class c) { return c.getPackage().getName(). 
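[Note] The TestHColumnDescriptor rewrite above also fixes a silent-pass bug: the old try/catch only asserted inside the catch block, so a descriptor that wrongly accepted an empty family name would still pass the test. The JUnit 4 ExpectedException rule fails the test whenever the expected exception never arrives. A minimal sketch of the pattern against a stand-in validator:

    import org.junit.Rule;
    import org.junit.Test;
    import org.junit.rules.ExpectedException;

    public class ExpectedExceptionSketch {
      @Rule
      public ExpectedException expectedEx = ExpectedException.none();

      @Test
      public void rejectsEmptyName() {
        expectedEx.expect(IllegalArgumentException.class);
        expectedEx.expectMessage("can not be empty");
        // The rule fails the test if this returns normally instead of throwing.
        validate("");
      }

      private static void validate(String name) {
        if (name.isEmpty()) {
          throw new IllegalArgumentException("Column Family name can not be empty");
        }
      }
    }
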
@@ -242,7 +242,7 @@ public class TestInterfaceAudienceAnnotations { } /** Selects classes that are declared public */ - class PublicClassFilter implements ClassFinder.ClassFilter { + static class PublicClassFilter implements ClassFinder.ClassFilter { @Override public boolean isCandidateClass(Class c) { int mod = c.getModifiers(); @@ -251,7 +251,7 @@ public class TestInterfaceAudienceAnnotations { } /** Selects paths (jars and class dirs) only from the main code, not test classes */ - class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter { + static class MainCodeResourcePathFilter implements ClassFinder.ResourcePathFilter { @Override public boolean isCandidatePath(String resourcePath, boolean isJar) { return !resourcePath.contains("test-classes") && @@ -268,7 +268,7 @@ public class TestInterfaceAudienceAnnotations { * - enclosing class is not an interface * - name starts with "__CLR" */ - class CloverInstrumentationFilter implements ClassFinder.ClassFilter { + static class CloverInstrumentationFilter implements ClassFinder.ClassFilter { @Override public boolean isCandidateClass(Class clazz) { boolean clover = false; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java index f9fbe85b465..ea75ac8c73b 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestAsyncProcess.java @@ -1,5 +1,4 @@ /* - * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information @@ -25,6 +24,7 @@ import static org.junit.Assert.assertTrue; import java.io.IOException; import java.io.InterruptedIOException; +import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -88,12 +88,13 @@ public class TestAsyncProcess { private static final Log LOG = LogFactory.getLog(TestAsyncProcess.class); private static final TableName DUMMY_TABLE = TableName.valueOf("DUMMY_TABLE"); - private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes(); - private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes(); - private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes(); - private static final byte[] FAILS = "FAILS".getBytes(); + private static final byte[] DUMMY_BYTES_1 = "DUMMY_BYTES_1".getBytes(StandardCharsets.UTF_8); + private static final byte[] DUMMY_BYTES_2 = "DUMMY_BYTES_2".getBytes(StandardCharsets.UTF_8); + private static final byte[] DUMMY_BYTES_3 = "DUMMY_BYTES_3".getBytes(StandardCharsets.UTF_8); + private static final byte[] FAILS = "FAILS".getBytes(StandardCharsets.UTF_8); private static final Configuration CONF = new Configuration(); - private static final ConnectionConfiguration CONNECTION_CONFIG = new ConnectionConfiguration(CONF); + private static final ConnectionConfiguration CONNECTION_CONFIG = + new ConnectionConfiguration(CONF); private static final ServerName sn = ServerName.valueOf("s1,1,1"); private static final ServerName sn2 = ServerName.valueOf("s2,2,2"); private static final ServerName sn3 = ServerName.valueOf("s3,3,3"); @@ -115,7 +116,8 @@ public class TestAsyncProcess { new HRegionLocation(hri1r1, sn2), new HRegionLocation(hri1r2, sn3)); private static final RegionLocations hrls2 = new RegionLocations(new HRegionLocation(hri2, sn2), new 
HRegionLocation(hri2r1, sn3)); - private static final RegionLocations hrls3 = new RegionLocations(new HRegionLocation(hri3, sn3), null); + private static final RegionLocations hrls3 = + new RegionLocations(new HRegionLocation(hri3, sn3), null); private static final String success = "success"; private static Exception failure = new Exception("failure"); @@ -325,7 +327,7 @@ public class TestAsyncProcess { public AsyncProcessWithFailure(ClusterConnection hc, Configuration conf, IOException ioe) { super(hc, conf, true); this.ioe = ioe; - serverTrackerTimeout = 1; + serverTrackerTimeout = 1L; } @Override @@ -351,7 +353,8 @@ public class TestAsyncProcess { return inc.getAndIncrement(); } } - class MyAsyncProcessWithReplicas extends MyAsyncProcess { + + static class MyAsyncProcessWithReplicas extends MyAsyncProcess { private Set failures = new TreeSet<>(new Bytes.ByteArrayComparator()); private long primarySleepMs = 0, replicaSleepMs = 0; private Map customPrimarySleepMs = new HashMap<>(); @@ -589,7 +592,13 @@ public class TestAsyncProcess { Random rn = new Random(); final long limit = 10 * 1024 * 1024; final int requestCount = 1 + (int) (rn.nextDouble() * 3); - long putsHeapSize = Math.abs(rn.nextLong()) % limit; + long n = rn.nextLong(); + if (n < 0) { + n = -n; + } else if (n == 0) { + n = 1; + } + long putsHeapSize = n % limit; long maxHeapSizePerRequest = putsHeapSize / requestCount; LOG.info("[testSubmitRandomSizeRequest] maxHeapSizePerRequest=" + maxHeapSizePerRequest + ", putsHeapSize=" + putsHeapSize); @@ -612,13 +621,15 @@ public class TestAsyncProcess { private void doSubmitRequest(long maxHeapSizePerRequest, long putsHeapSize) throws Exception { ClusterConnection conn = createHConnection(); - final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final String defaultClazz = + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); final long defaultHeapSizePerRequest = conn.getConfiguration().getLong( SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, SimpleRequestController.DEFAULT_HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); - conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, maxHeapSizePerRequest); + conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, + maxHeapSizePerRequest); // sn has two regions long putSizeSN = 0; @@ -640,10 +651,11 @@ public class TestAsyncProcess { int minCountSnRequest = (int) calculateRequestCount(putSizeSN, maxHeapSizePerRequest); int minCountSn2Request = (int) calculateRequestCount(putSizeSN2, maxHeapSizePerRequest); - LOG.info("Total put count:" + puts.size() + ", putSizeSN:"+ putSizeSN + ", putSizeSN2:" + putSizeSN2 - + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest - + ", minCountSnRequest:" + minCountSnRequest - + ", minCountSn2Request:" + minCountSn2Request); + LOG.info("Total put count:" + puts.size() + ", putSizeSN:"+ putSizeSN + + ", putSizeSN2:" + putSizeSN2 + + ", maxHeapSizePerRequest:" + maxHeapSizePerRequest + + ", minCountSnRequest:" + minCountSnRequest + + ", minCountSn2Request:" + minCountSn2Request); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, true); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); @@ -683,7 +695,7 @@ public class TestAsyncProcess { sum += size; } assertEquals(true, sum 
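[Note] The testSubmitRandomSizeRequest change above, replacing Math.abs(rn.nextLong()) with an explicit negate, targets the findbugs absolute-value-of-random warning: Math.abs(Long.MIN_VALUE) overflows and returns Long.MIN_VALUE, so the old expression could, with probability 2^-64, yield a negative heap size. The manual negate shares that single edge case (-Long.MIN_VALUE is still Long.MIN_VALUE); Math.floorMod sidesteps it entirely, as this sketch shows:

    import java.util.Random;

    public class AbsPitfall {
      public static void main(String[] args) {
        // abs overflows at the extreme value and stays negative.
        System.out.println(Math.abs(Long.MIN_VALUE)); // -9223372036854775808

        Random rn = new Random();
        long limit = 10L * 1024 * 1024;
        // floorMod with a positive modulus is non-negative for every input,
        // including Long.MIN_VALUE.
        long putsHeapSize = Math.floorMod(rn.nextLong(), limit);
        System.out.println(putsHeapSize >= 0 && putsHeapSize < limit); // true
      }
    }
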
<= maxHeapSizePerRequest); - long value = sizePerServers.containsKey(entry.getKey()) ? sizePerServers.get(entry.getKey()) : 0L; + long value = sizePerServers.getOrDefault(entry.getKey(), 0L); sizePerServers.put(entry.getKey(), value + sum); } } @@ -694,7 +706,8 @@ public class TestAsyncProcess { assertEquals(putSizeSN2, (long) sizePerServers.get(sn2)); } // restore config. - conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, defaultHeapSizePerRequest); + conn.getConfiguration().setLong(SimpleRequestController.HBASE_CLIENT_MAX_PERREQUEST_HEAPSIZE, + defaultHeapSizePerRequest); if (defaultClazz != null) { conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, defaultClazz); @@ -731,13 +744,14 @@ public class TestAsyncProcess { final AsyncRequestFuture ars = ap.submit(null, DUMMY_TABLE, puts, false, cb, false); Assert.assertTrue(puts.isEmpty()); ars.waitUntilDone(); - Assert.assertEquals(updateCalled.get(), 1); + Assert.assertEquals(1, updateCalled.get()); } @Test public void testSubmitBusyRegion() throws Exception { ClusterConnection conn = createHConnection(); - final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final String defaultClazz = + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); @@ -765,11 +779,13 @@ public class TestAsyncProcess { public void testSubmitBusyRegionServer() throws Exception { ClusterConnection conn = createHConnection(); MyAsyncProcess ap = new MyAsyncProcess(conn, CONF); - final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final String defaultClazz = + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); SimpleRequestController controller = (SimpleRequestController) ap.requestController; - controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer)); + controller.taskCounterPerServer.put(sn2, + new AtomicInteger(controller.maxConcurrentTasksPerServer)); List puts = new ArrayList<>(4); puts.add(createPut(1, true)); @@ -780,7 +796,8 @@ public class TestAsyncProcess { ap.submit(null, DUMMY_TABLE, puts, false, null, false); Assert.assertEquals(" puts=" + puts, 1, puts.size()); - controller.taskCounterPerServer.put(sn2, new AtomicInteger(controller.maxConcurrentTasksPerServer - 1)); + controller.taskCounterPerServer.put(sn2, + new AtomicInteger(controller.maxConcurrentTasksPerServer - 1)); ap.submit(null, DUMMY_TABLE, puts, false, null, false); Assert.assertTrue(puts.isEmpty()); if (defaultClazz != null) { @@ -819,7 +836,8 @@ public class TestAsyncProcess { public void testSubmitTrue() throws IOException { ClusterConnection conn = createHConnection(); final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, false); - final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final String defaultClazz = + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, 
SimpleRequestController.class.getName()); SimpleRequestController controller = (SimpleRequestController) ap.requestController; @@ -923,7 +941,8 @@ public class TestAsyncProcess { Mockito.when(conn.getConfiguration()).thenReturn(copyConf); Mockito.when(conn.getStatisticsTracker()).thenReturn(ServerStatisticTracker.create(copyConf)); Mockito.when(conn.getBackoffPolicy()).thenReturn(bp); - final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final String defaultClazz = + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); MyAsyncProcess ap = new MyAsyncProcess(conn, copyConf, false); @@ -934,7 +953,8 @@ public class TestAsyncProcess { } } - private void testTaskCount(MyAsyncProcess ap) throws InterruptedIOException, InterruptedException { + private void testTaskCount(MyAsyncProcess ap) + throws InterruptedIOException, InterruptedException { SimpleRequestController controller = (SimpleRequestController) ap.requestController; List puts = new ArrayList<>(); for (int i = 0; i != 3; ++i) { @@ -958,7 +978,8 @@ public class TestAsyncProcess { @Test public void testMaxTask() throws Exception { ClusterConnection conn = createHConnection(); - final String defaultClazz = conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); + final String defaultClazz = + conn.getConfiguration().get(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY); conn.getConfiguration().set(RequestControllerFactory.REQUEST_CONTROLLER_IMPL_CONF_KEY, SimpleRequestController.class.getName()); final MyAsyncProcess ap = new MyAsyncProcess(conn, CONF, false); @@ -966,7 +987,7 @@ public class TestAsyncProcess { for (int i = 0; i < 1000; i++) { - ap.incTaskCounters(Collections.singleton("dummy".getBytes()), sn); + ap.incTaskCounters(Collections.singleton("dummy".getBytes(StandardCharsets.UTF_8)), sn); } final Thread myThread = Thread.currentThread(); @@ -997,7 +1018,7 @@ public class TestAsyncProcess { public void run() { Threads.sleep(sleepTime); while (controller.tasksInProgress.get() > 0) { - ap.decTaskCounters(Collections.singleton("dummy".getBytes()), sn); + ap.decTaskCounters(Collections.singleton("dummy".getBytes(StandardCharsets.UTF_8)), sn); } } }; @@ -1020,8 +1041,8 @@ public class TestAsyncProcess { setMockLocation(hc, DUMMY_BYTES_1, new RegionLocations(loc1)); setMockLocation(hc, DUMMY_BYTES_2, new RegionLocations(loc2)); setMockLocation(hc, DUMMY_BYTES_3, new RegionLocations(loc3)); - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) - .thenReturn(Arrays.asList(loc1, loc2, loc3)); + Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), + Mockito.anyBoolean())).thenReturn(Arrays.asList(loc1, loc2, loc3)); setMockLocation(hc, FAILS, new RegionLocations(loc2)); return hc; } @@ -1041,8 +1062,8 @@ public class TestAsyncProcess { for (HRegionLocation loc : hrls3.getRegionLocations()) { locations.add(loc); } - Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), Mockito.anyBoolean())) - .thenReturn(locations); + Mockito.when(hc.locateRegions(Mockito.eq(DUMMY_TABLE), Mockito.anyBoolean(), + Mockito.anyBoolean())).thenReturn(locations); return hc; } @@ -1073,7 +1094,8 @@ public class TestAsyncProcess { Put put = createPut(1, true); - 
Assert.assertEquals(conn.getConnectionConfiguration().getWriteBufferSize(), ht.getWriteBufferSize()); + Assert.assertEquals(conn.getConnectionConfiguration().getWriteBufferSize(), + ht.getWriteBufferSize()); Assert.assertEquals(0, ht.getCurrentWriteBufferSize()); ht.mutate(put); ht.flush(); @@ -1161,13 +1183,13 @@ public class TestAsyncProcess { } catch (RetriesExhaustedException expected) { } - Assert.assertEquals(res[0], success); - Assert.assertEquals(res[1], success); - Assert.assertEquals(res[2], success); - Assert.assertEquals(res[3], success); - Assert.assertEquals(res[4], failure); - Assert.assertEquals(res[5], success); - Assert.assertEquals(res[6], failure); + Assert.assertEquals(success, res[0]); + Assert.assertEquals(success, res[1]); + Assert.assertEquals(success, res[2]); + Assert.assertEquals(success, res[3]); + Assert.assertEquals(failure, res[4]); + Assert.assertEquals(success, res[5]); + Assert.assertEquals(failure, res[6]); } @Test public void testErrorsServers() throws IOException { @@ -1179,8 +1201,8 @@ public class TestAsyncProcess { configuration.setBoolean(ConnectionImplementation.RETRIES_BY_SERVER_KEY, true); Assert.assertNotNull(ap.createServerErrorTracker()); - Assert.assertTrue(ap.serverTrackerTimeout > 200); - ap.serverTrackerTimeout = 1; + Assert.assertTrue(ap.serverTrackerTimeout > 200L); + ap.serverTrackerTimeout = 1L; Put p = createPut(1, false); mutator.mutate(p); @@ -1258,7 +1280,8 @@ public class TestAsyncProcess { @Test public void testCallQueueTooLarge() throws IOException { ClusterConnection conn = new MyConnectionImpl(CONF); - AsyncProcessWithFailure ap = new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException()); + AsyncProcessWithFailure ap = + new AsyncProcessWithFailure(conn, CONF, new CallQueueTooBigException()); BufferedMutatorParams bufferParam = createBufferedMutatorParams(ap, DUMMY_TABLE); BufferedMutatorImpl mutator = new BufferedMutatorImpl(conn, bufferParam, ap); Assert.assertNotNull(ap.createServerErrorTracker()); @@ -1298,7 +1321,7 @@ public class TestAsyncProcess { ht.multiAp = ap; ht.batch(gets, null); - Assert.assertEquals(ap.nbActions.get(), NB_REGS); + Assert.assertEquals(NB_REGS, ap.nbActions.get()); Assert.assertEquals("1 multi response per server", 2, ap.nbMultiResponse.get()); Assert.assertEquals("1 thread per server", 2, con.nbThreads.get()); @@ -1306,7 +1329,7 @@ public class TestAsyncProcess { for (int i =0; i taskCounterPerServer = new HashMap<>(); final Map taskCounterPerRegion = new HashMap<>(); - SimpleRequestController.TaskCountChecker countChecker = new SimpleRequestController.TaskCountChecker( + SimpleRequestController.TaskCountChecker countChecker = + new SimpleRequestController.TaskCountChecker( maxTotalConcurrentTasks, maxConcurrentTasksPerServer, maxConcurrentTasksPerRegion, tasksInProgress, taskCounterPerServer, taskCounterPerRegion); final long maxHeapSizePerRequest = 2 * 1024 * 1024; // unlimiited - SimpleRequestController.RequestHeapSizeChecker sizeChecker = new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); - RequestController.Checker checker = SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker)); + SimpleRequestController.RequestHeapSizeChecker sizeChecker = + new SimpleRequestController.RequestHeapSizeChecker(maxHeapSizePerRequest); + RequestController.Checker checker = + SimpleRequestController.newChecker(Arrays.asList(countChecker, sizeChecker)); ReturnCode loc1Code = checker.canTakeRow(LOC1, createPut(maxHeapSizePerRequest)); 
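[Note] Many assertion hunks in these test files swap arguments into JUnit's (expected, actual) order. The swap never changes whether a test passes; it fixes the failure message, which labels the first argument as the expected value. A sketch of why the order matters, with a stand-in accessor:

    import static org.junit.Assert.assertEquals;

    import org.junit.Test;

    public class AssertOrder {
      private int regionReplication() {
        return 3; // imagine a regression; the correct value is 2
      }

      @Test
      public void failureMessageReadsCorrectly() {
        // Fails as "expected:<2> but was:<3>", pointing at the regression.
        // Reversed arguments would report "expected:<3> but was:<2>",
        // blaming the constant rather than the code under test.
        assertEquals(2, regionReplication());
      }
    }
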
assertEquals(ReturnCode.INCLUDE, loc1Code); @@ -357,11 +362,7 @@ public class TestSimpleRequestController { controller.waitForMaximumCurrentTasks(max.get(), 123, 1, null); } catch (InterruptedIOException e) { Assert.fail(e.getMessage()); - } catch (InterruptedException e) { - // TODO Auto-generated catch block - e.printStackTrace(); - } catch (BrokenBarrierException e) { - // TODO Auto-generated catch block + } catch (InterruptedException | BrokenBarrierException e) { e.printStackTrace(); } }; diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java index b2c011c74e9..6c9aadd099a 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestSnapshotFromAdmin.java @@ -71,7 +71,7 @@ public class TestSnapshotFromAdmin { ignoreExpectedTime += HConstants.RETRY_BACKOFF[i] * pauseTime; } // the correct wait time, capping at the maxTime/tries + fudge room - final long time = pauseTime * 3 + ((maxWaitTime / numRetries) * 3) + 300; + final long time = pauseTime * 3L + ((maxWaitTime / numRetries) * 3) + 300L; assertTrue("Capped snapshot wait time isn't less that the uncapped backoff time " + "- further testing won't prove anything.", time < ignoreExpectedTime); diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java index d2a5b68bd93..3c159b2321f 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/client/TestTableDescriptorBuilder.java @@ -113,7 +113,7 @@ public class TestTableDescriptorBuilder { assertEquals(v, deserializedHtd.getMaxFileSize()); assertTrue(deserializedHtd.isReadOnly()); assertEquals(Durability.ASYNC_WAL, deserializedHtd.getDurability()); - assertEquals(deserializedHtd.getRegionReplication(), 2); + assertEquals(2, deserializedHtd.getRegionReplication()); } /** diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java index 282dc28e285..8d2d1a106f8 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestHBaseSaslRpcClient.java @@ -30,8 +30,7 @@ import static org.mockito.Mockito.when; import org.apache.hadoop.hbase.shaded.com.google.common.base.Strings; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; +import java.nio.charset.StandardCharsets; import javax.security.auth.callback.Callback; import javax.security.auth.callback.CallbackHandler; @@ -100,8 +99,10 @@ public class TestHBaseSaslRpcClient { @Test public void testSaslClientCallbackHandler() throws UnsupportedCallbackException { final Token token = createTokenMock(); - when(token.getIdentifier()).thenReturn(DEFAULT_USER_NAME.getBytes()); - when(token.getPassword()).thenReturn(DEFAULT_USER_PASSWORD.getBytes()); + when(token.getIdentifier()) + .thenReturn(DEFAULT_USER_NAME.getBytes(StandardCharsets.UTF_8)); + when(token.getPassword()) + .thenReturn(DEFAULT_USER_PASSWORD.getBytes(StandardCharsets.UTF_8)); final NameCallback nameCallback = mock(NameCallback.class); final 
PasswordCallback passwordCallback = mock(PasswordCallback.class); @@ -120,8 +121,10 @@ public class TestHBaseSaslRpcClient { @Test public void testSaslClientCallbackHandlerWithException() { final Token token = createTokenMock(); - when(token.getIdentifier()).thenReturn(DEFAULT_USER_NAME.getBytes()); - when(token.getPassword()).thenReturn(DEFAULT_USER_PASSWORD.getBytes()); + when(token.getIdentifier()) + .thenReturn(DEFAULT_USER_NAME.getBytes(StandardCharsets.UTF_8)); + when(token.getPassword()) + .thenReturn(DEFAULT_USER_PASSWORD.getBytes(StandardCharsets.UTF_8)); final SaslClientCallbackHandler saslClCallbackHandler = new SaslClientCallbackHandler(token); try { saslClCallbackHandler.handle(new Callback[] { mock(TextOutputCallback.class) }); @@ -291,8 +294,10 @@ public class TestHBaseSaslRpcClient { throws IOException { Token token = createTokenMock(); if (!Strings.isNullOrEmpty(principal) && !Strings.isNullOrEmpty(password)) { - when(token.getIdentifier()).thenReturn(DEFAULT_USER_NAME.getBytes()); - when(token.getPassword()).thenReturn(DEFAULT_USER_PASSWORD.getBytes()); + when(token.getIdentifier()) + .thenReturn(DEFAULT_USER_NAME.getBytes(StandardCharsets.UTF_8)); + when(token.getPassword()) + .thenReturn(DEFAULT_USER_PASSWORD.getBytes(StandardCharsets.UTF_8)); } return token; } diff --git a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java index 6c997396d5b..aea14a7d400 100644 --- a/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java +++ b/hbase-client/src/test/java/org/apache/hadoop/hbase/security/TestSaslUtil.java @@ -40,20 +40,20 @@ public class TestSaslUtil { Map props; props = SaslUtil.initSaslProperties("integrity"); - assertEquals(props.get(Sasl.QOP), "auth-int"); + assertEquals("auth-int", props.get(Sasl.QOP)); props = SaslUtil.initSaslProperties("privacy,authentication"); - assertEquals(props.get(Sasl.QOP), "auth-conf,auth"); + assertEquals("auth-conf,auth", props.get(Sasl.QOP)); props = SaslUtil.initSaslProperties("integrity,authentication,privacy"); - assertEquals(props.get(Sasl.QOP), "auth-int,auth,auth-conf"); + assertEquals("auth-int,auth,auth-conf", props.get(Sasl.QOP)); exception.expect(IllegalArgumentException.class); props = SaslUtil.initSaslProperties("xyz"); - assertEquals(props.get(Sasl.QOP), "auth"); + assertEquals("auth", props.get(Sasl.QOP)); exception.expect(IllegalArgumentException.class); props = SaslUtil.initSaslProperties(""); - assertEquals(props.get(Sasl.QOP), "auth"); + assertEquals("auth", props.get(Sasl.QOP)); } } diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java index 2c31d4488a5..b00b7073638 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -322,10 +322,27 @@ public class ServerName implements Comparable, Serializable { @Override public int compareTo(ServerName other) { - int compare = this.getHostname().compareToIgnoreCase(other.getHostname()); - if (compare != 0) return compare; + int compare; + if (other == null) { + return -1; + } + if (this.getHostname() == null) { + if (other.getHostname() != null) { + return 1; + } + } else { + if (other.getHostname() == null) { + return -1; + } + compare = this.getHostname().compareToIgnoreCase(other.getHostname()); + if (compare != 0) { + return compare; + } + } compare = 
this.getPort() - other.getPort(); - if (compare != 0) return compare; + if (compare != 0) { + return compare; + } return Long.compare(this.getStartcode(), other.getStartcode()); } diff --git a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java index aa72fc5352e..300a93b6025 100644 --- a/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java +++ b/hbase-replication/src/main/java/org/apache/hadoop/hbase/replication/ReplicationTrackerZKImpl.java @@ -101,6 +101,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements * Called when a new node has been created. * @param path full path of the new node */ + @Override public void nodeCreated(String path) { refreshListIfRightPath(path); } @@ -109,6 +110,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements * Called when a node has been deleted * @param path full path of the deleted node */ + @Override public void nodeDeleted(String path) { if (stopper.isStopped()) { return; @@ -127,6 +129,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements * Called when an existing node has a child node added or removed. * @param path full path of the node whose children have changed */ + @Override public void nodeChildrenChanged(String path) { if (stopper.isStopped()) { return; @@ -158,6 +161,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements * Called when a node has been deleted * @param path full path of the deleted node */ + @Override public void nodeDeleted(String path) { List peers = refreshPeersList(path); if (peers == null) { @@ -176,6 +180,7 @@ public class ReplicationTrackerZKImpl extends ReplicationStateZKBase implements * Called when an existing node has a child node added or removed. * @param path full path of the node whose children have changed */ + @Override public void nodeChildrenChanged(String path) { List peers = refreshPeersList(path); if (peers == null) { diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java index a7607bef3ac..850a2c5c10a 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java @@ -51,6 +51,7 @@ final public class FilterWrapper extends Filter { /** * @return The filter serialized using pb */ + @Override public byte[] toByteArray() throws IOException { FilterProtos.FilterWrapper.Builder builder = FilterProtos.FilterWrapper.newBuilder(); @@ -170,6 +171,7 @@ final public class FilterWrapper extends Filter { * @return true if and only if the fields of the filter that are serialized * are equal to the corresponding fields in other. Used for testing. 
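[Note] The ServerName.compareTo rewrite above adds defensive null handling: a null ServerName and a null hostname each sort after their non-null counterparts, then port and start code break ties as before. (Strictly, java.lang.Comparable documents that compareTo(null) should throw NullPointerException; the patch opts for a total order instead.) The same ordering expressed declaratively, with a hypothetical stand-in class:

    import java.util.Comparator;

    public class ServerOrderSketch {
      static final class Server {
        final String hostname; final int port; final long startcode;
        Server(String hostname, int port, long startcode) {
          this.hostname = hostname; this.port = port; this.startcode = startcode;
        }
      }

      // Case-insensitive hostname with nulls last, then port, then startcode.
      static final Comparator<Server> ORDER = Comparator
          .comparing((Server s) -> s.hostname,
              Comparator.nullsLast(String.CASE_INSENSITIVE_ORDER))
          .thenComparingInt(s -> s.port)
          .thenComparingLong(s -> s.startcode);

      public static void main(String[] args) {
        int c = ORDER.compare(new Server("rs1.example.org", 16020, 1L),
            new Server("RS1.example.org", 16020, 2L));
        System.out.println(c < 0); // true: only the startcode differs
      }
    }
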
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
index a7607bef3ac..850a2c5c10a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/filter/FilterWrapper.java
@@ -51,6 +51,7 @@ final public class FilterWrapper extends Filter {
   /**
    * @return The filter serialized using pb
    */
+  @Override
   public byte[] toByteArray() throws IOException {
     FilterProtos.FilterWrapper.Builder builder =
       FilterProtos.FilterWrapper.newBuilder();
@@ -170,6 +171,7 @@ final public class FilterWrapper extends Filter {
    * @return true if and only if the fields of the filter that are serialized
    * are equal to the corresponding fields in other.  Used for testing.
    */
+  @Override
   boolean areSerializedFieldsEqual(Filter o) {
     if (o == this) return true;
     if (!(o instanceof FilterWrapper)) return false;
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
index 6470faa4e92..1ec4f1d03fd 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/EmptyWatcher.java
@@ -30,5 +30,6 @@ public class EmptyWatcher implements Watcher {
   public static final EmptyWatcher instance = new EmptyWatcher();
   private EmptyWatcher() {}
 
+  @Override
   public void process(WatchedEvent event) {}
 }
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
index 59fc082e2b7..93573971582 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/HQuorumPeer.java
@@ -24,6 +24,7 @@ import java.io.PrintWriter;
 import java.net.InetAddress;
 import java.net.NetworkInterface;
 import java.net.UnknownHostException;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Enumeration;
 import java.util.List;
@@ -155,7 +156,7 @@ public class HQuorumPeer {
     }
     File myIdFile = new File(dataDir, "myid");
-    PrintWriter w = new PrintWriter(myIdFile);
+    PrintWriter w = new PrintWriter(myIdFile, StandardCharsets.UTF_8.name());
     w.println(myId);
     w.close();
   }
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java
index edd2ccdff3b..9ae3e1cba6b 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKLeaderManager.java
@@ -43,6 +43,7 @@ import org.apache.zookeeper.KeeperException;
 public class ZKLeaderManager extends ZKListener {
   private static final Log LOG = LogFactory.getLog(ZKLeaderManager.class);
 
+  private final Object lock = new Object();
   private final AtomicBoolean leaderExists = new AtomicBoolean();
   private String leaderZNode;
   private byte[] nodeId;
@@ -85,14 +86,14 @@ public class ZKLeaderManager extends ZKListener {
 
   private void handleLeaderChange() {
     try {
-      synchronized(leaderExists) {
+      synchronized(lock) {
         if (ZKUtil.watchAndCheckExists(watcher, leaderZNode)) {
           LOG.info("Found new leader for znode: "+leaderZNode);
           leaderExists.set(true);
         } else {
           LOG.info("Leader change, but no new leader found");
           leaderExists.set(false);
-          leaderExists.notifyAll();
+          lock.notifyAll();
         }
       }
     } catch (KeeperException ke) {
@@ -136,10 +137,10 @@ public class ZKLeaderManager extends ZKListener {
       }
 
       // wait for next chance
-      synchronized(leaderExists) {
+      synchronized(lock) {
         while (leaderExists.get() && !candidate.isStopped()) {
           try {
-            leaderExists.wait();
+            lock.wait();
           } catch (InterruptedException ie) {
             LOG.debug("Interrupted waiting on leader", ie);
           }
@@ -153,7 +154,7 @@ public class ZKLeaderManager extends ZKListener {
    */
  public void stepDownAsLeader() {
    try {
-      synchronized(leaderExists) {
+      synchronized(lock) {
        if (!leaderExists.get()) {
          return;
        }
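The ZKLeaderManager hunks above stop synchronizing on the AtomicBoolean
itself and introduce a dedicated private monitor, keeping the AtomicBoolean
purely as state; findbugs flags wait/notify on java.util.concurrent objects
(the JLM_JSR166_UTILCONCURRENT_MONITORENTER pattern). A minimal standalone
sketch of the pattern, with illustrative names not taken from the patch:

    import java.util.concurrent.atomic.AtomicBoolean;

    class LeaderGate {
      // Private monitor: no outside code can synchronize on it, so the
      // wait/notifyAll protocol cannot be disturbed by foreign locking.
      private final Object lock = new Object();
      private final AtomicBoolean leaderExists = new AtomicBoolean();

      void onLeaderGone() {
        synchronized (lock) {
          leaderExists.set(false);
          lock.notifyAll();             // wake any thread blocked below
        }
      }

      void awaitVacancy() throws InterruptedException {
        synchronized (lock) {
          while (leaderExists.get()) {  // loop guards against spurious wakeups
            lock.wait();
          }
        }
      }
    }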
diff --git a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
index d8472a9441f..859ab48dbfd 100644
--- a/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
+++ b/hbase-zookeeper/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKUtil.java
@@ -19,11 +19,14 @@
 package org.apache.hadoop.hbase.zookeeper;
 
 import java.io.BufferedReader;
+import java.io.BufferedWriter;
 import java.io.IOException;
 import java.io.InputStreamReader;
+import java.io.OutputStreamWriter;
 import java.io.PrintWriter;
 import java.net.InetSocketAddress;
 import java.net.Socket;
+import java.nio.charset.StandardCharsets;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Deque;
@@ -1904,8 +1907,10 @@ public class ZKUtil {
       socket.connect(sockAddr, timeout);
 
       socket.setSoTimeout(timeout);
-      try (PrintWriter out = new PrintWriter(socket.getOutputStream(), true);
-          BufferedReader in = new BufferedReader(new InputStreamReader(socket.getInputStream()))) {
+      try (PrintWriter out = new PrintWriter(new BufferedWriter(
+            new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)), true);
+          BufferedReader in = new BufferedReader(
+            new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) {
         out.println("stat");
         out.flush();
         ArrayList<String> res = new ArrayList<>();
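The ZKUtil hunk above pins both directions of the ZooKeeper "stat" exchange
to UTF-8; the one-argument PrintWriter and InputStreamReader constructors
otherwise fall back to the platform default charset, which is what
error-prone warns about. A self-contained sketch of the same wiring, with
placeholder class name, host, port, and timeout values:

    import java.io.BufferedReader;
    import java.io.BufferedWriter;
    import java.io.IOException;
    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;
    import java.io.PrintWriter;
    import java.net.InetSocketAddress;
    import java.net.Socket;
    import java.nio.charset.StandardCharsets;

    class ZkStat {
      static String firstStatLine(String host, int port, int timeoutMs) throws IOException {
        try (Socket socket = new Socket()) {
          socket.connect(new InetSocketAddress(host, port), timeoutMs);
          socket.setSoTimeout(timeoutMs);
          try (PrintWriter out = new PrintWriter(new BufferedWriter(
                   new OutputStreamWriter(socket.getOutputStream(), StandardCharsets.UTF_8)), true);
               BufferedReader in = new BufferedReader(
                   new InputStreamReader(socket.getInputStream(), StandardCharsets.UTF_8))) {
            out.println("stat");   // ZooKeeper four-letter-word command
            return in.readLine();  // first line of the server's response
          }
        }
      }
    }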
diff --git a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java
index db963922833..2ea70d62404 100644
--- a/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java
+++ b/hbase-zookeeper/src/test/java/org/apache/hadoop/hbase/zookeeper/TestZKUtil.java
@@ -48,7 +48,7 @@ public class TestZKUtil {
     String node = "/hbase/testUnsecure";
     ZKWatcher watcher = new ZKWatcher(conf, node, null, false);
     List<ACL> aclList = ZKUtil.createACL(watcher, node, false);
-    Assert.assertEquals(aclList.size(), 1);
+    Assert.assertEquals(1, aclList.size());
     Assert.assertTrue(aclList.contains(Ids.OPEN_ACL_UNSAFE.iterator().next()));
   }
 
@@ -59,7 +59,7 @@ public class TestZKUtil {
     String node = "/hbase/testSecuritySingleSuperuser";
     ZKWatcher watcher = new ZKWatcher(conf, node, null, false);
     List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
-    Assert.assertEquals(aclList.size(), 2); // 1+1, since ACL will be set for the creator by default
+    Assert.assertEquals(2, aclList.size()); // 1+1, since ACL will be set for the creator by default
     Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1"))));
     Assert.assertTrue(aclList.contains(Ids.CREATOR_ALL_ACL.iterator().next()));
   }
 
@@ -71,7 +71,7 @@ public class TestZKUtil {
     String node = "/hbase/testCreateACL";
     ZKWatcher watcher = new ZKWatcher(conf, node, null, false);
     List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
-    Assert.assertEquals(aclList.size(), 4); // 3+1, since ACL will be set for the creator by default
+    Assert.assertEquals(4, aclList.size()); // 3+1, since ACL will be set for the creator by default
     Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
     Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group2"))));
     Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user1"))));
@@ -87,7 +87,7 @@ public class TestZKUtil {
     String node = "/hbase/testCreateACL";
     ZKWatcher watcher = new ZKWatcher(conf, node, null, false);
     List<ACL> aclList = ZKUtil.createACL(watcher, node, true);
-    Assert.assertEquals(aclList.size(), 3); // 3, since service user the same as one of superuser
+    Assert.assertEquals(3, aclList.size()); // 3, since service user the same as one of superuser
     Assert.assertFalse(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "@group1"))));
     Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("auth", ""))));
     Assert.assertTrue(aclList.contains(new ACL(Perms.ALL, new Id("sasl", "user5"))));
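All four TestZKUtil hunks above, like the TestSaslUtil ones earlier, restore
JUnit's documented assertEquals(expected, actual) argument order. The order
never changes whether a test passes, only how a failure reads. With
hypothetical values, suppose createACL produced 3 ACLs where 4 were expected:

    Assert.assertEquals(4, aclList.size());
    // fails with "expected:<4> but was:<3>", pointing at the actual value

    Assert.assertEquals(aclList.size(), 4);
    // fails with "expected:<3> but was:<4>", as if the constant were wrong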