From a5169d422b0fa4e7c62d04187398a0a9aad0ade9 Mon Sep 17 00:00:00 2001 From: stack Date: Mon, 1 Dec 2014 14:30:59 -0800 Subject: [PATCH] HBASE-12526 Remove unused imports (Varun Saxena) --- .../classification/InterfaceStability.java | 17 ++++++------- .../InvalidFamilyOperationException.java | 1 - .../org/apache/hadoop/hbase/ServerLoad.java | 1 - .../org/apache/hadoop/hbase/ServerName.java | 1 - .../apache/hadoop/hbase/client/Action.java | 1 - .../org/apache/hadoop/hbase/client/Admin.java | 2 -- .../hbase/client/ClientIdGenerator.java | 5 ---- .../hbase/client/ConnectionFactory.java | 1 - .../hadoop/hbase/client/HBaseAdmin.java | 1 - .../hbase/client/HConnectionManager.java | 1 - .../hadoop/hbase/client/MultiResponse.java | 4 --- .../client/NoServerForRegionException.java | 1 - .../client/RegionCoprocessorServiceExec.java | 1 - .../client/RetriesExhaustedException.java | 1 - .../RetryingCallerInterceptorFactory.java | 1 - .../hbase/client/ReversedScannerCallable.java | 7 +++--- .../hbase/client/RowTooBigException.java | 1 - .../hadoop/hbase/client/ScannerCallable.java | 8 +++--- .../coprocessor/DoubleColumnInterpreter.java | 1 - .../client/replication/ReplicationAdmin.java | 1 - .../hbase/coprocessor/ColumnInterpreter.java | 9 +++---- .../exceptions/MergeRegionException.java | 1 - .../exceptions/RegionOpeningException.java | 1 - .../apache/hadoop/hbase/filter/Filter.java | 2 +- .../hadoop/hbase/filter/QualifierFilter.java | 4 +-- .../apache/hadoop/hbase/filter/RowFilter.java | 4 +-- .../hbase/filter/SingleColumnValueFilter.java | 4 +-- .../hbase/ipc/CoprocessorRpcChannel.java | 5 ++-- .../hbase/ipc/RemoteWithExtrasException.java | 4 +-- .../hadoop/hbase/ipc/ServerRpcController.java | 13 +++++----- .../hadoop/hbase/protobuf/ProtobufUtil.java | 1 - .../hbase/protobuf/ResponseConverter.java | 1 - .../replication/ReplicationPeerZKImpl.java | 2 -- .../org/apache/hadoop/hbase/util/PoolMap.java | 3 +-- .../hadoop/hbase/zookeeper/ZKAssign.java | 22 ++++++++-------- 
.../hadoop/hbase/zookeeper/ZKConfig.java | 3 --- .../org/apache/hadoop/hbase/codec/Codec.java | 9 +++---- .../hadoop/hbase/io/CellOutputStream.java | 5 ++-- .../hbase/io/util/HeapMemorySizeUtil.java | 4 +-- .../apache/hadoop/hbase/types/DataType.java | 4 +-- .../hbase/types/RawBytesFixedLength.java | 8 +++--- .../hbase/types/RawBytesTerminated.java | 10 ++++---- .../hbase/types/RawStringFixedLength.java | 10 ++++---- .../hbase/types/RawStringTerminated.java | 12 ++++----- .../util/AbstractPositionedByteRange.java | 7 +++--- .../hadoop/hbase/util/ByteBufferUtils.java | 1 - .../org/apache/hadoop/hbase/util/Bytes.java | 5 ++-- .../hadoop/hbase/util/ConcurrentIndex.java | 6 ++--- .../hadoop/hbase/util/OrderedBytes.java | 1 - .../hbase/util/PositionedByteRange.java | 3 +-- .../hadoop/hbase/util/PrettyPrinter.java | 1 - .../example/ZooKeeperScanPolicyObserver.java | 8 +++--- .../codec/prefixtree/PrefixTreeCodec.java | 1 - .../org/apache/hadoop/hbase/SplitLogTask.java | 2 -- .../apache/hadoop/hbase/TableDescriptors.java | 1 - .../backup/example/HFileArchiveManager.java | 1 - .../LongTermArchivingHFileCleaner.java | 8 +++--- .../hbase/client/ClientSideRegionScanner.java | 1 - .../hbase/client/CoprocessorHConnection.java | 1 - .../hbase/client/TableSnapshotScanner.java | 11 ++++---- .../hadoop/hbase/constraint/Constraint.java | 1 - .../hbase/constraint/ConstraintException.java | 13 +++++----- .../coordination/RegionMergeCoordination.java | 5 ++-- .../SplitLogManagerCoordination.java | 5 ++-- .../SplitLogWorkerCoordination.java | 4 +-- .../SplitTransactionCoordination.java | 5 ++-- .../ZKSplitLogManagerCoordination.java | 17 +++++++------ .../ZKSplitTransactionCoordination.java | 5 ++-- .../ZkRegionMergeCoordination.java | 3 +-- .../ZkSplitLogWorkerCoordination.java | 4 +-- .../hbase/coprocessor/CoprocessorHost.java | 1 - .../hbase/coprocessor/RegionObserver.java | 15 +++++------ .../errorhandling/ForeignExceptionSnare.java | 1 - .../hadoop/hbase/io/hfile/HFileBlock.java | 
11 ++++---- .../hbase/io/hfile/HFileBlockIndex.java | 7 +++--- .../io/hfile/HFileDataBlockEncoderImpl.java | 1 - .../hadoop/hbase/io/hfile/HFileScanner.java | 3 +-- .../hadoop/hbase/io/hfile/LruBlockCache.java | 7 +++--- .../hbase/io/hfile/bucket/BucketCache.java | 5 ++-- .../hbase/ipc/BalancedQueueRpcExecutor.java | 1 - .../hadoop/hbase/mapred/RowCounter.java | 3 +-- .../hbase/mapred/TableInputFormatBase.java | 12 ++++----- .../hbase/mapred/TableRecordReader.java | 3 +-- .../hbase/mapred/TableRecordReaderImpl.java | 3 +-- .../hadoop/hbase/mapreduce/CellCounter.java | 1 - .../apache/hadoop/hbase/mapreduce/Export.java | 1 - .../hbase/mapreduce/IdentityTableReducer.java | 16 ++++++------ .../mapreduce/MultiTableInputFormatBase.java | 1 - .../mapreduce/TableRecordReaderImpl.java | 3 +-- .../mapreduce/TableSnapshotInputFormat.java | 15 +++++------ .../hbase/master/AssignmentListener.java | 1 - .../hbase/master/ClusterStatusPublisher.java | 1 - .../apache/hadoop/hbase/master/HMaster.java | 3 +-- .../hbase/master/MasterStatusServlet.java | 1 - .../hadoop/hbase/master/ServerListener.java | 1 - .../hadoop/hbase/master/ServerManager.java | 2 -- .../hadoop/hbase/master/SplitLogManager.java | 6 ++--- .../hbase/master/balancer/BalancerChore.java | 1 - .../master/balancer/BaseLoadBalancer.java | 6 ++--- .../balancer/FavoredNodeLoadBalancer.java | 21 ++++++++-------- .../master/balancer/SimpleLoadBalancer.java | 4 +-- .../balancer/StochasticLoadBalancer.java | 1 - .../master/handler/LogReplayHandler.java | 8 +++--- .../master/handler/TableEventHandler.java | 1 - .../master/handler/TotesHRegionInfo.java | 3 +-- .../snapshot/MasterSnapshotVerifier.java | 4 +-- .../procedure/MasterProcedureManager.java | 25 +++++++++---------- .../procedure/MasterProcedureManagerHost.java | 5 ++-- .../hbase/procedure/ProcedureMemberRpcs.java | 1 - .../RegionServerProcedureManagerHost.java | 4 +-- .../procedure/ZKProcedureCoordinatorRpcs.java | 1 - .../flush/FlushTableSubprocedure.java | 1 - 
.../MiniBatchOperationInProgress.java | 11 ++++---- .../MultiRowMutationProcessor.java | 2 -- .../hbase/regionserver/RegionScanner.java | 8 +++--- .../regionserver/ServerNonceManager.java | 5 ---- .../hbase/regionserver/SplitLogWorker.java | 10 ++++---- .../hadoop/hbase/regionserver/Store.java | 1 - .../compactions/StripeCompactor.java | 3 --- .../snapshot/RegionServerSnapshotManager.java | 2 -- .../hbase/regionserver/wal/HLogUtil.java | 2 -- .../wal/MetricsWALEditsReplay.java | 1 - .../HBaseInterClusterReplicationEndpoint.java | 4 +-- .../HbaseObjectWritableFor96Migration.java | 1 - .../AuthenticationTokenSecretManager.java | 1 - .../visibility/VisibilityController.java | 2 +- .../hadoop/hbase/snapshot/ExportSnapshot.java | 5 ---- .../snapshot/SnapshotDescriptionUtils.java | 1 - .../hadoop/hbase/snapshot/SnapshotInfo.java | 1 - .../hadoop/hbase/util/BloomFilterFactory.java | 3 +-- .../util/BoundedPriorityBlockingQueue.java | 4 --- .../hadoop/hbase/util/CompressionTest.java | 1 - .../apache/hadoop/hbase/util/ConfigUtil.java | 1 - .../hadoop/hbase/util/FSRegionScanner.java | 1 - .../FSTableDescriptorMigrationToSubdir.java | 1 - .../org/apache/hadoop/hbase/util/FSUtils.java | 4 +-- .../hadoop/hbase/util/HBaseFsckRepair.java | 1 - .../hadoop/hbase/util/HFileArchiveUtil.java | 7 +++--- .../hadoop/hbase/util/ModifyRegionUtils.java | 1 - .../hbase/util/ShutdownHookManager.java | 1 - .../hbase/util/SortedCopyOnWriteSet.java | 1 - .../hbase/util/hbck/OfflineMetaRepair.java | 1 - .../hbase/zookeeper/ClusterStatusTracker.java | 7 +++--- .../hbase/zookeeper/MiniZooKeeperCluster.java | 1 - .../hbase/zookeeper/RegionServerTracker.java | 1 - .../hadoop/hbase/zookeeper/ZKSplitLog.java | 6 ++--- 146 files changed, 247 insertions(+), 384 deletions(-) diff --git a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java index 7ec47f681fb..0573e577551 
100644 --- a/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java +++ b/hbase-annotations/src/main/java/org/apache/hadoop/hbase/classification/InterfaceStability.java @@ -21,21 +21,20 @@ import java.lang.annotation.Documented; import java.lang.annotation.Retention; import java.lang.annotation.RetentionPolicy; -import org.apache.hadoop.hbase.classification.InterfaceAudience.LimitedPrivate; -import org.apache.hadoop.hbase.classification.InterfaceAudience.Private; -import org.apache.hadoop.hbase.classification.InterfaceAudience.Public; /** * Annotation to inform users of how much to rely on a particular package, * class or method not changing over time. Currently the stability can be * {@link Stable}, {@link Evolving} or {@link Unstable}.
* - * + * */ @InterfaceAudience.Public @InterfaceStability.Evolving diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java index bbe6e9ec471..492633c1886 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/InvalidFamilyOperationException.java @@ -21,7 +21,6 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import java.io.IOException; /** * Thrown if a request is table schema modification is requested but diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java index 00d22fe15b3..06a61c042a5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java @@ -23,7 +23,6 @@ package org.apache.hadoop.hbase; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos; -import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.Coprocessor; import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Strings; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java index 47a2b7d1e24..dc5ba78d962 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerName.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; 
import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos; -import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.MetaRegionServer; import org.apache.hadoop.hbase.util.Addressing; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java index 18be1d9822f..2bc5d79dc9b 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Action.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.hbase.HRegionInfo; /** * A Get, Put, Increment, Append, or Delete associated with it's region. Used internally by diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java index 91d44b74ee4..39adc213bb5 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Admin.java @@ -30,13 +30,11 @@ import org.apache.hadoop.hbase.ClusterStatus; import org.apache.hadoop.hbase.HColumnDescriptor; import org.apache.hadoop.hbase.HRegionInfo; import org.apache.hadoop.hbase.HTableDescriptor; -import org.apache.hadoop.hbase.MasterNotRunningException; import org.apache.hadoop.hbase.NamespaceDescriptor; import org.apache.hadoop.hbase.ServerName; import org.apache.hadoop.hbase.TableExistsException; import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.TableNotFoundException; -import org.apache.hadoop.hbase.UnknownRegionException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import 
org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java index 652ae9984d4..ac6c82e8f7c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ClientIdGenerator.java @@ -20,11 +20,6 @@ package org.apache.hadoop.hbase.client; import java.io.IOException; import java.lang.management.ManagementFactory; -import java.net.Inet4Address; -import java.net.Inet6Address; -import java.net.InetAddress; -import java.net.NetworkInterface; -import java.util.Enumeration; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java index f0d7a147b40..3969d2c44f6 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ConnectionFactory.java @@ -26,7 +26,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HBaseConfiguration; -import org.apache.hadoop.hbase.TableName; import org.apache.hadoop.hbase.security.User; import org.apache.hadoop.hbase.security.UserProvider; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java index 3ce38bc21ef..508d17130a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HBaseAdmin.java @@ -139,7 +139,6 @@ import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.EnvironmentEdgeManager; import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.hbase.zookeeper.MasterAddressTracker; -import org.apache.hadoop.hbase.zookeeper.MetaTableLocator; import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.util.StringUtils; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java index b979f877a58..0378fe3f64d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/HConnectionManager.java @@ -24,7 +24,6 @@ import org.apache.commons.logging.Log; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hbase.ZooKeeperConnectionException; import org.apache.hadoop.hbase.security.User; /** diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java index 9f03d719705..089ccff1f84 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/MultiResponse.java @@ -19,16 +19,12 @@ package org.apache.hadoop.hbase.client; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; -import java.util.List; import java.util.Map; import java.util.TreeMap; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.util.Bytes; -import org.apache.hadoop.hbase.util.Pair; /** * A 
container for Result objects, grouped by regionName. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java index 44f20225843..126b1175a89 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/NoServerForRegionException.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.RegionException; /** * Thrown when no region server can be found for a region diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java index 8e9f380333a..2d62332c22f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RegionCoprocessorServiceExec.java @@ -24,7 +24,6 @@ import com.google.protobuf.Descriptors.MethodDescriptor; import com.google.protobuf.Message; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.coprocessor.Batch; import org.apache.hadoop.hbase.util.Bytes; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java index 0b4509bf6e7..0d7992165bf 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetriesExhaustedException.java @@ -20,7 +20,6 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; import 
java.io.IOException; import java.util.Date; import java.util.List; -import java.util.concurrent.Callable; /** * Exception thrown by HTable methods when an attempt to do something (like diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java index 9799ec006bd..3fa4f64a90c 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RetryingCallerInterceptorFactory.java @@ -25,7 +25,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; import org.apache.hadoop.hbase.classification.InterfaceAudience; -import org.apache.hadoop.hbase.client.ConnectionManager.HConnectionImplementation; /** * Factory implementation to provide the {@link HConnectionImplementation} with diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java index d41320202c5..098623392f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ReversedScannerCallable.java @@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.client.metrics.ScanMetrics; import org.apache.hadoop.hbase.ipc.RpcControllerFactory; import org.apache.hadoop.hbase.util.Bytes; -import com.google.protobuf.RpcController; /** * A reversed ScannerCallable which supports backward scanning. 
@@ -55,7 +54,8 @@ public class ReversedScannerCallable extends ScannerCallable { * @param scan * @param scanMetrics * @param locateStartRow The start row for locating regions - * @param rpcFactory to create an {@link RpcController} to talk to the regionserver + * @param rpcFactory to create an + * {@link com.google.protobuf.RpcController} to talk to the regionserver */ public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, byte[] locateStartRow, RpcControllerFactory rpcFactory) { @@ -69,7 +69,8 @@ public class ReversedScannerCallable extends ScannerCallable { * @param scan * @param scanMetrics * @param locateStartRow The start row for locating regions - * @param rpcFactory to create an {@link RpcController} to talk to the regionserver + * @param rpcFactory to create an + * {@link com.google.protobuf.RpcController} to talk to the regionserver * @param replicaId the replica id */ public ReversedScannerCallable(ClusterConnection connection, TableName tableName, Scan scan, diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java index 98aa4f17fa0..d83f14fe012 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/RowTooBigException.java @@ -20,7 +20,6 @@ package org.apache.hadoop.hbase.client; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.RegionException; /** * Gets or Scans throw this exception if running without in-row scan flag diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java index 0fc24fd2170..5ecc3634493 100644 --- 
a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ScannerCallable.java @@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.DNS; -import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import com.google.protobuf.TextFormat; @@ -95,9 +94,10 @@ public class ScannerCallable extends RegionServerCallable { * @param connection which connection * @param tableName table callable is on * @param scan the scan to execute - * @param scanMetrics the ScanMetrics to used, if it is null, ScannerCallable won't collect - * metrics - * @param rpcControllerFactory factory to use when creating {@link RpcController} + * @param scanMetrics the ScanMetrics to used, if it is null, + * ScannerCallable won't collect metrics + * @param rpcControllerFactory factory to use when creating + * {@link com.google.protobuf.RpcController} */ public ScannerCallable (ClusterConnection connection, TableName tableName, Scan scan, ScanMetrics scanMetrics, RpcControllerFactory rpcControllerFactory) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java index 9aac6bf87da..6db94d2f7d4 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/coprocessor/DoubleColumnInterpreter.java @@ -23,7 +23,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; import org.apache.hadoop.hbase.coprocessor.ColumnInterpreter; import 
org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.EmptyMsg; import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.DoubleMsg; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java index eb5e998dfce..16c28f002fb 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/replication/ReplicationAdmin.java @@ -22,7 +22,6 @@ import java.io.Closeable; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; -import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java index 00ae239351b..8a0cb9fd2a9 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/coprocessor/ColumnInterpreter.java @@ -23,9 +23,6 @@ import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.KeyValueUtil; -import org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter; import com.google.protobuf.Message; @@ -33,7 +30,8 @@ import com.google.protobuf.Message; * Defines how value for specific column is interpreted and provides utility * methods like compare, add, multiply etc for them. Takes column family, column * qualifier and return the cell value. Its concrete implementation should - * handle null case gracefully. Refer to {@link LongColumnInterpreter} for an + * handle null case gracefully. 
Refer to + * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter} for an * example. *

* Takes two generic parameters and three Message parameters. @@ -130,7 +128,8 @@ Q extends Message, R extends Message> { * server side to construct the ColumnInterpreter. The server * will pass this to the {@link #initialize} * method. If there is no ColumnInterpreter specific data (for e.g., - * {@link LongColumnInterpreter}) then null should be returned. + * {@link org.apache.hadoop.hbase.client.coprocessor.LongColumnInterpreter}) + * then null should be returned. * @return the PB message */ public abstract P getRequestData(); diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java index b26aa87d617..b87e40068f0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/MergeRegionException.java @@ -19,7 +19,6 @@ package org.apache.hadoop.hbase.exceptions; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.RegionException; import org.apache.hadoop.hbase.client.DoNotRetryRegionException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java index efb79f9e1ed..d0bf5c8c24f 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/exceptions/RegionOpeningException.java @@ -23,7 +23,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.NotServingRegionException; -import org.apache.hadoop.ipc.RemoteException; /** * Subclass 
if the server knows the region is now on another server. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java index 7ef1262ecb5..729afe1480d 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/Filter.java @@ -213,7 +213,7 @@ public abstract class Filter { * @return KeyValue which must be next seeked. return null if the filter is not sure which key to * seek to next. * @throws IOException - * @Deprecated Use {@link #getNextCellHint(Cell)} instead. + * Function is Deprecated. Use {@link #getNextCellHint(Cell)} instead. */ @Deprecated abstract public KeyValue getNextKeyHint(final KeyValue currentKV) throws IOException; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java index ff2a0da04cc..bf3a5f997b0 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/QualifierFilter.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.Get; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; @@ -42,7 +41,8 @@ import com.google.protobuf.InvalidProtocolBufferException; *

* Multiple filters can be combined using {@link FilterList}. *

- * If an already known column qualifier is looked for, use {@link Get#addColumn} + * If an already known column qualifier is looked for, use + * {@link org.apache.hadoop.hbase.client.Get#addColumn} * directly rather than a filter. */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java index d975486fa93..23a1e5d268a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/RowFilter.java @@ -25,7 +25,6 @@ import java.util.ArrayList; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; import org.apache.hadoop.hbase.protobuf.generated.FilterProtos; @@ -41,7 +40,8 @@ import com.google.protobuf.InvalidProtocolBufferException; *

* Multiple filters can be combined using {@link FilterList}. *

- * If an already known row range needs to be scanned, use {@link Scan} start + * If an already known row range needs to be scanned, use + * {@link org.apache.hadoop.hbase.CellScanner} start * and stop rows directly rather than a filter. */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java index 3b1f779cd8d..55e75bae327 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/filter/SingleColumnValueFilter.java @@ -29,7 +29,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.CellUtil; -import org.apache.hadoop.hbase.client.Scan; import org.apache.hadoop.hbase.exceptions.DeserializationException; import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; import org.apache.hadoop.hbase.protobuf.ProtobufUtil; @@ -53,7 +52,8 @@ import com.google.protobuf.InvalidProtocolBufferException; * long value), then you can pass in your own comparator instead. *

* You must also specify a family and qualifier. Only the value of this column - * will be tested. When using this filter on a {@link Scan} with specified + * will be tested. When using this filter on a + * {@link org.apache.hadoop.hbase.client.Scan} with specified * inputs, the column to be tested should also be added as input (otherwise * the filter will regard the column as missing). *

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java index c10a098e2c3..bdc85f067fe 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/CoprocessorRpcChannel.java @@ -28,7 +28,6 @@ import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcChannel; import com.google.protobuf.RpcController; -import com.google.protobuf.Service; import com.google.protobuf.ServiceException; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -37,8 +36,8 @@ import java.io.IOException; /** * Base class which provides clients with an RPC connection to - * call coprocessor endpoint {@link Service}s. Note that clients should not use this class - * directly, except through + * call coprocessor endpoint {@link com.google.protobuf.Service}s. + * Note that clients should not use this class directly, except through * {@link org.apache.hadoop.hbase.client.HTableInterface#coprocessorService(byte[])}. */ @InterfaceAudience.Public diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java index 47e89371ade..46356f83b41 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RemoteWithExtrasException.java @@ -19,12 +19,12 @@ package org.apache.hadoop.hbase.ipc; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.DoNotRetryIOException; import org.apache.hadoop.ipc.RemoteException; /** * A {@link RemoteException} with some extra information. 
If source exception - * was a {@link DoNotRetryIOException}, {@link #isDoNotRetry()} will return true. + * was a {@link org.apache.hadoop.hbase.DoNotRetryIOException}, + * {@link #isDoNotRetry()} will return true. *

A {@link RemoteException} hosts exceptions we got from the server. */ @SuppressWarnings("serial") diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java index 09da0d9e3b7..76a88ea0980 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcController.java @@ -23,11 +23,8 @@ import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.util.StringUtils; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Message; import com.google.protobuf.RpcCallback; import com.google.protobuf.RpcController; -import com.google.protobuf.Service; import org.apache.hadoop.util.StringUtils; import java.io.IOException; @@ -35,10 +32,11 @@ import java.io.IOException; /** * Used for server-side protobuf RPC service invocations. This handler allows * invocation exceptions to easily be passed through to the RPC server from coprocessor - * {@link Service} implementations. + * {@link com.google.protobuf.Service} implementations. * *

- * When implementing {@link Service} defined methods, coprocessor endpoints can use the following + * When implementing {@link com.google.protobuf.Service} defined methods, + * coprocessor endpoints can use the following * pattern to pass exceptions back to the RPC client: * * public void myMethod(RpcController controller, MyRequest request, RpcCallback done) { @@ -59,7 +57,8 @@ import java.io.IOException; public class ServerRpcController implements RpcController { /** * The exception thrown within - * {@link Service#callMethod(Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)}, + * {@link com.google.protobuf.Service#callMethod( + * Descriptors.MethodDescriptor, RpcController, Message, RpcCallback)}, * if any. */ // TODO: it would be good widen this to just Throwable, but IOException is what we allow now @@ -103,7 +102,7 @@ public class ServerRpcController implements RpcController { } /** - * Sets an exception to be communicated back to the {@link Service} client. + * Sets an exception to be communicated back to the {@link com.google.protobuf.Service} client. 
* @param ioe the exception encountered during execution of the service method */ public void setFailedOn(IOException ioe) { diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java index b8a391bb4bc..6d82444269e 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ProtobufUtil.java @@ -35,7 +35,6 @@ import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.NavigableSet; -import java.util.concurrent.TimeUnit; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java index ca6b67eb263..8b7e6f2020a 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/ResponseConverter.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hbase.protobuf.generated.MasterProtos.RunCatalogScanRes import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.GetLastFlushedSequenceIdResponse; import org.apache.hadoop.hbase.regionserver.RegionOpeningState; import org.apache.hadoop.hbase.security.access.UserPermission; -import org.apache.hadoop.hbase.util.Pair; import org.apache.hadoop.util.StringUtils; import com.google.protobuf.ByteString; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java index 848303cbb5f..d20ab4474e8 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java +++ 
b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationPeerZKImpl.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.replication; import java.io.Closeable; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; import java.util.HashMap; import java.util.List; import java.util.Map; diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java index 7d27b7a9dec..9d0319b7d21 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/util/PoolMap.java @@ -22,7 +22,6 @@ import java.util.ArrayList; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; -import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Set; @@ -282,7 +281,7 @@ public class PoolMap implements Map { /** * The ReusablePool represents a {@link PoolMap.Pool} that builds - * on the {@link LinkedList} class. It essentially allows resources to be + * on the {@link java.util.LinkedList} class. It essentially allows resources to be * checked out, at which point it is removed from this pool. When the resource * is no longer required, it should be returned to the pool in order to be * reused. 
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java index ab124678f91..297e96e7497 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKAssign.java @@ -32,8 +32,6 @@ import org.apache.hadoop.hbase.executor.EventType; import org.apache.zookeeper.AsyncCallback; import org.apache.zookeeper.KeeperException; import org.apache.zookeeper.KeeperException.Code; -import org.apache.zookeeper.KeeperException.NoNodeException; -import org.apache.zookeeper.KeeperException.NodeExistsException; import org.apache.zookeeper.data.Stat; // We should not be importing this Type here, nor a RegionTransition, etc. This class should be @@ -123,7 +121,8 @@ public class ZKAssign { * Creates a new unassigned node in the OFFLINE state for the specified region. * *

Does not transition nodes from other states. If a node already exists - * for this region, a {@link NodeExistsException} will be thrown. + * for this region, a {@link org.apache.zookeeper.KeeperException.NodeExistsException} + * will be thrown. * *

Sets a watcher on the unassigned region node if the method is successful. * @@ -247,7 +246,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -275,7 +274,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -302,7 +301,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -330,7 +329,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -359,7 +358,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -387,7 +386,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -417,7 +416,7 @@ public class ZKAssign { * specified region. * *

If a node does not already exist for this region, a - * {@link NoNodeException} will be thrown. + * {@link org.apache.zookeeper.KeeperException.NoNodeException} will be thrown. * *

No watcher is set whether this succeeds or not. * @@ -504,7 +503,8 @@ public class ZKAssign { * region. * *

Does not transition nodes from any states. If a node already exists - * for this region, a {@link NodeExistsException} will be thrown. + * for this region, a {@link org.apache.zookeeper.KeeperException.NodeExistsException} + * will be thrown. * *

If creation is successful, returns the version number of the CLOSING * node created. diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java index 9238ebbabe7..e4aedc49235 100644 --- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java +++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/ZKConfig.java @@ -20,8 +20,6 @@ package org.apache.hadoop.hbase.zookeeper; import java.io.IOException; import java.io.InputStream; -import java.net.InetAddress; -import java.net.UnknownHostException; import java.util.ArrayList; import java.util.List; import java.util.Map.Entry; @@ -32,7 +30,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hbase.HConstants; -import org.apache.hadoop.util.StringUtils; /** * Utility methods for reading, and building the ZooKeeper configuration. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java index 7801e3b8810..4c8aad1bd87 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/codec/Codec.java @@ -24,18 +24,17 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.CellScanner; import org.apache.hadoop.hbase.HBaseInterfaceAudience; import org.apache.hadoop.hbase.io.CellOutputStream; -import org.apache.hadoop.hbase.io.encoding.DataBlockEncoder; /** * Encoder/Decoder for Cell. * - *

Like {@link DataBlockEncoder} only Cell-based rather than KeyValue version 1 based - * and without presuming an hfile context. Intent is an Interface that will work for hfile and - * rpc. + *

Like {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder} + * only Cell-based rather than KeyValue version 1 based and without presuming + * an hfile context. Intent is an Interface that will work for hfile and rpc. */ @InterfaceAudience.LimitedPrivate({HBaseInterfaceAudience.COPROC, HBaseInterfaceAudience.PHOENIX}) public interface Codec { - // TODO: interfacing with {@link DataBlockEncoder} + // TODO: interfacing with {@link org.apache.hadoop.hbase.io.encoding.DataBlockEncoder} /** * Call flush when done. Some encoders may not put anything on the stream until flush is called. * On flush, let go of any resources used by the encoder. diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java index 4750413fa0d..cdd74dde225 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/CellOutputStream.java @@ -23,15 +23,14 @@ import java.io.IOException; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; -import org.apache.hadoop.hbase.CellScanner; /** * Accepts a stream of Cells. This can be used to build a block of cells during compactions * and flushes, or to build a byte[] to send to the client. This could be backed by a * List, but more efficient implementations will append results to a * byte[] to eliminate overhead, and possibly encode the cells further. - *

To read Cells, use {@link CellScanner} - * @see CellScanner + *

To read Cells, use {@link org.apache.hadoop.hbase.CellScanner} + * @see org.apache.hadoop.hbase.CellScanner */ @InterfaceAudience.Private @InterfaceStability.Evolving diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/HeapMemorySizeUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/HeapMemorySizeUtil.java index c85f0342ff5..f464db99d5e 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/HeapMemorySizeUtil.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/io/util/HeapMemorySizeUtil.java @@ -73,7 +73,7 @@ public class HeapMemorySizeUtil { /** * Retrieve global memstore configured size as percentage of total heap. - * @param conf + * @param c * @param logInvalid */ public static float getGlobalMemStorePercent(final Configuration c, final boolean logInvalid) { @@ -91,7 +91,7 @@ public class HeapMemorySizeUtil { /** * Retrieve configured size for global memstore lower water mark as percentage of total heap. - * @param conf + * @param c * @param globalMemStorePercent */ public static float getGlobalMemStoreLowerMark(final Configuration c, float globalMemStorePercent) { diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java index 9b4ed196c51..2f98ebf52d8 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/DataType.java @@ -21,7 +21,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.util.Order; import org.apache.hadoop.hbase.util.PositionedByteRange; -import org.apache.hadoop.io.Writable; /** *

@@ -33,7 +32,8 @@ import org.apache.hadoop.io.Writable; * qualifiers. *

*

- * {@code DataType}s are different from Hadoop {@link Writable}s in two + * {@code DataType}s are different from Hadoop + * {@link org.apache.hadoop.io.Writable}s in two * significant ways. First, {@code DataType} describes how to serialize a * value, it does not encapsulate a serialized value. Second, {@code DataType} * implementations provide hints to consumers about relationships between the diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java index 03d3d0242c4..334b42f21d9 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesFixedLength.java @@ -19,15 +19,15 @@ package org.apache.hadoop.hbase.types; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Order; import org.apache.hadoop.hbase.util.PositionedByteRange; /** * An {@code DataType} that encodes fixed-length values encoded using - * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Intended to make it - * easier to transition away from direct use of {@link Bytes}. - * @see Bytes#putBytes(byte[], int, byte[], int, int) + * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}. + * Intended to make it easier to transition away from direct use of + * {@link org.apache.hadoop.hbase.util.Bytes}. 
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int) * @see RawBytes * @see OrderedBlob * @see OrderedBlobVar diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java index 507dac15d9f..54a4c639ed0 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawBytesTerminated.java @@ -19,16 +19,16 @@ package org.apache.hadoop.hbase.types; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Order; import org.apache.hadoop.hbase.util.PositionedByteRange; /** * An {@code DataType} that encodes variable-length values encoded using - * {@link Bytes#putBytes(byte[], int, byte[], int, int)}. Includes a - * termination marker following the raw {@code byte[]} value. Intended to - * make it easier to transition away from direct use of {@link Bytes}. - * @see Bytes#putBytes(byte[], int, byte[], int, int) + * {@link org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int)}. + * Includes a termination marker following the raw {@code byte[]} value. Intended to + * make it easier to transition away from direct use of + * {@link org.apache.hadoop.hbase.util.Bytes}. 
+ * @see org.apache.hadoop.hbase.util.Bytes#putBytes(byte[], int, byte[], int, int) * @see RawBytes * @see OrderedBlob */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java index 2b71df3abc1..d11bead3c32 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringFixedLength.java @@ -19,15 +19,15 @@ package org.apache.hadoop.hbase.types; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Order; /** * An {@code DataType} that encodes fixed-length values encoded using - * {@link Bytes#toBytes(String)}. Intended to make it easier to transition - * away from direct use of {@link Bytes}. - * @see Bytes#toBytes(String) - * @see Bytes#toString(byte[], int, int) + * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}. + * Intended to make it easier to transition away from direct use of + * {@link org.apache.hadoop.hbase.util.Bytes}. 
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String) + * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int) * @see RawString */ @InterfaceAudience.Public diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java index b860ae3e665..c96a0b93b39 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/types/RawStringTerminated.java @@ -19,16 +19,16 @@ package org.apache.hadoop.hbase.types; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; -import org.apache.hadoop.hbase.util.Bytes; import org.apache.hadoop.hbase.util.Order; /** * An {@code DataType} that encodes variable-length values encoded using - * {@link Bytes#toBytes(String)}. Includes a termination marker following the - * raw {@code byte[]} value. Intended to make it easier to transition - * away from direct use of {@link Bytes}. - * @see Bytes#toBytes(String) - * @see Bytes#toString(byte[], int, int) + * {@link org.apache.hadoop.hbase.util.Bytes#toBytes(String)}. + * Includes a termination marker following the raw {@code byte[]} value. + * Intended to make it easier to transition away from direct use of + * {@link org.apache.hadoop.hbase.util.Bytes}. 
+ * @see org.apache.hadoop.hbase.util.Bytes#toBytes(String) + * @see org.apache.hadoop.hbase.util.Bytes#toString(byte[], int, int) * @see RawString * @see OrderedString */ diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java index 77f71430098..8d3d0cf9d90 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/AbstractPositionedByteRange.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.hbase.util; -import java.nio.ByteBuffer; import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; @@ -36,7 +35,7 @@ import com.google.common.annotations.VisibleForTesting; public abstract class AbstractPositionedByteRange extends AbstractByteRange implements PositionedByteRange { /** - * The current index into the range. Like {@link ByteBuffer} position, it + * The current index into the range. Like {@link java.nio.ByteBuffer} position, it * points to the next value that will be read/written in the array. It * provides the appearance of being 0-indexed, even though its value is * calculated according to offset. @@ -183,7 +182,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl @Override public abstract int putVLong(long val); /** - * Similar to {@link ByteBuffer#flip()}. Sets length to position, position to + * Similar to {@link java.nio.ByteBuffer#flip()}. Sets length to position, position to * offset. */ @VisibleForTesting @@ -195,7 +194,7 @@ public abstract class AbstractPositionedByteRange extends AbstractByteRange impl } /** - * Similar to {@link ByteBuffer#clear()}. Sets position to 0, length to + * Similar to {@link java.nio.ByteBuffer#clear()}. Sets position to 0, length to * capacity. 
*/ @VisibleForTesting diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java index 26899469a8f..6f348bc91a7 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ByteBufferUtils.java @@ -17,7 +17,6 @@ package org.apache.hadoop.hbase.util; import java.io.ByteArrayOutputStream; -import java.io.DataInput; import java.io.DataInputStream; import java.io.IOException; import java.io.InputStream; diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java index 698bbdafaf3..a342a48eff2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/Bytes.java @@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience; import org.apache.hadoop.hbase.classification.InterfaceStability; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.KeyValue; -import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.WritableComparator; import org.apache.hadoop.io.WritableUtils; @@ -1510,7 +1509,7 @@ public class Bytes { * @param b bytes to hash * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the * passed in array. This method is what {@link org.apache.hadoop.io.Text} and - * {@link ImmutableBytesWritable} use calculating hash code. + * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable} use calculating hash code. */ public static int hashCode(final byte [] b) { return hashCode(b, b.length); @@ -1521,7 +1520,7 @@ public class Bytes { * @param length length of the value * @return Runs {@link WritableComparator#hashBytes(byte[], int)} on the * passed in array. 
This method is what {@link org.apache.hadoop.io.Text} and - * {@link ImmutableBytesWritable} use calculating hash code. + * {@link org.apache.hadoop.hbase.io.ImmutableBytesWritable} use calculating hash code. */ public static int hashCode(final byte [] b, final int length) { return WritableComparator.hashBytes(b, length); diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java index af39d1158a3..5a889f87ba2 100644 --- a/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java +++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/util/ConcurrentIndex.java @@ -21,10 +21,8 @@ package org.apache.hadoop.hbase.util; import com.google.common.base.Supplier; -import com.google.common.collect.Multiset; import java.util.Comparator; -import java.util.ConcurrentModificationException; import java.util.Set; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; @@ -35,7 +33,7 @@ import org.apache.hadoop.hbase.classification.InterfaceStability; /** * A simple concurrent map of sets. This is similar in concept to - * {@link Multiset}, with the following exceptions: + * {@link com.google.common.collect.Multiset}, with the following exceptions: *