From 3bac31b2a49bca153df3b47a198667828b61f36e Mon Sep 17 00:00:00 2001
From: Apekshit
Date: Fri, 27 Nov 2015 13:41:46 -0800
Subject: [PATCH] HBASE-14890 Fix broken checkstyle tool. Some javadocs had
 parsing errors because of wrongly formatted HTML tags. (Apekshit)

Signed-off-by: stack
---
 .../org/apache/hadoop/hbase/client/Query.java      |  2 +-
 .../hadoop/hbase/ipc/RpcClientImpl.java            |  2 +-
 .../hbase/protobuf/RequestConverter.java           |  2 +-
 .../replication/ReplicationQueueInfo.java          |  2 +-
 .../org/apache/hadoop/hbase/HConstants.java        | 10 +++---
 .../java/org/apache/hadoop/hbase/Tag.java          | 32 +++++++------------
 .../hadoop/hbase/constraint/Constraints.java       | 10 +++---
 .../hbase/io/hfile/bucket/BucketCache.java         | 20 ++++++------
 .../mapreduce/LoadIncrementalHFiles.java           |  3 +-
 .../hadoop/hbase/master/ServerManager.java         |  5 +--
 .../hadoop/hbase/master/SplitLogManager.java       |  3 +-
 .../hbase/mob/mapreduce/SweepMapper.java           |  4 +--
 .../hadoop/hbase/monitoring/TaskMonitor.java       |  4 +--
 .../hadoop/hbase/regionserver/HRegion.java         | 19 ++++------
 .../MobCompactionStoreScanner.java                 |  2 +-
 .../hbase/regionserver/MobStoreScanner.java        |  5 ++-
 .../hbase/regionserver/ScannerContext.java         |  4 +--
 .../hadoop/hbase/regionserver/StoreFile.java       |  2 +-
 .../hbase/regionserver/StoreFileInfo.java          |  4 +--
 .../handler/OpenRegionHandler.java                 |  6 ++--
 .../visibility/VisibilityController.java           | 12 +++---
 .../hadoop/hbase/util/FSRegionScanner.java         |  2 +-
 .../apache/hadoop/hbase/util/HBaseFsck.java        | 12 +++---
 .../apache/hadoop/hbase/util/RegionMover.java      | 20 ++++-------
 .../org/apache/hadoop/hbase/wal/WALKey.java        | 10 +++---
 .../apache/hadoop/hbase/wal/WALSplitter.java       |  4 +--
 26 files changed, 82 insertions(+), 119 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
index ac4e38d26df..53062a0f407 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/Query.java
@@ -198,7 +198,7 @@ public abstract class Query extends OperationWithAttributes {
   }
 
   /**
-   * @return Map a map of column families to time ranges
+   * @return A map of column families to time ranges
    */
   public Map<byte[], TimeRange> getColumnFamilyTimeRange() {
     return this.colFamTimeRangeMap;
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
index 21b257fa80d..d7a00292f59 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/RpcClientImpl.java
@@ -792,7 +792,7 @@ public class RpcClientImpl extends AbstractRpcClient {
     }
 
     /**
-     * Write the RPC header: <MAGIC WORD -- 'HBas'> <ONEBYTE_VERSION> <ONEBYTE_AUTH_TYPE>
+     * Write the RPC header: {@code <MAGIC WORD -- 'HBas'> <ONEBYTE_VERSION> <ONEBYTE_AUTH_TYPE>}
      */
    private void writeConnectionHeaderPreamble(OutputStream outStream) throws IOException {
      // Assemble the preamble up in a buffer first and then send it.  Writing individual elements,
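The preamble documented in the hunk above is a fixed six-byte sequence: the magic word 'HBas', one version byte, one auth-type byte. A minimal sketch of assembling it follows; the VERSION and AUTH_CODE values are illustrative stand-ins, not the real HConstants/AuthMethod fields.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.charset.StandardCharsets;

public final class PreambleSketch {
  private static final byte[] MAGIC = "HBas".getBytes(StandardCharsets.UTF_8);
  private static final byte VERSION = 0;    // hypothetical wire version
  private static final byte AUTH_CODE = 80; // hypothetical auth-method code

  static void writePreamble(OutputStream out) throws IOException {
    // Assemble the whole preamble in one buffer so it goes out in a single write.
    ByteArrayOutputStream buf = new ByteArrayOutputStream(MAGIC.length + 2);
    buf.write(MAGIC, 0, MAGIC.length); // <MAGIC WORD -- 'HBas'>
    buf.write(VERSION);                // <ONEBYTE_VERSION>
    buf.write(AUTH_CODE);              // <ONEBYTE_AUTH_TYPE>
    out.write(buf.toByteArray());
    out.flush();
  }
}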
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
index 97ce811063f..ada510b90b5 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/protobuf/RequestConverter.java
@@ -924,7 +924,7 @@ public final class RequestConverter {
   }
 
   /**
-   * @see {@link #buildRollWALWriterRequest()
+   * @see {@link #buildRollWALWriterRequest()}
    */
   private static RollWALWriterRequest ROLL_WAL_WRITER_REQUEST =
       RollWALWriterRequest.newBuilder().build();
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
index ab9a2c20268..64eedfbd4f9 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/replication/ReplicationQueueInfo.java
@@ -63,7 +63,7 @@ public class ReplicationQueueInfo {
  /**
   * Parse dead server names from znode string servername can contain "-" such as
   * "ip-10-46-221-101.ec2.internal", so we need skip some "-" during parsing for the following
-  * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125--...
+  * cases: 2-ip-10-46-221-101.ec2.internal,52170,1364333181125-<server name>-...
   */
  private static void extractDeadServersFromZNodeString(String deadServerListStr,
      List<ServerName> result) {
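The ReplicationQueueInfo javadoc above is about server names whose hostnames themselves contain "-", so a plain split on "-" would shred them. A rough, standalone illustration of the safer approach — anchoring on the trailing ",port,startcode" shape — is below; it deliberately ignores the leading "2-" counter prefix, which the real parser also has to strip, and it is not the HBase implementation.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class DeadServerParseSketch {
  // A server name ends in ",port,startcode"; the host part may contain '-'.
  private static final Pattern SERVER_NAME = Pattern.compile("([^,/]+?),(\\d+),(\\d+)");

  static List<String> extract(String znodeValue) {
    // znodeValue.split("-") would break "ip-10-46-221-101.ec2.internal" apart.
    List<String> servers = new ArrayList<String>();
    Matcher m = SERVER_NAME.matcher(znodeValue);
    while (m.find()) {
      servers.add(m.group());
    }
    return servers;
  }
}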
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
index de4964c8951..ac57514335c 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/HConstants.java
@@ -883,12 +883,12 @@ public final class HConstants {
       Pattern.compile("^coprocessor\\$([0-9]+)$", Pattern.CASE_INSENSITIVE);
 
   /**
+   * <pre>
    * Pattern that matches a coprocessor specification. Form is:
-   * <pre>
-   *<coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]
-   * </pre>
-   * ...where arguments are <KEY> '=' <VALUE> [,...]
-   * <pre>
-   * For example: hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2
+   * {@code <coprocessor jar file location> '|' <class name> ['|' <priority> ['|' <arguments>]]}
+   * where arguments are {@code <KEY> '=' <VALUE> [,...]}
+   * For example: {@code hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2}
+   * </pre>
    */
  public static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");
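As a sanity check on the hunk above, the pattern's four groups can be exercised against the javadoc's own example spec with a few standalone lines (expected group values noted in comments):

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class CoprocessorSpecSketch {
  private static final Pattern CP_HTD_ATTR_VALUE_PATTERN =
      Pattern.compile("(^[^\\|]*)\\|([^\\|]+)\\|[\\s]*([\\d]*)[\\s]*(\\|.*)?$");

  public static void main(String[] args) {
    Matcher m = CP_HTD_ATTR_VALUE_PATTERN
        .matcher("hdfs:///foo.jar|com.foo.FooRegionObserver|1001|arg1=1,arg2=2");
    if (m.matches()) {
      System.out.println("jar      = " + m.group(1)); // hdfs:///foo.jar
      System.out.println("class    = " + m.group(2)); // com.foo.FooRegionObserver
      System.out.println("priority = " + m.group(3)); // 1001
      System.out.println("args     = " + m.group(4)); // |arg1=1,arg2=2 (keeps leading '|')
    }
  }
}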
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
index 2e7314d4c3c..36b87b1540f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/Tag.java
@@ -42,24 +42,23 @@ public class Tag {
   private int offset = 0;
   private int length = 0;
 
-  // The special tag will write the length of each tag and that will be
-  // followed by the type and then the actual tag.
-  // So every time the length part is parsed we need to add + 1 byte to it to
-  // get the type and then get the actual tag.
+  /**
+   * The special tag will write the length of each tag and that will be
+   * followed by the type and then the actual tag.
+   * So every time the length part is parsed we need to add + 1 byte to it to
+   * get the type and then get the actual tag.
+   */
   public Tag(byte tagType, String tag) {
     this(tagType, Bytes.toBytes(tag));
   }
 
   /**
-   * @param tagType
-   * @param tag
+   * Format for a tag :
+   * {@code <length of tag - 2 bytes><type code - 1 byte><tag>} tag length is serialized
+   * using 2 bytes only but as this will be unsigned, we can have max tag length of
+   * (Short.MAX_SIZE * 2) +1. It includes 1 byte type length and actual tag bytes length.
    */
   public Tag(byte tagType, byte[] tag) {
-    /**
-     * Format for a tag : <length of tag - 2 bytes><type code - 1 byte><tag> taglength is serialized
-     * using 2 bytes only but as this will be unsigned, we can have max taglength of
-     * (Short.MAX_SIZE * 2) +1. It includes 1 byte type length and actual tag bytes length.
-     */
     int tagLength = tag.length + TYPE_LENGTH_SIZE;
     if (tagLength > MAX_TAG_LENGTH) {
       throw new IllegalArgumentException(
@@ -78,10 +77,7 @@ public class Tag {
    * bytes content starting at offset is formatted as
    * a Tag blob.
    * The bytes to include the tag type, tag length and actual tag bytes.
-   * @param bytes
-   *          byte array
-   * @param offset
-   *          offset to start of Tag
+   * @param offset offset to start of Tag
    */
   public Tag(byte[] bytes, int offset) {
     this(bytes, offset, getLength(bytes, offset));
@@ -95,12 +91,6 @@ public class Tag {
    * Creates a Tag from the specified byte array, starting at offset, and for length
    * length. Presumes bytes content starting at offset is
    * formatted as a Tag blob.
-   * @param bytes
-   *          byte array
-   * @param offset
-   *          offset to start of the Tag
-   * @param length
-   *          length of the Tag
    */
   public Tag(byte[] bytes, int offset, int length) {
     if (length > MAX_TAG_LENGTH) {
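The Tag javadoc above describes the serialized layout: a 2-byte unsigned length (which covers the 1-byte type code plus the tag bytes), then the type, then the bytes. A minimal walker over a buffer of such tags, assuming big-endian lengths; a sketch only, not the Tag implementation:

import java.nio.ByteBuffer;

public final class TagWalkSketch {
  static void walk(byte[] bytes, int offset, int totalLength) {
    int pos = offset;
    while (pos < offset + totalLength) {
      // 2-byte unsigned length; it includes the 1-byte type code plus the tag bytes.
      int tagLength = ByteBuffer.wrap(bytes, pos, 2).getShort() & 0xffff;
      byte type = bytes[pos + 2];
      int valueLength = tagLength - 1; // subtract the type byte
      System.out.println("type=" + type + " valueLength=" + valueLength);
      pos += 2 + tagLength; // advance past the length field and (type + value)
    }
  }
}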
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
index a07ecd3334d..09c935db1b2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/constraint/Constraints.java
@@ -154,12 +154,10 @@ public final class Constraints {
   /**
    * Get the kv {@link Entry} in the descriptor for the specified class
    *
-   * @param desc
-   *          {@link HTableDescriptor} to read
-   * @param clazz
-   *          to search for
-   * @return the {@link Pair} of <key, value> in the table, if that class is
-   *         present. null otherwise.
+   * @param desc {@link HTableDescriptor} to read
+   * @param clazz To search for
+   * @return The {@link Pair} of {@literal <key, value>} in the table, if that class is
+   *         present. {@code NULL} otherwise.
    */
   private static Pair<Integer, String> getKeyValueForClass(
       HTableDescriptor desc, Class<? extends Constraint> clazz) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
index c990ef45a8f..60249587396 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketCache.java
@@ -806,7 +806,7 @@ public class BucketCache implements BlockCache, HeapSize {
    * Process all that are passed in even if failure being sure to remove from ramCache else we'll
    * never undo the references and we'll OOME.
    * @param entries Presumes list passed in here will be processed by this invocation only. No
-   *   interference expected.
+   *          interference expected.
    * @throws InterruptedException
    */
   @VisibleForTesting
@@ -911,23 +911,23 @@ public class BucketCache implements BlockCache, HeapSize {
   }
 
   /**
-   * Blocks until elements available in q then tries to grab as many as possible
+   * Blocks until elements available in {@code q} then tries to grab as many as possible
    * before returning.
-   * @param recepticle Where to stash the elements taken from queue. We clear before we use it
-   *     just in case.
+   * @param receptacle Where to stash the elements taken from queue. We clear before we use it
+   *     just in case.
    * @param q The queue to take from.
-   * @return receptical laden with elements taken from the queue or empty if none found.
+   * @return {@code receptacle} laden with elements taken from the queue or empty if none found.
    */
   @VisibleForTesting
   static List<RAMQueueEntry> getRAMQueueEntries(final BlockingQueue<RAMQueueEntry> q,
-      final List<RAMQueueEntry> receptical)
+      final List<RAMQueueEntry> receptacle)
   throws InterruptedException {
     // Clear sets all entries to null and sets size to 0. We retain allocations. Presume it
     // ok even if list grew to accommodate thousands.
-    receptical.clear();
-    receptical.add(q.take());
-    q.drainTo(receptical);
-    return receptical;
+    receptacle.clear();
+    receptacle.add(q.take());
+    q.drainTo(receptacle);
+    return receptacle;
   }
 
   private void persistToFile() throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
index 7a59ea1945f..44be2d3abed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/LoadIncrementalHFiles.java
@@ -521,8 +521,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
   }
 
   /**
-   * @return A Multimap that groups LQI by likely
-   * bulk load region targets.
+   * @return A map that groups LQI by likely bulk load region targets.
    */
   private Multimap<ByteBuffer, LoadQueueItem> groupOrSplitPhase(final Table table,
       ExecutorService pool, Deque<LoadQueueItem> queue,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
index 2b0aa233d83..50f07c190ba 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/ServerManager.java
@@ -143,10 +143,7 @@ public class ServerManager {
   private final Map<ServerName, AdminService.BlockingInterface> rsAdmins =
       new HashMap<ServerName, AdminService.BlockingInterface>();
 
-  /**
-   * List of region servers that should not get any more new
-   * regions.
-   */
+  /** List of region servers that should not get any more new regions. */
   private final ArrayList<ServerName> drainingServers = new ArrayList<ServerName>();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
index 5992b095446..8d5318e5d58 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/SplitLogManager.java
@@ -399,8 +399,7 @@ public class SplitLogManager {
    * It removes recovering regions under /hbase/recovering-regions/[encoded region name] so that the
    * region server hosting the region can allow reads to the recovered region
    * @param serverNames servers which are just recovered
-   * @param isMetaRecovery whether current recovery is for the meta region on
-   *          serverNames
+   * @param isMetaRecovery whether current recovery is for the meta region on {@code serverNames}
    */
   private void removeRecoveringRegions(final Set<ServerName> serverNames, Boolean isMetaRecovery) {
     if (!isLogReplaying()) {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepMapper.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepMapper.java
index 7ac628c83f9..3376046b4d4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepMapper.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mob/mapreduce/SweepMapper.java
@@ -35,8 +35,8 @@ import org.apache.zookeeper.KeeperException;
 /**
  * The mapper of a sweep job.
- * Takes the rows from the table and their results and map to <filename, mobValue>
- * where mobValue is the actual cell in HBase.
+ * Takes the rows from the table and their results and map to {@literal <filename, mobValue>}
+ * where mobValue is the actual cell in HBase.
  */
 @InterfaceAudience.Private
 public class SweepMapper extends TableMapper<Text, KeyValue> {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
index b620facff13..9980044a71a 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/monitoring/TaskMonitor.java
@@ -157,7 +157,7 @@ public class TaskMonitor {
   /**
    * This class encapsulates an object as well as a weak reference to a proxy
    * that passes through calls to that object. In art form:
-
+   * <pre>
    *     Proxy  <------------------
    *       |                       \
    *       v                        \
@@ -166,7 +166,7 @@ public class TaskMonitor {
    * MonitoredTaskImpl            / 
    *       |                     /
    * StatAndWeakRefProxy  ------/
-   *
+   * </pre>
    * Since we only return the Proxy to the creator of the MonitorableStatus,
    * this means that they can leak that object, and we'll detect it
    * since our weak reference will go null. But, we still have the actual
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 9f17b17fbba..7bf4855708e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -5234,7 +5234,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    * Determines whether multiple column families are present
    * Precondition: familyPaths is not null
    *
-   * @param familyPaths List of Pair<byte[] column family, String hfilePath>
+   * @param familyPaths List of (column family, hfilePath)
    */
   private static boolean hasMultipleColumnFamilies(Collection<Pair<byte[], String>> familyPaths) {
     boolean multipleFamilies = false;
@@ -5947,10 +5947,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   /**
    * This function is to maintain backward compatibility for 0.94 filters. HBASE-6429 combines
-   * both filterRow & filterRow(List<KeyValue> kvs) functions. While 0.94 code or older, it may
-   * not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only returns
-   * true when filterRow(List<KeyValue> kvs) is overridden not the filterRow(). Therefore, the
-   * filterRow() will be skipped.
+   * both filterRow & filterRow({@code List<KeyValue> kvs}) functions. While 0.94 code or older,
+   * it may not implement hasFilterRow as HBase-6429 expects because 0.94 hasFilterRow() only
+   * returns true when filterRow({@code List<KeyValue> kvs}) is overridden not the filterRow().
+   * Therefore, the filterRow() will be skipped.
    */
   private boolean filterRow() throws IOException {
     // when hasFilterRow returns true, filter.filterRow() will be called automatically inside
@@ -6950,9 +6950,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
   }
 
   /**
-   * @param cell
-   * @param tags
-   * @return The passed-in List but with the tags from cell added.
+   * @return The passed-in {@code tags} but with the tags from {@code cell} added.
    */
   private static List<Tag> carryForwardTags(final Cell cell, final List<Tag> tags) {
     if (cell.getTagsLength() <= 0) return tags;
@@ -6965,12 +6963,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
 
   /**
    * Run a Get against passed in store on passed row, etc.
-   * @param store
-   * @param row
-   * @param family
-   * @param tr
    * @return Get result.
-   * @throws IOException
    */
   private List<Cell> doGet(final Store store, final byte [] row,
       final Map.Entry<byte[], List<Cell>> family, final TimeRange tr)
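The carryForwardTags hunk above documents a small idiom: reuse the caller's list when one was passed in, create it lazily otherwise, and hand the same list back. The shape of it, with simplified stand-in types rather than the HRegion Cell/Tag code:

import java.util.ArrayList;
import java.util.List;

public final class CarryForwardSketch {
  static <T> List<T> carryForward(List<T> existing, List<T> extra) {
    if (extra == null || extra.isEmpty()) {
      return existing; // nothing to add; hand back the caller's list untouched
    }
    List<T> result = (existing != null) ? existing : new ArrayList<T>();
    result.addAll(extra);
    return result;
  }
}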
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobCompactionStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobCompactionStoreScanner.java
index 822b4e1ea33..a8a461b0366 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobCompactionStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobCompactionStoreScanner.java
@@ -25,7 +25,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Scan;
 
 /**
- * Scanner scans the MOB Store. Coalesce KeyValue stream into List<KeyValue>
+ * Scanner scans the MOB Store. Coalesce KeyValue stream into {@code List<KeyValue>}
  * for a single row. It's only used in the compaction of mob-enabled columns.
  * It outputs the normal cells and delete markers when outputDeleteMarkers is set as true.
  */
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
index 363da3ea1ac..72893837b70 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MobStoreScanner.java
@@ -28,9 +28,8 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.mob.MobUtils;
 
 /**
- * Scanner scans both the memstore and the MOB Store. Coalesce KeyValue stream into List<KeyValue>
- * for a single row.
- *
+ * Scanner scans both the memstore and the MOB Store. Coalesce KeyValue stream into
+ * {@code List<KeyValue>} for a single row.
  */
 @InterfaceAudience.Private
 public class MobStoreScanner extends StoreScanner {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
index a9277892aa1..206223013fd 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/ScannerContext.java
@@ -82,14 +82,14 @@ public class ScannerContext {
    * some limits and then repeatedly invoke {@link InternalScanner#next(List)} or
    * {@link RegionScanner#next(List)} where each invocation respects these limits separately.
    * <p>

-   * For example: <pre>
+   * For example: <pre> {@code
   * ScannerContext context = new ScannerContext.newBuilder().setBatchLimit(5).build();
   * RegionScanner scanner = ...
   * List<Cell> results = new ArrayList<Cell>();
   * while(scanner.next(results, context)) {
   *   // Do something with a batch of 5 cells
   * }
-   * </pre>
+   * }</pre> However, in the case of RPCs, the server wants to be able to define a set of
+   * limits for a particular RPC request and have those limits respected across multiple
+   * invocations. This means that the progress made towards the limits in earlier calls will be
+   * saved and considered in future invocations
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
index 47b76596bac..6e5f441611c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFile.java
@@ -361,7 +361,7 @@ public class StoreFile {
   /**
    * Check if this storefile was created by bulk load.
    * When a hfile is bulk loaded into HBase, we append
-   * '_SeqId_<id-when-loaded>' to the hfile name, unless
+   * {@code '_SeqId_<id-when-loaded>'} to the hfile name, unless
    * "hbase.mapreduce.bulkload.assign.sequenceNumbers" is
    * explicitly turned off.
    * If "hbase.mapreduce.bulkload.assign.sequenceNumbers"
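Per the StoreFile javadoc above, bulk-loaded hfiles carry an '_SeqId_<id-when-loaded>' infix in their name unless sequence-number assignment was turned off. A name-only check is trivial; note this is illustrative, and if memory serves the real isBulkLoadResult() also consults file metadata:

public final class BulkLoadNameSketch {
  static boolean looksBulkLoaded(String hfileName) {
    return hfileName.contains("_SeqId_");
  }

  public static void main(String[] args) {
    System.out.println(looksBulkLoaded("0123abcd_SeqId_4_")); // true
    System.out.println(looksBulkLoaded("0123abcd"));          // false
  }
}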
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
index 6e744c146a0..fd7f1c6d66f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/StoreFileInfo.java
@@ -68,8 +68,8 @@ public class StoreFileInfo {
       Pattern.compile("^(" + DELFILE_NAME_REGEX + ")");
 
   /**
-   * Regex that will work for straight reference names (<hfile>.<parentEncRegion>)
-   * and hfilelink reference names (<table>=<region>-<hfile>.<parentEncRegion>)
+   * Regex that will work for straight reference names ({@code <hfile>.<parentEncRegion>})
+   * and hfilelink reference names ({@code <table>=<region>-<hfile>.<parentEncRegion>})
    * If reference, then the regex has more than just one group.
    * Group 1, hfile/hfilelink pattern, is this file's id.
    * Group 2 '(.+)' is the reference's parent region name.
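A toy version of the straight-reference pattern described above ({@code <hfile>.<parentEncRegion>}), just to show the two groups; the real REF_NAME_PATTERN is broader because the file-id part may itself be an hfilelink name:

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public final class ReferenceNameSketch {
  // Group 1: hfile id; group 2: parent (encoded) region name. Simplified sketch.
  private static final Pattern REF_NAME = Pattern.compile("^([0-9a-f]+)\\.(.+)$");

  public static void main(String[] args) {
    Matcher m = REF_NAME.matcher("0123abcd.9876fedc");
    if (m.matches()) {
      System.out.println("hfile  = " + m.group(1)); // 0123abcd
      System.out.println("parent = " + m.group(2)); // 9876fedc
    }
  }
}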
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
index f4b52d637de..87f61610e38 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/handler/OpenRegionHandler.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hbase.executor.EventHandler;
 import org.apache.hadoop.hbase.executor.EventType;
 import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.Region;
 import org.apache.hadoop.hbase.regionserver.RegionServerAccounting;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.regionserver.RegionServerServices.PostOpenDeployContext;
@@ -218,9 +219,8 @@ public class OpenRegionHandler extends EventHandler {
   }
 
   /**
-   * Thread to run region post open tasks. Call {@link #getException()} after
-   * the thread finishes to check for exceptions running
-   * {@link RegionServerServices#postOpenDeployTasks(HRegion)
+   * Thread to run region post open tasks. Call {@link #getException()} after the thread finishes
+   * to check for exceptions running {@link RegionServerServices#postOpenDeployTasks(Region)}.
    */
   static class PostOpenDeployTasksThread extends Thread {
     private Throwable exception = null;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
index 4830935d0a6..5b8bdb37d33 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/security/visibility/VisibilityController.java
@@ -450,13 +450,11 @@ public class VisibilityController extends BaseMasterAndRegionObserver implements
    * Checks whether cell contains any tag with type as VISIBILITY_TAG_TYPE. This
    * tag type is reserved and should not be explicitly set by user.
    *
-   * @param cell
-   *          - the cell under consideration
-   * @param pair - an optional pair of type <Boolean, Tag> which would be reused
-   *          if already set and new one will be created if null is passed
-   * @return a pair - if the boolean is false then it indicates
-   *         that the cell has a RESERVERD_VIS_TAG and with boolean as true, not
-   *         null tag indicates that a string modified tag was found.
+   * @param cell The cell under consideration
+   * @param pair An optional pair of type {@code <Boolean, Tag>} which would be reused if already
+   *          set and new one will be created if NULL is passed
+   * @return If the boolean is false then it indicates that the cell has a RESERVED_VIS_TAG and
+   *          with boolean as true, not null tag indicates that a string modified tag was found.
    */
   private Pair<Boolean, Tag> checkForReservedVisibilityTagPresence(Cell cell,
       Pair<Boolean, Tag> pair) throws IOException {
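The VisibilityController hunk above documents a scan over a cell's tags looking for the reserved visibility type. The control flow looks roughly like the following; the types here are deliberately simplified stand-ins (the real method walks Tag objects off a Cell and returns a Pair<Boolean, Tag>), and the type-code value is a placeholder:

import java.util.List;

public final class ReservedTagCheckSketch {
  static final byte VISIBILITY_TAG_TYPE = 2; // placeholder type code

  /** Returns false if any tag carries the reserved visibility type. */
  static boolean allowed(List<byte[]> tags) {
    for (byte[] tag : tags) {
      if (tag.length > 0 && tag[0] == VISIBILITY_TAG_TYPE) {
        return false; // user-supplied cells must not carry reserved tags
      }
    }
    return true;
  }
}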
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
index 630ca789d7a..b0af52b6ca8 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSRegionScanner.java
@@ -34,7 +34,7 @@ import org.apache.hadoop.hbase.util.FSUtils;
 /**
  * Thread that walks over the filesystem, and computes the mappings
- * <Region -> BestHost> and <Region -> Map<HostName, fractional-locality-of-region>>
+ * Region -> BestHost and Region -> {@code Map<HostName, fractional-locality-of-region>}
  *
  */
 @InterfaceAudience.Private
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
index fa138bef280..9dbeed76ef4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/HBaseFsck.java
@@ -607,8 +607,8 @@ public class HBaseFsck extends Configured implements Closeable {
    * region servers and the masters. It makes each region's state in HDFS, in
    * hbase:meta, and deployments consistent.
    *
-   * @return If > 0 , number of errors detected, if < 0 there was an unrecoverable
-   * error. If 0, we have a clean hbase.
+   * @return If &gt; 0 , number of errors detected, if &lt; 0 there was an unrecoverable
+   *         error. If 0, we have a clean hbase.
    */
   public int onlineConsistencyRepair() throws IOException, KeeperException,
     InterruptedException {
@@ -1516,10 +1516,10 @@ public class HBaseFsck extends Configured implements Closeable {
   /**
    * Sideline a region dir (instead of deleting it)
    *
-   * @param parentDir if specified, the region will be sidelined to
-   * folder like .../parentDir/<table name>/<region name>. The purpose
-   * is to group together similar regions sidelined, for example, those
-   * regions should be bulk loaded back later on. If null, it is ignored.
+   * @param parentDir if specified, the region will be sidelined to folder like
+   *         {@literal .../parentDir/<table name>/<region name>}. The purpose is to group together
+   *         similar regions sidelined, for example, those regions should be bulk loaded back later
+   *         on. If NULL, it is ignored.
    */
   Path sidelineRegionDir(FileSystem fs,
       String parentDir, HbckInfo hi) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
index 561782e1be7..b224efcdb5f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/RegionMover.java
@@ -119,9 +119,8 @@ public class RegionMover extends AbstractHBaseTool {
     private int port = HConstants.DEFAULT_REGIONSERVER_PORT;
 
     /**
-     * Hostname to unload regions from or load regions to Valid format: <hostname> or
-     * <hostname:port>
-     * @param hostname
+     * @param hostname Hostname to unload regions from or load regions to. Can be either hostname
+     *          or hostname:port.
      */
     public RegionMoverBuilder(String hostname) {
       String[] splitHostname = hostname.split(":");
@@ -150,8 +149,6 @@ public class RegionMover extends AbstractHBaseTool {
 
     /**
      * Set the max number of threads that will be used to move regions
-     * @param threads
-     * @return RegionMoverBuilder object
      */
     public RegionMoverBuilder maxthreads(int threads) {
       this.maxthreads = threads;
@@ -159,11 +156,9 @@ public class RegionMover extends AbstractHBaseTool {
     }
 
     /**
-     * Path of file containing hostnames to be excluded during region movement Exclude file should
-     * have <host:port> per line.Port is mandatory here as we can have many RS running on a single
-     * host
-     * @param excludefile
-     * @return RegionMoverBuilder object
+     * Path of file containing hostnames to be excluded during region movement. Exclude file should
+     * have 'host:port' per line. Port is mandatory here as we can have many RS running on a single
+     * host.
      */
     public RegionMoverBuilder excludeFile(String excludefile) {
       this.excludeFile = excludefile;
@@ -751,10 +746,7 @@ public class RegionMover extends AbstractHBaseTool {
   }
 
   /**
-   * Create an Arraylst of servers listed in exclude file
-   * @param excludeFile
-   * @return ArrayList of servers to be excluded in format <hostname:port>
-   * @throws IOException
+   * @return List of servers from the exclude file in format 'hostname:port'.
    */
   private ArrayList<String> readExcludes(String excludeFile) throws IOException {
     ArrayList<String> excludeServers = new ArrayList<String>();
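The RegionMoverBuilder javadoc above accepts either a bare hostname or hostname:port. A minimal version of that parse, where the default-port constant is a stand-in for HConstants.DEFAULT_REGIONSERVER_PORT:

public final class HostPortSketch {
  private static final int DEFAULT_PORT = 16020; // stand-in default RS port

  public static void main(String[] args) {
    for (String input : new String[] { "rs1.example.com", "rs1.example.com:16030" }) {
      String[] parts = input.split(":");
      int port = (parts.length == 2) ? Integer.parseInt(parts[1]) : DEFAULT_PORT;
      System.out.println(parts[0] + ":" + port);
    }
  }
}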
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
index 05acd720881..778ea48d124 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALKey.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;
 
-
-
 // imports for things that haven't moved from regionserver.wal yet.
 import org.apache.hadoop.hbase.regionserver.wal.CompressionContext;
 import org.apache.hadoop.hbase.regionserver.wal.WALCellCodec;
@@ -87,10 +85,10 @@ public class WALKey implements SequenceId, Comparable<WALKey> {
   /**
    * Will block until a write entry has been assigned by they WAL subsystem.
    * @return A WriteEntry gotten from local WAL subsystem. Must be completed by calling
-   * mvcc#complete or mvcc#completeAndWait.
-   * @throws InterruptedIOException
-   * @see
-   * #setWriteEntry(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry)
+   *         {@link MultiVersionConcurrencyControl#complete(MultiVersionConcurrencyControl.WriteEntry)}
+   *         or
+   *         {@link MultiVersionConcurrencyControl#completeAndWait(MultiVersionConcurrencyControl.WriteEntry)}
+   * @see {@link #setWriteEntry(MultiVersionConcurrencyControl.WriteEntry)}
    */
   @InterfaceAudience.Private // For internal use only.
   public MultiVersionConcurrencyControl.WriteEntry getWriteEntry() throws InterruptedIOException {
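The usage pattern implied by the WALKey javadoc above: every WriteEntry handed out by the WAL subsystem must be completed via the mvcc complete/completeAndWait calls, even on error paths. All types below are minimal stand-ins, not the HBase classes:

import java.io.InterruptedIOException;

public final class WriteEntrySketch {
  interface WriteEntry {}
  interface Mvcc { void completeAndWait(WriteEntry e); }
  interface WalKey { WriteEntry getWriteEntry() throws InterruptedIOException; }

  static void applyEdit(WalKey key, Mvcc mvcc) throws InterruptedIOException {
    WriteEntry we = key.getWriteEntry(); // blocks until the WAL assigns an entry
    try {
      // ... apply the edit to the memstore ...
    } finally {
      mvcc.completeAndWait(we); // ensure the entry is completed on all paths
    }
  }
}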
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 3741cdfa2dc..04438fd3c53 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -1643,7 +1643,7 @@ public class WALSplitter {
         .synchronizedMap(new TreeMap());
     /**
      * Map key -> value layout
-     * <servername>:<table name> -> Queue<Row>
+     * {@literal <servername>:<table name> -> Queue<Row>}
      */
     private Map<String, List<Pair<HRegionLocation, Entry>>> serverToBufferQueueMap =
         new ConcurrentHashMap<String, List<Pair<HRegionLocation, Entry>>>();
@@ -2173,7 +2173,7 @@ public class WALSplitter {
   }
 
   private TableName getTableFromLocationStr(String loc) {
     /**
-     * location key is in format <server name>#<table name>
+     * location key is in format {@literal <server name>#<table name>}
      */
     String[] splits = loc.split(KEY_DELIMITER);
     if (splits.length != 2) {