From 5e6373e8ecfb99bc53860fb153dd4bc7772c911b Mon Sep 17 00:00:00 2001
From: Misty Stanley-Jones
@@ -36,7 +38,6 @@ protocols.
In order to provide a custom RPC protocol to clients, a coprocessor implementation must: -
Clients may then call the defined service methods on coprocessor instances via the {@link org.apache.hadoop.hbase.client.Table#coprocessorService(byte[])}, {@link org.apache.hadoop.hbase.client.Table#coprocessorService(Class, byte[], byte[], org.apache.hadoop.hbase.client.coprocessor.Batch.Call)}, and @@ -63,7 +63,6 @@ method invocations. Since regions are seldom handled directly in client code and the region names may change over time, the coprocessor RPC calls use row keys to identify which regions should be used for the method invocations. Clients can call coprocessor Service methods against either: -
Note that the row keys passed as parameters to the Table
methods are not passed directly to the coprocessor Service implementations.
@@ -135,12 +135,12 @@ public static abstract class RowCountService
public abstract void getRowCount(
com.google.protobuf.RpcController controller,
org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountRequest request,
- com.google.protobuf.RpcCallback<org.apache.hadoop.hbase.coprocessor.example.generated.ExampleProtos.CountResponse> done);
+ com.google.protobuf.RpcCallbackbytes
has {@link ProtobufMagic#PB_MAGIC} for a prefix.
*/
public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {
@@ -281,7 +279,7 @@ public final class ProtobufUtil {
}
/**
- * @param bytes bytes to check
+ * @param bytes bytes to check
* @throws DeserializationException if we are missing the pb magic prefix
*/
public static void expectPBMagicPrefix(final byte [] bytes) throws DeserializationException {
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
index 67941bc1682..2d0c9406c7f 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/CellComparator.java
@@ -70,8 +70,8 @@ public class CellComparator implements Comparator
Example:
+ *
*/
public class BulkDeleteEndpoint extends BulkDeleteService implements CoprocessorService,
Coprocessor {
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
index bfed9956a73..eb0e41f9d70 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/decode/PrefixTreeArraySearcher.java
@@ -310,7 +310,7 @@ public class PrefixTreeArraySearcher extends PrefixTreeArrayReversibleScanner im
/****************** complete seek when token mismatch ******************/
/**
- * @param searcherIsAfterInputKey <0: input key is before the searcher's position
+ * Example:
* Scan scan = new Scan();
* // set scan properties(rowkey range, filters, timerange etc).
* HTable ht = ...;
@@ -93,7 +93,7 @@ import com.google.protobuf.Service;
* for (BulkDeleteResponse response : result.values()) {
* noOfDeletedRows += response.getRowsDeleted();
* }
- *
+ * @param searcherIsAfterInputKey <0: input key is before the searcher's position
* >0: input key is after the searcher's position
*/
protected CellScannerPosition fixRowTokenMissReverse(int searcherIsAfterInputKey) {
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
index 25bee1f8bda..7da78a76d8c 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/encode/tokenize/TokenizerNode.java
@@ -33,7 +33,6 @@ import com.google.common.collect.Lists;
/**
* Individual node in a Trie structure. Each node is one of 3 types:
- *
*
*
* Example inputs (numInputs=7):
* 0: AAA
@@ -550,8 +548,7 @@ public class TokenizerNode{
/********************** simple mutation methods *************************/
/**
- * Each occurrence > 1 indicates a repeat of the previous entry.
- * This can be called directly by
+ * Each occurrence > 1 indicates a repeat of the previous entry. This can be called directly by
* an external class without going through the process of detecting a repeat if it is a known
* repeat by some external mechanism. PtEncoder uses this when adding cells to a row if it knows
* the new cells are part of the current row.
diff --git a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
index 46684687a8c..a3ae097f867 100644
--- a/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
+++ b/hbase-prefix-tree/src/main/java/org/apache/hadoop/hbase/codec/prefixtree/scanner/CellSearcher.java
@@ -50,8 +50,8 @@ public interface CellSearcher extends ReversibleCellScanner {
* exact match.
*
bytes
has {@link #PB_MAGIC} for a prefix.
*/
public static boolean isPBMagicPrefix(final byte [] bytes, int offset, int len) {
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
index e8845ebd6af..2809ca9e3ce 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/client/RemoteAdmin.java
@@ -100,7 +100,7 @@ public class RemoteAdmin {
/**
* @return string representing the rest api's version
- * @throws IOException
+ * @throws IOException
* if the endpoint does not exist, there is a timeout, or some other
* general failure mode
*/
@@ -144,7 +144,7 @@ public class RemoteAdmin {
/**
* @return string representing the cluster's version
- * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
+ * @throws IOException if the endpoint does not exist, there is a timeout, or some other general failure mode
*/
public StorageClusterStatusModel getClusterStatus() throws IOException {
@@ -185,7 +185,7 @@ public class RemoteAdmin {
/**
* @return string representing the cluster's version
- * @throws IOException
+ * @throws IOException
* if the endpoint does not exist, there is a timeout, or some other
* general failure mode
*/
@@ -357,7 +357,7 @@ public class RemoteAdmin {
/**
* @return string representing the cluster's version
- * @throws IOException
+ * @throws IOException
* if the endpoint does not exist, there is a timeout, or some other
* general failure mode
*/
diff --git a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
index 55f5769f5a6..25a6de32e56 100644
--- a/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
+++ b/hbase-rest/src/main/java/org/apache/hadoop/hbase/rest/model/ScannerModel.java
@@ -87,7 +87,7 @@ import com.sun.jersey.api.json.JSONUnmarshaller;
*
* * <complexType name="Scanner"> - * <sequence> + * <sequence> * <element name="column" type="base64Binary" minOccurs="0" maxOccurs="unbounded"/> * <element name="filter" type="string" minOccurs="0" maxOccurs="1"></element> * </sequence> diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java index fd19edee5d9..93eb5f19583 100644 --- a/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java +++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/coprocessor/RegionObserver.java @@ -1096,7 +1096,7 @@ public interface RegionObserver extends Coprocessor { *
boolean filterRow()
returning truevoid filterRow(List<KeyValue> kvs)
removing all the kvs from
+ * void filterRow(List kvs)
removing all the kvs from
* the passed Listname
.
+ * The jsp scripts are taken from src/hbase-webapps/name.
* @param name The name of the server
* @param bindAddress address to bind to
* @param port The port to use on the server
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
index dbe8b243d9c..498e213bc05 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/JMXJsonServlet.java
@@ -51,34 +51,28 @@ import org.apache.hadoop.hbase.util.JSONBean;
* functionality is provided through the
* {@link MBeanServer#queryNames(ObjectName, javax.management.QueryExp)}
* method.
- *
*
* For example http://.../jmx?qry=Hadoop:*
will return
* all hadoop metrics exposed through JMX.
- *
*
* The optional get
parameter is used to query an specific
* attribute of a JMX bean. The format of the URL is
- * http://.../jmx?get=MXBeanName::AttributeName
- *
+ * http://.../jmx?get=MXBeanName::AttributeName
*
* For example
*
* http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
*
will return the cluster id of the namenode mxbean.
- *
*
* If the qry
or the get
parameter is not formatted
* correctly then a 400 BAD REQUEST http response code will be returned.
- *
*
* If a resouce such as a mbean or attribute can not be found,
* a 404 SC_NOT_FOUND http response code will be returned.
- *
*
* The return format is JSON and in the form
- *
- *
+ *
+ *
* {
* "beans" : [
* {
@@ -87,7 +81,7 @@ import org.apache.hadoop.hbase.util.JSONBean;
* }
* ]
* }
- *
+ *
*
* The servlet attempts to convert the the JMXBeans into JSON. Each
* bean's attributes will be converted to a JSON object member.
@@ -107,7 +101,6 @@ import org.apache.hadoop.hbase.util.JSONBean;
* The bean's name and modelerType will be returned for all beans.
*
* Optional paramater "callback" should be used to deliver JSONP response.
- *
*
*/
public class JMXJsonServlet extends HttpServlet {
@@ -218,4 +211,4 @@ public class JMXJsonServlet extends HttpServlet {
response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
}
}
-}
+}
\ No newline at end of file
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
index 324cc2dd851..9012ab6b629 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/jmx/package-info.java
@@ -19,7 +19,7 @@
* This package provides access to JMX primarily through the
* {@link org.apache.hadoop.hbase.http.jmx.JMXJsonServlet} class.
*
- * Copied from hadoop source code.
+ * Copied from hadoop source code.
* See https://issues.apache.org/jira/browse/HADOOP-10232 to know why.
*
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
index 1734f4085b3..469c07567b9 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/http/lib/package-info.java
@@ -26,7 +26,7 @@
* users a static configured user.
*
*
- * Copied from hadoop source code.
+ * Copied from hadoop source code.
* See https://issues.apache.org/jira/browse/HADOOP-10232 to know why
*
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
index 067d639a110..50a5baad34c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileBlockIndex.java
@@ -52,14 +52,13 @@ import org.apache.hadoop.util.StringUtils;
/**
* Provides functionality to write ({@link BlockIndexWriter}) and read
- * ({@link BlockIndexReader})
+ * ({@link org.apache.hadoop.hbase.io.hfile.BlockIndexReader})
* single-level and multi-level block indexes.
*
* Examples of how to use the block index writer can be found in
* {@link org.apache.hadoop.hbase.io.hfile.CompoundBloomFilterWriter} and
* {@link HFileWriterImpl}. Examples of how to use the reader can be
- * found in {@link HFileWriterImpl} and
- * {@link org.apache.hadoop.hbase.io.hfile.TestHFileBlockIndex}.
+ * found in {@link HFileWriterImpl} and TestHFileBlockIndex.
*/
@InterfaceAudience.Private
public class HFileBlockIndex {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
index faa8724f4b9..4e3929dd20f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/bucket/BucketAllocator.java
@@ -407,8 +407,7 @@ public final class BucketAllocator {
/**
* Allocate a block with specified size. Return the offset
* @param blockSize size of block
- * @throws BucketAllocatorException
- * @throws CacheFullException
+ * @throws BucketAllocatorException
+ * @throws CacheFullException
* @return the offset in the IOEngine
*/
public synchronized long allocateBlock(int blockSize) throws CacheFullException,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
index 2026febfbfb..f8ccea35a09 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapred/TableInputFormatBase.java
@@ -168,7 +168,7 @@ implements InputFormat {
/**
* Calculates the splits that will serve as input for the map tasks.
- *
+ *
* Splits are created in number equal to the smallest between numSplits and
* the number of {@link org.apache.hadoop.hbase.regionserver.HRegion}s in the table.
* If the number of splits is smaller than the number of
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
index 5c46f2ac957..e9ce5a3f0a2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/mapreduce/MultiTableSnapshotInputFormatImpl.java
@@ -118,11 +118,11 @@ public class MultiTableSnapshotInputFormatImpl {
}
/**
- * Retrieve the snapshot name -> list<scan> mapping pushed to configuration by
+ * Retrieve the snapshot name -> list mapping pushed to configuration by
* {@link #setSnapshotToScans(org.apache.hadoop.conf.Configuration, java.util.Map)}
*
- * @param conf Configuration to extract name -> list<scan> mappings from.
- * @return the snapshot name -> list<scan> mapping pushed to configuration
+ * @param conf Configuration to extract name -> list mappings from.
+ * @return the snapshot name -> list mapping pushed to configuration
* @throws IOException
*/
public Map> getSnapshotsToScans(Configuration conf) throws IOException {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
index 8bc436f424b..c41439d905b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/cleaner/BaseHFileCleanerDelegate.java
@@ -25,17 +25,14 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
*
* If other effects are needed, implement your own LogCleanerDelegate and add it to the
* configuration "hbase.master.hfilecleaner.plugins", which is a comma-separated list of fully
- * qualified class names. The HFileCleaner
will build the cleaner chain in
+ * qualified class names. The HFileCleaner will build the cleaner chain in
* order the order specified by the configuration.
- *
*
* For subclasses, setConf will be called exactly once before using the cleaner.
- *
*
* Since {@link BaseHFileCleanerDelegate HFileCleanerDelegates} are created in
* HFileCleaner by reflection, classes that implements this interface must
* provide a default constructor.
- *
*/
@InterfaceAudience.Private
public abstract class BaseHFileCleanerDelegate extends BaseFileCleanerDelegate {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
index e93ad576dbc..af3d302b57f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/handler/TableEventHandler.java
@@ -232,6 +232,7 @@ public abstract class TableEventHandler extends EventHandler {
* Gets a TableDescriptor from the masterServices. Can Throw exceptions.
*
* @return Table descriptor for this table
+ * @throws TableExistsException
* @throws FileNotFoundException
* @throws IOException
*/
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
index b6b9fe53963..b21f4e72a13 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/snapshot/MasterSnapshotVerifier.java
@@ -62,7 +62,6 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
* - SnapshotDescription is readable
* - Table info is readable
* - Regions
- *
*
* - Matching regions in the snapshot as currently in the table
* - {@link HRegionInfo} matches the current and stored regions
@@ -70,6 +69,7 @@ import org.apache.hadoop.hbase.zookeeper.MetaTableLocator;
* - All the hfiles are present (either in .archive directory in the region)
* - All recovered.edits files are present (by name) and have the correct file size
*
+ *
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index a3e862dc631..89807b0de3f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -3776,6 +3776,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
* the maxSeqId for the store to be applied, else its skipped.
* @return the sequence id of the last edit added to this region out of the
* recovered edits log or minSeqId
if nothing added from editlogs.
+ * @throws UnsupportedEncodingException
* @throws IOException
*/
protected long replayRecoveredEditsIfAny(final Path regiondir,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
index 630ca7df4e1..048d1284099 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/CompactionConfiguration.java
@@ -27,20 +27,18 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.regionserver.StoreConfigInformation;
/**
- *
* Compaction configuration for a particular instance of HStore.
* Takes into account both global settings and ones set on the column family/store.
* Control knobs for default compaction algorithm:
- *
- *
+ *
* maxCompactSize - upper bound on file size to be included in minor compactions
* minCompactSize - lower bound below which compaction is selected without ratio test
* minFilesToCompact - lower bound on number of files in any minor compaction
* maxFilesToCompact - upper bound on number of files in any minor compaction
* compactionRatio - Ratio used for compaction
* minLocalityToForceCompact - Locality threshold for a store file to major compact (HBASE-11195)
- *
- * Set parameter as "hbase.hstore.compaction.<attribute>"
+ *
+ * Set parameter as "hbase.hstore.compaction."
*/
@InterfaceAudience.Private
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
index 26fef53556a..15ead14159b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/compactions/Compactor.java
@@ -226,7 +226,7 @@ public abstract class Compactor {
* @param scanner Where to read from.
* @param writer Where to write to.
* @param smallestReadPoint Smallest read point.
- * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
+ * @param cleanSeqId When true, remove seqId(used to be mvcc) value which is <= smallestReadPoint
* @return Whether compaction ended; false if it was interrupted for some reason.
*/
protected boolean performCompaction(InternalScanner scanner, CellSink writer,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
index 6337e28e64e..92ab4d1498c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/tool/WriteSinkCoprocessor.java
@@ -32,31 +32,26 @@ import java.io.IOException;
import java.util.concurrent.atomic.AtomicLong;
/**
- *
* This coprocessor 'shallows' all the writes. It allows to test a pure
* write workload, going through all the communication layers.
* The reads will work as well, but they as we never write, they will always always
* return an empty structure. The WAL is also skipped.
* Obviously, the region will never be split automatically. It's up to the user
* to split and move it.
- *
- *
+ *
* For a table created like this:
- * create 'usertable', {NAME => 'f1', VERSIONS => 1}
- *
- *
+ * create 'usertable', {NAME => 'f1', VERSIONS => 1}
+ *
* You can then add the coprocessor with this command:
- * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
- *
- *
+ * alter 'usertable', 'coprocessor' => '|org.apache.hadoop.hbase.tool.WriteSinkCoprocessor|'
+ *
* And then
* put 'usertable', 'f1', 'f1', 'f1'
- *
- *
+ *
* scan 'usertable'
* Will return:
* 0 row(s) in 0.0050 seconds
- *
+ *
*/
public class WriteSinkCoprocessor extends BaseRegionObserver {
private static final Log LOG = LogFactory.getLog(WriteSinkCoprocessor.class);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
index 9fff87256ff..5b6cb36a2ed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/BloomFilterChunk.java
@@ -88,7 +88,7 @@ public class BloomFilterChunk implements BloomFilterBase {
}
/**
- * Determines & initializes bloom filter meta data from user config. Call
+ * Determines & initializes bloom filter meta data from user config. Call
* {@link #allocBloom()} to allocate bloom filter data.
*
* @param maxKeys Maximum expected number of keys that will be stored in this
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
index 29ab24ee78c..23dc5700bad 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/hbck/HFileCorruptionChecker.java
@@ -51,7 +51,7 @@ import org.apache.hadoop.hbase.util.HBaseFsck.ErrorReporter;
/**
* This class marches through all of the region's hfiles and verifies that
* they are all valid files. One just needs to instantiate the class, use
- * checkTables(List<Path>) and then retrieve the corrupted hfiles (and
+ * checkTables(List) and then retrieve the corrupted hfiles (and
* quarantined files if in quarantining mode)
*
* The implementation currently parallelizes at the regionDir level.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
index d851d57cfce..f889672afed 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/DefaultWALProvider.java
@@ -304,8 +304,8 @@ public class DefaultWALProvider implements WALProvider {
* This function returns region server name from a log file name which is in one of the following
* formats:
*
- * - hdfs://<name node>/hbase/.logs/<server name>-splitting/...
- * - hdfs://<name node>/hbase/.logs/<server name>/...
+ * - hdfs://
/hbase/.logs/-splitting/...
+ * - hdfs://
/hbase/.logs//...
*
* @param logFile
* @return null if the passed in logFile isn't a valid WAL file path
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
index 9a26a2491cb..fc43765b55b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/wal/WALSplitter.java
@@ -2228,13 +2228,13 @@ public class WALSplitter {
}
/**
- * This function is used to construct mutations from a WALEntry. It also
- * reconstructs WALKey & WALEdit from the passed in WALEntry
+ * This function is used to construct mutations from a WALEntry. It also reconstructs WALKey &
+ * WALEdit from the passed in WALEntry
* @param entry
* @param cells
* @param logEntry pair of WALKey and WALEdit instance stores WALKey and WALEdit instances
* extracted from the passed in WALEntry.
- * @return list of Pair<MutationType, Mutation> to be replayed
+ * @return list of Pair to be replayed
* @throws IOException
*/
public static List getMutationsFromWALEntry(WALEntry entry, CellScanner cells,
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
index a6371cc0fca..beb3fe9958f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/zookeeper/lock/ZKInterProcessLockBase.java
@@ -417,7 +417,7 @@ public abstract class ZKInterProcessLockBase implements InterProcessLock {
/**
* Visits the locks (both held and attempted) with the given MetadataHandler.
- * @throws IOException If there is an unrecoverable error
+ * @throws IOException If there is an unrecoverable error
*/
public void visitLocks(MetadataHandler handler) throws IOException {
List children;