HBASE-7361 Fix all javadoc warnings in hbase-server/{,mapreduce}

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1423096 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Michael Stack 2012-12-17 19:08:02 +00:00
parent 07d670920e
commit 4cc31baec3
53 changed files with 96 additions and 100 deletions

View File

@ -901,7 +901,8 @@ public class HTableDescriptor implements WritableComparable<HTableDescriptor> {
/**
* <em> INTERNAL </em> This method is part of the {@link WritableComparable} interface
* and is used for serialization of the HTableDescriptor over RPC.
* @deprecated Writables are going away. Use pb {@link #toByteArray(byte[])} instead.
* @deprecated Writables are going away.
* Use {@link com.google.protobuf.MessageLite#toByteArray} instead.
*/
@Deprecated
@Override
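
For reference, the pb round trip the new @deprecated text points at looks roughly like this (a sketch, assuming the 0.96-era HTableDescriptor#toByteArray() and HTableDescriptor#parseFrom(byte[]) methods):

    HTableDescriptor desc = new HTableDescriptor("t1");
    byte[] pb = desc.toByteArray();                          // pb-serialized descriptor
    HTableDescriptor copy = HTableDescriptor.parseFrom(pb);  // throws DeserializationException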

View File

@ -153,8 +153,8 @@ public interface MasterAdminProtocol extends
/**
* Unassign a region from current hosting regionserver. Region will then be
* assigned to a regionserver chosen at random. Region could be reassigned
* back to the same server. Use {@link #moveRegion(RpcController,MoveRegionRequest)}
* if you want to control the region movement.
* back to the same server. Use {@link #moveRegion} if you want to
* control the region movement.
* @param controller Unused (set to null).
* @param req The request that contains:<br>
* - region: Region to unassign. Will clear any existing RegionPlan
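
From the client side, the choice this javadoc describes looks roughly like this (a sketch against the HBaseAdmin wrappers of this era; regionName, encodedRegionName and destServerName are placeholders):

    HBaseAdmin admin = new HBaseAdmin(conf);
    // Let the master pick a destination regionserver at random:
    admin.unassign(regionName, false);
    // Or control the movement explicitly:
    admin.move(encodedRegionName, destServerName);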

View File

@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* A Get, Put or Delete associated with its region. Used internally by
* {@link HTable::batch} to associate the action with it's region and maintain
* {@link HTable#batch} to associate the action with its region and maintain
* the index from the original request.
*/
@InterfaceAudience.Public
@ -34,7 +34,7 @@ public class Action<R> implements Comparable {
private int originalIndex;
private R result;
/*
/**
* This constructor is replaced by {@link #Action(Row, int)}
*/
@Deprecated

View File

@ -460,7 +460,7 @@ public class HBaseAdmin implements Abortable, Closeable {
/**
* Creates a new table but does not block and wait for it to come online.
* Asynchronous operation. To check if the table exists, use
* {@link: #isTableAvailable()} -- it is not safe to create an HTable
* {@link #isTableAvailable} -- it is not safe to create an HTable
* instance to this table before it is available.
* Note : Avoid passing empty split key.
* @param desc table descriptor for table
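
The pattern the corrected link implies, sketched against the client API of this era (desc and splitKeys are assumed to be defined):

    HBaseAdmin admin = new HBaseAdmin(conf);
    admin.createTableAsync(desc, splitKeys);            // returns immediately
    while (!admin.isTableAvailable(desc.getNameAsString())) {
      Thread.sleep(200);                                // poll until all regions are online
    }
    HTable table = new HTable(conf, desc.getName());    // only now safe to open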

View File

@ -487,8 +487,8 @@ public class HTable implements HTableInterface {
/**
* Get the corresponding regions for an arbitrary range of keys.
* <p>
* @param startRow Starting row in range, inclusive
* @param endRow Ending row in range, exclusive
* @param startKey Starting row in range, inclusive
* @param endKey Ending row in range, exclusive
* @return A list of HRegionLocations corresponding to the regions that
* contain the specified range
* @throws IOException if a remote or network exception occurs
@ -917,8 +917,7 @@ public class HTable implements HTableInterface {
* Process a mixed batch of Get, Put and Delete actions. All actions for a
* RegionServer are forwarded in one RPC call. Queries are executed in parallel.
*
*
* @param actions The collection of actions.
* @param list The collection of actions.
* @param results An empty array, same size as list. If an exception is thrown,
* you can test here for partial results, and to determine which actions
* processed successfully.
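
The renamed parameter belongs to the mixed-batch call; a short sketch of its use (row, family and qualifier names are placeholders):

    List<Row> list = new ArrayList<Row>();
    Put p = new Put(Bytes.toBytes("row1"));
    p.add(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes("v"));
    list.add(p);
    list.add(new Get(Bytes.toBytes("row2")));
    Object[] results = new Object[list.size()];
    table.batch(list, results);     // actions grouped into one RPC per RegionServer
    // On exception, results still records per-action outcomes for inspection.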

View File

@ -290,8 +290,7 @@ public interface HTableInterface extends Closeable {
* Performs multiple mutations atomically on a single row. Currently
* {@link Put} and {@link Delete} are supported.
*
* @param arm object that specifies the set of mutations to perform
* atomically
* @param rm object that specifies the set of mutations to perform atomically
* @throws IOException
*/
public void mutateRow(final RowMutations rm) throws IOException;
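
A sketch of the atomic single-row call documented above (family and qualifier names are placeholders):

    byte[] row = Bytes.toBytes("row1");
    RowMutations rm = new RowMutations(row);
    Put p = new Put(row);
    p.add(Bytes.toBytes("f"), Bytes.toBytes("q1"), Bytes.toBytes("v"));
    rm.add(p);
    rm.add(new Delete(row).deleteColumns(Bytes.toBytes("f"), Bytes.toBytes("q2")));
    table.mutateRow(rm);            // the Put and the Delete are applied atomically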
@ -537,7 +536,8 @@ public interface HTableInterface extends Closeable {
/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
* and invokes the passed {@link Batch.Call#call(Object)} method with each {@link Service}
* and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
* method with each {@link Service}
* instance.
*
* @param service the protocol buffer {@code Service} implementation to call
@ -545,11 +545,13 @@ public interface HTableInterface extends Closeable {
* selection will start with the first table region.
* @param endKey select regions up to and including the region containing this row.
* If {@code null}, selection will continue through the last table region.
* @param callable this instance's {@link Batch.Call#call(Object)} method will be invoked once
* per table region, using the {@link Service} instance connected to that region.
* @param callable this instance's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
* method will be invoked once per table region, using the {@link Service}
* instance connected to that region.
* @param <T> the {@link Service} subclass to connect to
* @param <R> Return type for the {@code callable} parameter's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} method
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* @return a map of result values keyed by region name
*/
<T extends Service, R> Map<byte[],R> coprocessorService(final Class<T> service,
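
The call pattern behind these links, sketched with a hypothetical generated Service (MyService and fetchCount are stand-ins, not real API):

    Map<byte[], Long> counts = table.coprocessorService(
        MyService.class, startKey, endKey,
        new Batch.Call<MyService, Long>() {
          @Override
          public Long call(MyService instance) throws IOException {
            return fetchCount(instance);  // hypothetical helper issuing the pb RPC
          }
        });
    // The result map holds one entry per region, keyed by region name.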
@ -559,14 +561,14 @@ public interface HTableInterface extends Closeable {
/**
* Creates an instance of the given {@link com.google.protobuf.Service} subclass for each table
* region spanning the range from the {@code startKey} row to {@code endKey} row (inclusive),
* and invokes the passed {@link Batch.Call#call(Object)} method with each {@link Service}
* instance.
* and invokes the passed {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call}
* method with each {@link Service} instance.
*
* <p>
* The given
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Callback#update(byte[], byte[], Object)}
* method will be called with the return value from each region's {@link Batch.Call#call(Object)}
* invocation.
* method will be called with the return value from each region's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} invocation.
*</p>
*
* @param service the protocol buffer {@code Service} implementation to call
@ -574,12 +576,14 @@ public interface HTableInterface extends Closeable {
* selection will start with the first table region.
* @param endKey select regions up to and including the region containing this row.
* If {@code null}, selection will continue through the last table region.
* @param callable this instance's {@link Batch.Call#call(Object)} method will be invoked once
* per table region, using the {@link Service} instance connected to that region.
* @param callable this instance's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
* will be invoked once per table region, using the {@link Service} instance
* connected to that region.
* @param callback
* @param <T> the {@link Service} subclass to connect to
* @param <R> Return type for the {@code callable} parameter's
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call(Object)} method
* {@link org.apache.hadoop.hbase.client.coprocessor.Batch.Call#call} method
*/
<T extends Service, R> void coprocessorService(final Class<T> service,
byte[] startKey, byte[] endKey, final Batch.Call<T,R> callable,
@ -589,7 +593,7 @@ public interface HTableInterface extends Closeable {
* See {@link #setAutoFlush(boolean, boolean)}
*
* @param autoFlush
* Whether or not to enable 'auto-flush'.
* Whether or not to enable 'auto-flush'.
*/
public void setAutoFlush(boolean autoFlush);
@ -600,13 +604,13 @@ public interface HTableInterface extends Closeable {
* and are immediately executed. Failed operations are not retried. This is
* slower but safer.
* <p>
* Turning off {@link #autoFlush} means that multiple {@link Put}s will be
* Turning off {@code autoFlush} means that multiple {@link Put}s will be
* accepted before any RPC is actually sent to do the write operations. If the
* application dies before pending writes get flushed to HBase, data will be
* lost.
* <p>
* When you turn {@link #autoFlush} off, you should also consider the
* {@link #clearBufferOnFail} option. By default, asynchronous {@link Put}
* When you turn {@code autoFlush} off, you should also consider the
* {@code clearBufferOnFail} option. By default, asynchronous {@link Put}
* requests will be retried on failure until successful. However, this can
* pollute the writeBuffer and slow down batching performance. Additionally,
* you may want to issue a number of Put requests and call
@ -615,9 +619,9 @@ public interface HTableInterface extends Closeable {
* has been called, regardless of success.
*
* @param autoFlush
* Whether or not to enable 'auto-flush'.
* Whether or not to enable 'auto-flush'.
* @param clearBufferOnFail
* Whether to keep Put failures in the writeBuffer
* Whether to keep Put failures in the writeBuffer
* @see #flushCommits
*/
public void setAutoFlush(boolean autoFlush, boolean clearBufferOnFail);
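
The buffered-write pattern this javadoc describes, as a sketch:

    table.setAutoFlush(false, true);  // buffer writes; drop failed Puts from the buffer
    for (Put p : puts) {              // puts assumed prepared elsewhere
      table.put(p);                   // queued in the client-side writeBuffer
    }
    table.flushCommits();             // one explicit flush of everything buffered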

View File

@ -147,7 +147,6 @@ public abstract class ServerCallable<T> implements Callable<T> {
* Run this instance with retries, timed waits,
* and re-finds of missing regions.
*
* @param <T> the type of the return value
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error
@ -202,7 +201,6 @@ public abstract class ServerCallable<T> implements Callable<T> {
/**
* Run this instance against the server once.
* @param <T> the type of the return value
* @return an object of type T
* @throws IOException if a remote or network exception occurs
* @throws RuntimeException other unspecified error

View File

@ -40,7 +40,7 @@ import com.google.protobuf.Service;
/**
* This class demonstrates how to implement atomic read-modify-writes
* using {@link HRegion#processRowsWithLocks()} and Coprocessor endpoints.
* using {@link HRegion#processRowsWithLocks} and Coprocessor endpoints.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving
@ -54,7 +54,7 @@ extends RowProcessorService implements CoprocessorService, Coprocessor {
* RowProcessorEndpoint. This way the RowProcessor can be class-loaded with
* the Coprocessor endpoint together.
*
* See {@link TestRowProcessorEndpoint} for example.
* See {@code TestRowProcessorEndpoint} for an example.
*
* The request contains information for constructing processor
* (see {@link #constructRowProcessorFromRequest}. The processor object defines

View File

@ -185,7 +185,7 @@ public interface RegionObserver extends Coprocessor {
* @param c the environment provided by the region server
* @param store the store being compacted
* @param scanners the list {@link StoreFileScanner}s to be read from
* @param scantype the {@link ScanType} indicating whether this is a major or minor compaction
* @param scanType the {@link ScanType} indicating whether this is a major or minor compaction
* @param earliestPutTs timestamp of the earliest put that was found in any of the involved
* store files
* @param s the base scanner, if not {@code null}, from previous RegionObserver in the chain

View File

@ -62,7 +62,7 @@ public class BinaryComparator extends ByteArrayComparable {
* @param pbBytes A pb serialized {@link BinaryComparator} instance
* @return An instance of {@link BinaryComparator} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static BinaryComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {
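
The many parseFrom hunks below all apply the same fix; in each class the two methods form a pb round trip, e.g.:

    BinaryComparator comp = new BinaryComparator(Bytes.toBytes("row-1"));
    byte[] pb = comp.toByteArray();                           // pb serialization
    BinaryComparator back = BinaryComparator.parseFrom(pb);   // throws DeserializationException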

View File

@ -64,7 +64,7 @@ public class BinaryPrefixComparator extends ByteArrayComparable {
* @param pbBytes A pb serialized {@link BinaryPrefixComparator} instance
* @return An instance of {@link BinaryPrefixComparator} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static BinaryPrefixComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -79,7 +79,7 @@ public class BitComparator extends ByteArrayComparable {
* @param pbBytes A pb serialized {@link BitComparator} instance
* @return An instance of {@link BitComparator} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static BitComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -62,7 +62,7 @@ public abstract class ByteArrayComparable implements Comparable<byte[]> {
* @param pbBytes A pb serialized {@link ByteArrayComparable} instance
* @return An instance of {@link ByteArrayComparable} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static ByteArrayComparable parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -88,7 +88,7 @@ public class ColumnCountGetFilter extends FilterBase {
* @param pbBytes A pb serialized {@link ColumnCountGetFilter} instance
* @return An instance of {@link ColumnCountGetFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static ColumnCountGetFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -108,7 +108,7 @@ public class ColumnPaginationFilter extends FilterBase
* @param pbBytes A pb serialized {@link ColumnPaginationFilter} instance
* @return An instance of {@link ColumnPaginationFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static ColumnPaginationFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -102,7 +102,7 @@ public class ColumnPrefixFilter extends FilterBase {
* @param pbBytes A pb serialized {@link ColumnPrefixFilter} instance
* @return An instance of {@link ColumnPrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static ColumnPrefixFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -180,7 +180,7 @@ public class ColumnRangeFilter extends FilterBase {
* @param pbBytes A pb serialized {@link ColumnRangeFilter} instance
* @return An instance of {@link ColumnRangeFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static ColumnRangeFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -234,7 +234,7 @@ public class DependentColumnFilter extends CompareFilter {
* @param pbBytes A pb serialized {@link DependentColumnFilter} instance
* @return An instance of {@link DependentColumnFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static DependentColumnFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -92,7 +92,7 @@ public class FamilyFilter extends CompareFilter {
* @param pbBytes A pb serialized {@link FamilyFilter} instance
* @return An instance of {@link FamilyFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static FamilyFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -180,7 +180,7 @@ public abstract class Filter {
* @param pbBytes A pb serialized {@link Filter} instance
* @return An instance of {@link Filter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static Filter parseFrom(final byte [] pbBytes) throws DeserializationException {
throw new DeserializationException(

View File

@ -293,7 +293,7 @@ public class FilterList extends Filter {
* @param pbBytes A pb serialized {@link FilterList} instance
* @return An instance of {@link FilterList} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static FilterList parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -66,7 +66,7 @@ public class FilterWrapper extends Filter {
* @param pbBytes A pb serialized {@link FilterWrapper} instance
* @return An instance of {@link FilterWrapper} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static FilterWrapper parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -86,7 +86,7 @@ public class FirstKeyOnlyFilter extends FilterBase {
* @param pbBytes A pb serialized {@link FirstKeyOnlyFilter} instance
* @return An instance of {@link FirstKeyOnlyFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static FirstKeyOnlyFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -93,7 +93,7 @@ public class FirstKeyValueMatchingQualifiersFilter extends FirstKeyOnlyFilter {
* @param pbBytes A pb serialized {@link FirstKeyValueMatchingQualifiersFilter} instance
* @return An instance of {@link FirstKeyValueMatchingQualifiersFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static FirstKeyValueMatchingQualifiersFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -150,7 +150,7 @@ public class FuzzyRowFilter extends FilterBase {
* @param pbBytes A pb serialized {@link FuzzyRowFilter} instance
* @return An instance of {@link FuzzyRowFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static FuzzyRowFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -95,7 +95,7 @@ public class InclusiveStopFilter extends FilterBase {
* @param pbBytes A pb serialized {@link InclusiveStopFilter} instance
* @return An instance of {@link InclusiveStopFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static InclusiveStopFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -74,7 +74,7 @@ public class KeyOnlyFilter extends FilterBase {
* @param pbBytes A pb serialized {@link KeyOnlyFilter} instance
* @return An instance of {@link KeyOnlyFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static KeyOnlyFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -121,7 +121,7 @@ public class MultipleColumnPrefixFilter extends FilterBase {
* @param pbBytes A pb serialized {@link MultipleColumnPrefixFilter} instance
* @return An instance of {@link MultipleColumnPrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static MultipleColumnPrefixFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -61,7 +61,7 @@ public class NullComparator extends ByteArrayComparable {
* @param pbBytes A pb serialized {@link NullComparator} instance
* @return An instance of {@link NullComparator} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static NullComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -93,7 +93,7 @@ public class PageFilter extends FilterBase {
* @param pbBytes A pb serialized {@link PageFilter} instance
* @return An instance of {@link PageFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static PageFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -90,7 +90,7 @@ public class PrefixFilter extends FilterBase {
* @param pbBytes A pb serialized {@link PrefixFilter} instance
* @return An instance of {@link PrefixFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static PrefixFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -92,7 +92,7 @@ public class QualifierFilter extends CompareFilter {
* @param pbBytes A pb serialized {@link QualifierFilter} instance
* @return An instance of {@link QualifierFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static QualifierFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -122,7 +122,7 @@ public class RandomRowFilter extends FilterBase {
* @param pbBytes A pb serialized {@link RandomRowFilter} instance
* @return An instance of {@link RandomRowFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static RandomRowFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -62,7 +62,7 @@ import java.util.regex.Pattern;
* ValueFilter vf = new ValueFilter(CompareOp.EQUAL,
* new RegexStringComparator("regex", Pattern.CASE_INSENSITIVE | Pattern.DOTALL));
* </pre>
* @see java.util.regex.Pattern;
* @see java.util.regex.Pattern
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
@ -132,7 +132,7 @@ public class RegexStringComparator extends ByteArrayComparable {
* @param pbBytes A pb serialized {@link RegexStringComparator} instance
* @return An instance of {@link RegexStringComparator} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static RegexStringComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -107,7 +107,7 @@ public class RowFilter extends CompareFilter {
* @param pbBytes A pb serialized {@link RowFilter} instance
* @return An instance of {@link RowFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static RowFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -136,7 +136,7 @@ public class SingleColumnValueExcludeFilter extends SingleColumnValueFilter {
* @param pbBytes A pb serialized {@link SingleColumnValueExcludeFilter} instance
* @return An instance of {@link SingleColumnValueExcludeFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static SingleColumnValueExcludeFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -332,7 +332,7 @@ public class SingleColumnValueFilter extends FilterBase {
* @param pbBytes A pb serialized {@link SingleColumnValueFilter} instance
* @return An instance of {@link SingleColumnValueFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static SingleColumnValueFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -108,7 +108,7 @@ public class SkipFilter extends FilterBase {
* @param pbBytes A pb serialized {@link SkipFilter} instance
* @return An instance of {@link SkipFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static SkipFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -82,7 +82,7 @@ public class SubstringComparator extends ByteArrayComparable {
* @param pbBytes A pb serialized {@link SubstringComparator} instance
* @return An instance of {@link SubstringComparator} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static SubstringComparator parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -123,7 +123,7 @@ public class TimestampsFilter extends FilterBase {
* @param pbBytes A pb serialized {@link TimestampsFilter} instance
* @return An instance of {@link TimestampsFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static TimestampsFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -88,7 +88,7 @@ public class ValueFilter extends CompareFilter {
* @param pbBytes A pb serialized {@link ValueFilter} instance
* @return An instance of {@link ValueFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static ValueFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -108,7 +108,7 @@ public class WhileMatchFilter extends FilterBase {
* @param pbBytes A pb serialized {@link WhileMatchFilter} instance
* @return An instance of {@link WhileMatchFilter} made from <code>bytes</code>
* @throws DeserializationException
* @see {@link #toByteArray()}
* @see #toByteArray
*/
public static WhileMatchFilter parseFrom(final byte [] pbBytes)
throws DeserializationException {

View File

@ -66,7 +66,7 @@ public class HFileSystem extends FilterFileSystem {
/**
* Create a FileSystem object for HBase regionservers.
* @param conf The configuration to be used for the filesystem
* @param useHBaseChecksums if true, then use
* @param useHBaseChecksum if true, then use
* checksum verification in hbase, otherwise
* delegate checksum verification to the FileSystem.
*/

View File

@ -56,13 +56,13 @@ import org.apache.hadoop.hbase.util.FSUtils;
* original location or in the archive folder.
* The FileLink class tries to abstract this concept; given a set of locations,
* it is able to switch between them, making this operation transparent for the user.
* More concrete implementations of the FileLink are the {@link HFileLink} and the {@link HLogLink}.
* {@link HFileLink} is a more concrete implementation of the {@code FileLink}.
*
* <p><b>Back-references:</b>
* To help the {@link CleanerChore} to keep track of the links to a particular file,
* during the FileLink creation, a new file is placed inside a back-reference directory.
* There's one back-reference directory for each file that has links,
* and in the directory there's one file per link.
* To help the {@link org.apache.hadoop.hbase.master.cleaner.CleanerChore} to keep track of
* the links to a particular file, during the {@code FileLink} creation, a new file is placed
* inside a back-reference directory. There's one back-reference directory for each file that
* has links, and in the directory there's one file per link.
*
* <p>HFileLink Example
* <ul>

View File

@ -69,7 +69,7 @@ public class HFileLink extends FileLink {
}
/**
* @param rootdir Path to the root directory where hbase files are stored
* @param rootDir Path to the root directory where hbase files are stored
* @param archiveDir Path to the hbase archive directory
* @param path The path of the HFile Link.
*/
@ -82,7 +82,7 @@ public class HFileLink extends FileLink {
/**
* @param originPath Path to the hfile in the table directory
* @param archiveDir Path to the hfile in the archive directory
* @param archivePath Path to the hfile in the archive directory
*/
public HFileLink(final Path originPath, final Path archivePath) {
this.originPath = originPath;
@ -105,7 +105,7 @@ public class HFileLink extends FileLink {
}
/**
* @param p Path to check.
* @param path Path to check.
* @return True if the path is a HFileLink.
*/
public static boolean isHFileLink(final Path path) {
@ -158,7 +158,7 @@ public class HFileLink extends FileLink {
* or a path to the archived file like: /hbase/.archive/table/region/cf/hfile
*
* @param fs {@link FileSystem} on which to check the HFileLink
* @param rootdir root hbase directory
* @param rootDir root hbase directory
* @param archiveDir Path to the hbase archive directory
* @param path HFileLink path
* @return Referenced path (original path or archived path)
@ -325,7 +325,7 @@ public class HFileLink extends FileLink {
/**
* Get the full path of the HFile referenced by the back reference
*
* @param rootdir root hbase directory
* @param rootDir root hbase directory
* @param linkRefPath Link Back Reference path
* @return full path of the referenced hfile
* @throws IOException on unexpected error.

View File

@ -51,8 +51,8 @@ public interface BlockCache {
* @param caching Whether this request has caching enabled (used for stats)
* @param repeat Whether this is a repeat lookup for the same block
* (used to avoid double counting cache misses when doing double-check locking)
* {@see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)}
* @return Block or null if block is not in the cache.
* @see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)
*/
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat);
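
For reference, a lookup against this interface using the documented parameters:

    // caching=true counts toward cache stats; repeat=false so a miss is counted once.
    Cacheable block = cache.getBlock(cacheKey, true, false);
    if (block == null) {
      // not cached; the caller falls back to reading the block from the HFile
    }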

View File

@ -596,17 +596,14 @@ public class HFileBlock implements Cacheable {
/**
* Unified version 2 {@link HFile} block writer. The intended usage pattern
* is as follows:
* <ul>
* <li>Construct an {@link HFileBlock.Writer}, providing a compression
* algorithm
* <li>Call {@link Writer#startWriting(BlockType, boolean)} and get a data stream to
* write to
* <li>Write your data into the stream
* <li>Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} as many times as you need to
* store the serialized block into an external stream, or call
* {@link Writer#getHeaderAndData()} to get it as a byte array.
* <li>Repeat to write more blocks
* </ul>
* <ol>
* <li>Construct an {@link HFileBlock.Writer}, providing a compression algorithm.
* <li>Call {@link Writer#startWriting} and get a data stream to write to.
* <li>Write your data into the stream.
* <li>Call {@link Writer#writeHeaderAndData(FSDataOutputStream)} as many times as you need
* to store the serialized block into an external stream.
* <li>Repeat to write more blocks.
* </ol>
* <p>
*/
public static class Writer {
@ -696,7 +693,7 @@ public class HFileBlock implements Cacheable {
/**
* @param compressionAlgorithm compression algorithm to use
* @param dataBlockEncoderAlgo data block encoding algorithm to use
* @param dataBlockEncoder data block encoding algorithm to use
* @param checksumType type of checksum
* @param bytesPerChecksum bytes per checksum
*/
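
The numbered usage pattern above, as a rough sketch (the constructor arguments mirror the @param list in this hunk; exact signatures may differ):

    HFileBlock.Writer w = new HFileBlock.Writer(
        compressionAlgorithm, dataBlockEncoder, checksumType, bytesPerChecksum);
    DataOutputStream out = w.startWriting(BlockType.DATA);  // step 2: get the stream
    out.write(serializedCells);                             // step 3: write your data
    w.writeHeaderAndData(fsOut);                            // step 4: append to the file
    // step 5: repeat startWriting/write/writeHeaderAndData for further blocks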

View File

@ -401,7 +401,6 @@ public class HFileReaderV1 extends AbstractHFileReader {
* first key in the block = key, then you'll get thrown exceptions.
* @param key to find
* @param seekBefore find the key before the exact match.
* @return
*/
protected abstract int blockSeek(byte[] key, int offset, int length,
boolean seekBefore);

View File

@ -334,8 +334,8 @@ public class LruBlockCache implements BlockCache, HeapSize {
* @param caching true if the caller caches blocks on cache misses
* @param repeat Whether this is a repeat lookup for the same block
* (used to avoid double counting cache misses when doing double-check locking)
* {@see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)}
* @return buffer of specified cache key, or null if not in cache
* @see HFileReaderV2#readBlock(long, long, boolean, boolean, boolean, BlockType)
*/
@Override
public Cacheable getBlock(BlockCacheKey cacheKey, boolean caching, boolean repeat) {

View File

@ -1173,7 +1173,6 @@ public class HBaseClient {
/**
* Construct an IPC client with the default SocketFactory
* @param valueClass value class
* @param conf configuration
*/
public HBaseClient(Configuration conf) {

View File

@ -193,7 +193,7 @@ public abstract class HBaseServer implements RpcServer {
/** Returns the server instance called under or null. May be called under
* {@link #call(Class, RpcRequestBody, long, MonitoredRPCHandler)} implementations,
* and under protobuf methods of paramters and return values.
* and under protobuf methods of parameters and return values.
* Permits applications to access the server context.
* @return HBaseServer
*/

View File

@ -52,10 +52,8 @@ import org.apache.hadoop.util.ReflectionUtils;
* Mapper implementations using this MapRunnable must be thread-safe.
* <p>
* The Map-Reduce job has to be configured with the mapper to use via
* {@link #setMapperClass(Configuration, Class)} and
* the number of thread the thread-pool can use with the
* {@link #getNumberOfThreads(Configuration) method. The default
* value is 10 threads.
* {@link #setMapperClass} and the number of threads the thread-pool can use with the
* {@link #getNumberOfThreads} method. The default value is 10 threads.
* <p>
*/
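
Job setup per the corrected links, sketched with Job-based setters as in Hadoop's MultithreadedMapper (setNumberOfThreads is assumed to exist here; MyTableMapper is a placeholder):

    Job job = new Job(conf, "multithreaded-scan");
    job.setMapperClass(MultithreadedTableMapper.class);                // thread-pool shell
    MultithreadedTableMapper.setMapperClass(job, MyTableMapper.class); // the real mapper
    MultithreadedTableMapper.setNumberOfThreads(job, 10);              // default is 10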

View File

@ -398,6 +398,7 @@
<maxmemory>2g</maxmemory>
<verbose>true</verbose>
<reportOutputDirectory>target/apidocs</reportOutputDirectory>
<detectLinks>true</detectLinks>
</configuration>
</plugin>
<plugin>