HBASE-9693 Fix javadoc warnings

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1528938 13f79535-47bb-0310-9956-ffa450edef68
jxiang 2013-10-03 17:02:23 +00:00
parent ad6a463e4e
commit 04de6764ac
19 changed files with 23 additions and 43 deletions
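
Three fixes recur across the hunks below: an empty @return tag is either given a description or removed, @see {@link X} is rewritten as the plain @see X form, and duplicate or unused imports are dropped. For orientation, here is a minimal, hypothetical class (not taken from this commit) written in the javadoc style the diff converges on:

    // Hypothetical example of the javadoc conventions applied in this commit.
    public class WriterTracker {
      private int openWriters;

      /**
       * Returns the number of open writers.
       * @return the number of currently open writers
       * @see #reset()
       */
      int getNumOpenWriters() {
        return openWriters;
      }

      /** Resets the counter; no @return tag, since the method returns nothing. */
      void reset() {
        openWriters = 0;
      }
    }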

@@ -172,7 +172,7 @@ public class MetaReader {
/**
* Callers should call close on the returned {@link HTable} instance.
* @param catalogTracker
* @return
* @return An {@link HTable} for <code>hbase:meta</code>
* @throws IOException
*/
static HTable getCatalogHTable(final CatalogTracker catalogTracker)

@@ -536,7 +536,7 @@ public class AggregationClient {
* std.
* @param table
* @param scan
* @return
* @return standard deviations
* @throws Throwable
*/
private <R, S, P extends Message, Q extends Message, T extends Message>
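
For orientation only: a table-wide standard deviation like the one this method returns is conventionally assembled from three partial aggregates (row count, sum, and sum of squares). The sketch below illustrates that arithmetic; it is not AggregationClient's coprocessor code, and the helper name is hypothetical.

    // Hypothetical helper: combine partial aggregates into a standard deviation.
    static double stdFromPartials(long rowCount, double sum, double sumOfSquares) {
      double mean = sum / rowCount;
      double variance = sumOfSquares / rowCount - mean * mean; // E[X^2] - E[X]^2
      return Math.sqrt(Math.max(variance, 0d)); // clamp tiny negative rounding error
    }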

@@ -158,7 +158,6 @@ public class Tag {
/**
* Returns the total length of the entire tag entity
* @return
*/
short getLength() {
return this.length;
@@ -166,7 +165,6 @@ public class Tag {
/**
* Returns the offset of the entire tag entity
* @return
*/
int getOffset() {
return this.offset;

@@ -77,7 +77,6 @@ public final class Compression {
/**
* Returns the classloader to load the Codec class from.
* @return
*/
private static ClassLoader getClassLoaderForCodec() {
ClassLoader cl = Thread.currentThread().getContextClassLoader();

@@ -825,7 +825,7 @@ public class OrderedBytes {
* a value in Numeric encoding and is within the valid range of
* {@link BigDecimal} values. {@link BigDecimal} does not support {@code NaN}
* or {@code Infinite} values.
* @see #decodeNumericAsDouble(byte[], int)
* @see #decodeNumericAsDouble(PositionedByteRange)
*/
private static BigDecimal decodeNumericValue(PositionedByteRange src) {
final int e;
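
The BigDecimal constraint quoted above is plain JDK behavior: the BigDecimal(double) constructor rejects non-finite input, which is consistent with the javadoc's pointer to decodeNumericAsDouble for those cases. A small illustration (JDK only, not OrderedBytes code):

    java.math.BigDecimal ok = java.math.BigDecimal.valueOf(12.5d); // fine
    try {
      new java.math.BigDecimal(Double.NaN); // throws NumberFormatException
    } catch (NumberFormatException expected) {
      // NaN and infinities have no BigDecimal representation.
    }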

@@ -25,12 +25,9 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.Server;
import org.cloudera.htrace.Sampler;
import org.cloudera.htrace.Span;
import org.cloudera.htrace.Trace;
import org.cloudera.htrace.TraceScope;
import org.cloudera.htrace.impl.AlwaysSampler;
/**
* Abstract base class for all HBase event handlers. Subclasses should

@@ -206,11 +206,10 @@ public class Reference {
}
/**
* Use this instead of {@link #toByteArray()} when writing to a stream and you want to use
* the pb mergeDelimitedFrom (w/o the delimiter, pb reads to EOF which may not be what ou want).
* Use this when writing to a stream and you want to use the pb mergeDelimitedFrom
* (w/o the delimiter, pb reads to EOF which may not be what you want).
* @return This instance serialized as a delimited protobuf w/ a magic pb prefix.
* @throws IOException
* @see {@link #toByteArray()}
*/
byte [] toByteArray() throws IOException {
return ProtobufUtil.prependPBMagic(convert().toByteArray());
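
The distinction the rewritten comment draws is the standard protobuf one: without a length delimiter, parseFrom(InputStream) reads to EOF, while the delimited calls stop at each message boundary. A hedged sketch with a stand-in generated message class (Msg is hypothetical, not an HBase proto):

    // Hypothetical sketch; "Msg" stands for any protobuf-generated message class.
    static void delimitedVsPlain(Msg first, Msg second) throws java.io.IOException {
      java.io.ByteArrayOutputStream out = new java.io.ByteArrayOutputStream();
      // Delimited: each message carries a length prefix, so two messages written
      // back-to-back can be read back one at a time.
      first.writeDelimitedTo(out);
      second.writeDelimitedTo(out);
      java.io.ByteArrayInputStream in = new java.io.ByteArrayInputStream(out.toByteArray());
      Msg a = Msg.parseDelimitedFrom(in); // stops at the first message boundary
      Msg b = Msg.parseDelimitedFrom(in); // reads the second

      // Plain: writeTo()/parseFrom(InputStream) has no delimiter, so the reader
      // consumes the stream to EOF -- the caveat the updated comment warns about.
      java.io.ByteArrayOutputStream plain = new java.io.ByteArrayOutputStream();
      first.writeTo(plain);
      Msg c = Msg.parseFrom(new java.io.ByteArrayInputStream(plain.toByteArray()));
    }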

@@ -729,7 +729,7 @@ public class HFile {
* We write it as a protobuf.
* @param out
* @throws IOException
* @see {@link #read(DataInputStream)}
* @see #read(DataInputStream)
*/
void write(final DataOutputStream out) throws IOException {
HFileProtos.FileInfoProto.Builder builder = HFileProtos.FileInfoProto.newBuilder();
@@ -748,7 +748,7 @@ public class HFile {
* Can deserialize protobuf of old Writables format.
* @param in
* @throws IOException
* @see {@link #write(DataOutputStream)}
* @see #write(DataOutputStream)
*/
void read(final DataInputStream in) throws IOException {
// This code is tested over in TestHFileReaderV1 where we read an old hfile w/ this new code.

@@ -244,7 +244,7 @@ public class BucketCache implements BlockCache, HeapSize {
* Get the IOEngine from the IO engine name
* @param ioEngineName
* @param capacity
* @return
* @return the IOEngine
* @throws IOException
*/
private IOEngine getIOEngineFromName(String ioEngineName, long capacity)

@@ -206,7 +206,6 @@ public class UpgradeTo96 extends Configured implements Tool {
* <li> Upgrading Znodes
* <li> Log splitting
* </ul>
* @return
* @throws Exception
*/
private int executeUpgrade() throws Exception {
@@ -230,7 +229,6 @@ public class UpgradeTo96 extends Configured implements Tool {
/**
* Performs log splitting for all regionserver directories.
* @return
* @throws Exception
*/
private void doOfflineLogSplitting() throws Exception {

@@ -222,7 +222,7 @@ public class ProcedureCoordinator {
* @param procName
* @param procArgs
* @param expectedMembers
* @return
* @return the newly created procedure
*/
Procedure createProcedure(ForeignExceptionDispatcher fed, String procName, byte[] procArgs,
List<String> expectedMembers) {

@@ -18,8 +18,6 @@
*/
package org.apache.hadoop.hbase.regionserver;
import javax.management.ObjectName;
import java.io.IOException;
import java.lang.Thread.UncaughtExceptionHandler;
import java.lang.annotation.Retention;
@@ -38,7 +36,6 @@ import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.UUID;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
@@ -50,6 +47,8 @@ import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import javax.management.ObjectName;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -65,7 +64,6 @@ import org.apache.hadoop.hbase.ClockOutOfSyncException;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.HealthCheckChore;
@@ -90,7 +88,6 @@ import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.client.RowMutations;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
@@ -120,7 +117,6 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.ResponseConverter;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CloseRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.CompactRegionRequest;
@@ -138,7 +134,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetStoreFileRespon
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.MergeRegionsResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionRequest.RegionOpenInfo;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.OpenRegionResponse.RegionOpeningState;
@@ -152,6 +147,7 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.StopServerResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.UpdateFavoredNodesResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ActionResult;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.BulkLoadHFileRequest;
@@ -185,7 +181,6 @@ import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.Regio
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStartupResponse;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionServerStatusService;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.ReportRSFatalErrorRequest;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey;
import org.apache.hadoop.hbase.regionserver.HRegion.Operation;
import org.apache.hadoop.hbase.regionserver.Leases.LeaseStillHeldException;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionProgress;
@@ -4407,7 +4402,6 @@ public class HRegionServer implements ClientProtos.ClientService.BlockingInterfa
/**
* Return the last failed RS name under /hbase/recovering-regions/encodedRegionName
* @param encodedRegionName
* @return
* @throws IOException
* @throws KeeperException
*/

@@ -135,7 +135,7 @@ public class MemStore implements HeapSize {
/**
* Creates a snapshot of the current memstore.
* Snapshot must be cleared by call to {@link #clearSnapshot(SortedSet<KeyValue>)}
* Snapshot must be cleared by call to {@link #clearSnapshot(SortedSet)}
* To get the snapshot made by this method, use {@link #getSnapshot()}
*/
void snapshot() {
@@ -172,8 +172,8 @@ public class MemStore implements HeapSize {
* Called by flusher to get current snapshot made by a previous
* call to {@link #snapshot()}
* @return Return snapshot.
* @see {@link #snapshot()}
* @see {@link #clearSnapshot(SortedSet<KeyValue>)}
* @see #snapshot()
* @see #clearSnapshot(SortedSet)
*/
KeyValueSkipListSet getSnapshot() {
return this.snapshot;
@@ -183,7 +183,7 @@ public class MemStore implements HeapSize {
* The passed snapshot was successfully persisted; it can be let go.
* @param ss The snapshot to clean out.
* @throws UnexpectedException
* @see {@link #snapshot()}
* @see #snapshot()
*/
void clearSnapshot(final SortedSet<KeyValue> ss)
throws UnexpectedException {
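
Read together, these three javadoc blocks describe the flush-time lifecycle. Below is a hedged sketch of the call order from a flusher's point of view; only the MemStore method names and signatures come from this file, the surrounding code is assumed:

    void flushSketch(MemStore memstore) throws Exception {
      memstore.snapshot();                                 // freeze the current edits
      KeyValueSkipListSet frozen = memstore.getSnapshot(); // pick up the frozen set
      // ... persist the snapshot's KeyValues to an HFile ...
      memstore.clearSnapshot(frozen);                      // release it once safely persisted
    }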

@@ -56,14 +56,14 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.CellUtil;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.RemoteExceptionHandler;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.Delete;
@@ -71,7 +71,6 @@ import org.apache.hadoop.hbase.client.HConnection;
import org.apache.hadoop.hbase.client.HConnectionManager;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Row;
import org.apache.hadoop.hbase.exceptions.RegionOpeningException;
import org.apache.hadoop.hbase.io.HeapSize;
import org.apache.hadoop.hbase.master.SplitLogManager;
@@ -79,12 +78,12 @@ import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.RequestConverter;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoRequest;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.GetRegionInfoResponse;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry;
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService.BlockingInterface;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
import org.apache.hadoop.hbase.protobuf.generated.WALProtos.WALKey;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos.StoreSequenceId;
@@ -648,7 +647,6 @@ public class HLogSplitter {
/**
* Get current open writers
* @return
*/
private int getNumOpenWriters() {
int result = 0;

@@ -202,7 +202,7 @@ public class AccessController extends BaseRegionObserver
* @param e the coprocessor environment
* @param families the map of column families to qualifiers present in
* the request
* @return
* @return an authorization result
*/
AuthResult permissionGranted(String request, User user, Permission.Action permRequest,
RegionCoprocessorEnvironment e,

@@ -276,7 +276,7 @@ public class TableAuthManager {
* Authorizes a global permission
* @param perms
* @param action
* @return
* @return true if authorized, false otherwise
*/
private boolean authorize(List<Permission> perms, Permission.Action action) {
if (perms != null) {

@@ -858,7 +858,7 @@ public class HBaseFsck extends Configured implements Tool {
* To get the column family list according to the column family dirs
* @param columns
* @param hbi
* @return
* @return a set of column families
* @throws IOException
*/
private Set<String> getColumnFamilyList(Set<String> columns, HbckInfo hbi) throws IOException {

@@ -250,7 +250,6 @@ public class HFileV1Detector extends Configured implements Tool {
/**
* Ignore ROOT table as it doesn't exist in 0.96.
* @param path
* @return
*/
private boolean isRootTable(Path path) {
if (path != null && path.toString().endsWith("-ROOT-")) return true;
@@ -385,7 +384,6 @@ public class HFileV1Detector extends Configured implements Tool {
/**
* Removes the prefix of defaultNamespace from the path.
* @param originPath
* @return
*/
private String removeDefaultNSPath(Path originalPath) {
String pathStr = originalPath.toString();

@@ -433,7 +433,6 @@ public class ThriftServerRunner implements Runnable {
* Returns a list of all the column families for a given htable.
*
* @param table
* @return
* @throws IOException
*/
byte[][] getAllColumns(HTable table) throws IOException {