HBASE-2076 Many javadoc warnings

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hbase/trunk@894511 13f79535-47bb-0310-9956-ffa450edef68
Jean-Daniel Cryans 2009-12-29 23:22:39 +00:00
parent bbefab59d6
commit 42411929ff
6 changed files with 12 additions and 16 deletions

View File

@@ -258,6 +258,7 @@ Release 0.21.0 - Unreleased
HBASE-2062 Metrics documentation outdated (Lars George via JD)
HBASE-2045 Update trunk and branch zk to just-release 3.2.2.
HBASE-2074 Improvements to the hadoop-config script (Bassam Tabbara via Stack)
+HBASE-2076 Many javadoc warnings
NEW FEATURES
HBASE-1901 "General" partitioner for "hbase-48" bulk (behind the api, write

View File

@@ -186,8 +186,8 @@ public class Scan implements Writable {
* @param minStamp minimum timestamp value, inclusive
* @param maxStamp maximum timestamp value, exclusive
* @throws IOException if invalid time range
-* @see {@link #setMaxVersions()}
-* @see {@link #setMaxVersions(int)}
+* @see #setMaxVersions()
+* @see #setMaxVersions(int)
*/
public Scan setTimeRange(long minStamp, long maxStamp)
throws IOException {
@@ -201,8 +201,8 @@ public class Scan implements Writable {
* and you want all versions returned, up the number of versions beyond the
* defaut.
* @param timestamp version timestamp
-* @see {@link #setMaxVersions()}
-* @see {@link #setMaxVersions(int)}
+* @see #setMaxVersions()
+* @see #setMaxVersions(int)
*/
public Scan setTimeStamp(long timestamp) {
try {

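For context, a minimal usage sketch (not part of the commit) of the Scan time-range API whose @see tags are corrected above; it assumes only the 0.21-era org.apache.hadoop.hbase.client.Scan methods visible in the hunks.

import java.io.IOException;
import org.apache.hadoop.hbase.client.Scan;

public class TimeRangeScanSketch {
  /** Builds a Scan restricted to [minStamp, maxStamp): inclusive lower, exclusive upper bound. */
  public static Scan buildScan(long minStamp, long maxStamp) throws IOException {
    Scan scan = new Scan();
    scan.setTimeRange(minStamp, maxStamp); // throws IOException on an invalid range
    scan.setMaxVersions(3);                // return up to 3 versions that fall inside the range
    return scan;
  }
}

As the javadoc notes, setTimeRange (and setTimeStamp) should normally be paired with setMaxVersions, otherwise only the newest cell version is consulted.
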
View File

@@ -54,10 +54,6 @@ import org.apache.hadoop.mapreduce.TaskAttemptContext;
* loss of data due to region server failure can be tolerated (for example,
* because it is easy to rerun a bulk import).
* </p>
-*
-* <p>
-* See also the {@link IndexBuilder} example.
-* </p>
*/
public class MultiTableOutputFormat extends OutputFormat<ImmutableBytesWritable, Writable> {
/** Set this to {@link #WAL_OFF} to turn off write-ahead logging (HLog) */

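For context, a sketch (not part of the commit) of wiring a job to the MultiTableOutputFormat documented above: the reducer's output key names the destination table and the value is the edit. The WAL_PROPERTY constant is an assumption about the class's conf key; verify it against the source before relying on it.

import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.MultiTableOutputFormat;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Reducer;

public class MultiTableJobSetup {
  /** Reducer that routes each Put to the table named by its key. */
  public static class RoutingReducer
      extends Reducer<ImmutableBytesWritable, Put, ImmutableBytesWritable, Writable> {
    @Override
    protected void reduce(ImmutableBytesWritable table, Iterable<Put> edits, Context context)
        throws java.io.IOException, InterruptedException {
      for (Put edit : edits) {
        context.write(table, edit); // output key = destination table name
      }
    }
  }

  public static void configure(Job job) {
    job.setOutputFormatClass(MultiTableOutputFormat.class);
    job.setReducerClass(RoutingReducer.class);
    // Only when lost edits are tolerable (e.g. a re-runnable bulk import), per the class javadoc:
    job.getConfiguration().setBoolean(MultiTableOutputFormat.WAL_PROPERTY, // assumed conf key constant
        MultiTableOutputFormat.WAL_OFF);
  }
}
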
View File

@@ -138,12 +138,13 @@ To achieve total ordering, you will likely need to write a Partitioner
that is intimate with your tables key namespace and that knows how
to distribute keys among the reducers so a total order is maintained. If your
keys are distributed with some regularity across a defined key space -- i.e.
-you know the start and end keys -- then the {@link SimpleTotalOrderPartitioner}
+you know the start and end keys -- then the
+{@link org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner}
may be all you need.
</p>
<p>See org.apache.hadoop.hbase.mapreduce.TestHFileOutputFormat for an example
that puts together {@link org.apache.hadoop.hbase.mapreduce.KeyValueSortReducer},
-{@link SimpleTotalOrderPartitioner}, and
+{@link org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner}, and
{@link org.apache.hadoop.hbase.mapreduce.HFileOutputFormat}.</p>
<p>HFileOutputFormat writes HFiles. When your MapReduce file finishes, in your

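For context, a sketch (not part of the commit) of the partitioner setup the package documentation above describes. The START and END configuration-key constants are assumed from the SimpleTotalOrderPartitioner of that era; check the class before using them.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.mapreduce.SimpleTotalOrderPartitioner;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.mapreduce.Job;

public class TotalOrderSetup {
  /** Splits [startKey, endKey) evenly across reducers so reducer output stays in total key order. */
  public static void configure(Job job, byte[] startKey, byte[] endKey) {
    Configuration conf = job.getConfiguration();
    conf.set(SimpleTotalOrderPartitioner.START, Bytes.toString(startKey)); // assumed constant
    conf.set(SimpleTotalOrderPartitioner.END, Bytes.toString(endKey));     // assumed constant
    job.setPartitionerClass(SimpleTotalOrderPartitioner.class);
  }
}
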
View File

@@ -51,7 +51,7 @@ public interface InternalScanner extends Closeable {
/**
* Grab the next row's worth of values with a limit on the number of values
* to return.
-* @param results
+* @param result
* @param limit
* @return true if more rows exist after this one, false if scanner is done
* @throws IOException

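For context, a sketch (not part of the commit) of how a caller drains the InternalScanner interface whose @param tag is fixed above, assuming the era's KeyValue-based next(result, limit) signature.

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.regionserver.InternalScanner;

public class ScannerDrainSketch {
  /** Reads the scanner row by row, fetching at most limit values per call. */
  public static void drain(InternalScanner scanner, int limit) throws IOException {
    List<KeyValue> result = new ArrayList<KeyValue>();
    boolean moreRows;
    do {
      result.clear();
      moreRows = scanner.next(result, limit); // true while more rows remain after this one
      // ... process the row's KeyValues in result ...
    } while (moreRows);
    scanner.close();
  }
}
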
View File

@@ -98,7 +98,7 @@ import org.apache.hadoop.hbase.util.Threads;
* separate reentrant lock is used.
*
* <p>To read an HLog, call {@link #getReader(org.apache.hadoop.fs.FileSystem,
-* org.apache.hadoop.fs.Path, org.apache.hadoop.conf.Configuration)}.
+* org.apache.hadoop.fs.Path, org.apache.hadoop.hbase.HBaseConfiguration)}.
*
*/
public class HLog implements HConstants, Syncable {
@@ -352,8 +352,7 @@ public class HLog implements HConstants, Syncable {
* Get a reader for the WAL.
* @param fs
* @param path
-* @param keyClass
-* @param valueClass
+* @param conf
* @return A WAL reader. Close when done with it.
* @throws IOException
*/
@@ -377,8 +376,7 @@
/**
* Get a writer for the WAL.
* @param path
-* @param keyClass
-* @param valueClass
+* @param conf
* @return A WAL writer. Close when done with it.
* @throws IOException
*/
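
For context, a sketch (not part of the commit) of reading a WAL file with the getReader call the corrected class javadoc points at. The HLog.Reader/HLog.Entry shapes and the regionserver package location are assumptions based on the trunk of that period; verify them against the source.

import java.io.IOException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.regionserver.HLog; // assumed package location for this era

public class WalDumpSketch {
  /** Prints every entry of a write-ahead log file. */
  public static void dump(FileSystem fs, Path walFile, HBaseConfiguration conf)
      throws IOException {
    HLog.Reader reader = HLog.getReader(fs, walFile, conf);
    try {
      HLog.Entry entry;
      while ((entry = reader.next()) != null) {   // null once the log is exhausted
        System.out.println(entry.getKey() + ": " + entry.getEdit()); // assumed accessors
      }
    } finally {
      reader.close(); // "Close when done with it," as the javadoc advises
    }
  }
}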