HBASE-22572 Javadoc Warnings: @link reference not found (#306)

Signed-off-by: Jan Hentschel <jan.hentschel@ultratendency.com>
Signed-off-by: stack <stack@apache.org>
This commit is contained in:
syedmurtazahassan 2019-07-02 06:15:00 +02:00 committed by Michael Stack
parent 52eb893bcc
commit fabf2b8282
18 changed files with 34 additions and 30 deletions

View File

@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the {@link org.apache.hadoop.hbase.filter} package.
* Tag a test as related to the {@code org.apache.hadoop.hbase.filter} package.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests
* @see org.apache.hadoop.hbase.testclassification.CoprocessorTests

View File

@@ -19,7 +19,7 @@
package org.apache.hadoop.hbase.testclassification;
/**
* Tag a test as related to the {@link org.apache.hadoop.hbase.io} package. Things like HFile and
* Tag a test as related to the {@code org.apache.hadoop.hbase.io} package. Things like HFile and
* the like.
*
* @see org.apache.hadoop.hbase.testclassification.ClientTests

View File

@@ -45,4 +45,5 @@
<suppress checks="IllegalImport" message="org\.apache\.htrace\.core"/>
<suppress checks="ImportOrder" message="Extra separation in import group before"/>
<suppress checks="MethodLength" files="org.apache.hadoop.hbase.thrift.DemoClient.java"/>
<suppress checks="LineLength" files="org.apache.hadoop.hbase.util.ZKDataMigrator.java"/>
</suppressions>

View File

@@ -246,8 +246,7 @@ public class MetricsConnection implements StatisticTrackable {
/** Default load factor from {@link java.util.HashMap#DEFAULT_LOAD_FACTOR} */
private static final float LOAD_FACTOR = 0.75f;
/**
* Anticipated number of concurrent accessor threads, from
* {@link ConnectionImplementation#getBatchPool()}
* Anticipated number of concurrent accessor threads
*/
private static final int CONCURRENCY_LEVEL = 256;

View File

@@ -26,9 +26,9 @@ import org.apache.yetus.audience.InterfaceAudience;
* A {@code WeakReference} based shared object pool.
* The objects are kept in weak references and
* associated with keys which are identified by the {@code equals} method.
* The objects are created by {@link ObjectFactory} on demand.
* The object creation is expected to be lightweight,
* and the objects may be excessively created and discarded.
* The objects are created by {@link org.apache.hadoop.hbase.util.ObjectPool.ObjectFactory} on
* demand. The object creation is expected to be lightweight, and the objects may be excessively
* created and discarded.
* Thread safe.
*/
@InterfaceAudience.Private

View File

@@ -32,7 +32,7 @@ import org.junit.runner.notification.RunListener;
import org.junit.runner.notification.RunListener.ThreadSafe;
/**
* A RunListener to confirm that we have a {@link CategoryBasedTimeout} class rule for every test.
* A RunListener to confirm that we have a {@link HBaseClassTestRule} class rule for every test.
*/
@InterfaceAudience.Private
@ThreadSafe

View File

@@ -37,8 +37,8 @@ import org.apache.hadoop.hbase.util.Threads;
/**
* Test implementation of a coprocessor endpoint exposing the
* {@link TestRpcServiceProtos.TestProtobufRpcProto} service methods. For internal use by unit tests
* only.
* {@link org.apache.hadoop.hbase.ipc.protobuf.generated.TestRpcServiceProtos.TestProtobufRpcProto}
* service methods. For internal use by unit tests only.
*/
public class ProtobufCoprocessorService extends TestRpcServiceProtos.TestProtobufRpcProto
implements MasterCoprocessor, RegionCoprocessor {

View File

@@ -66,12 +66,11 @@ import java.util.Map;
* </pre>
* Internally, this input format restores each snapshot into a subdirectory of the given tmp
* directory. Input splits and
* record readers are created as described in {@link org.apache.hadoop.hbase.mapreduce
* .TableSnapshotInputFormat}
* record readers are created as described in
* {@link org.apache.hadoop.hbase.mapreduce.TableSnapshotInputFormat}
* (one per region).
* See {@link TableSnapshotInputFormat} for more notes on
* permissioning; the
* same caveats apply here.
* permissioning; the same caveats apply here.
*
* @see TableSnapshotInputFormat
* @see org.apache.hadoop.hbase.client.TableSnapshotScanner

View File

@@ -44,9 +44,9 @@ import org.apache.hbase.thirdparty.com.google.common.collect.Maps;
/**
* Shared implementation of mapreduce code over multiple table snapshots.
* Utilized by both mapreduce ({@link org.apache.hadoop.hbase.mapreduce
* .MultiTableSnapshotInputFormat} and mapred
* ({@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations.
* Utilized by both mapreduce
* {@link org.apache.hadoop.hbase.mapreduce.MultiTableSnapshotInputFormat} and mapred
* {@link org.apache.hadoop.hbase.mapred.MultiTableSnapshotInputFormat} implementations.
*/
@InterfaceAudience.LimitedPrivate({ "HBase" })
@InterfaceStability.Evolving

View File

@@ -186,7 +186,7 @@ To implement an Endpoint, you need to:
</ul>
<p>
For a more detailed discussion of how to implement a coprocessor Endpoint, along with some sample
code, see the {@link org.apache.hadoop.hbase.client.coprocessor} package documentation.
code, see the {@code org.apache.hadoop.hbase.client.coprocessor} package documentation.
</p>
<h2><a name="load">Coprocessor loading</a></h2>

View File

@@ -28,6 +28,7 @@ import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
@@ -1028,8 +1029,10 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
abstract double cost();
@SuppressWarnings("checkstyle:linelength")
/**
* Function to compute a scaled cost using {@link org.apache.commons.math3.stat.descriptive.DescriptiveStatistics}.
* Function to compute a scaled cost using
* {@link org.apache.commons.math3.stat.descriptive.DescriptiveStatistics#DescriptiveStatistics()}.
* It assumes that this is a zero sum set of costs. It assumes that the worst case
* possible is all of the elements in one region server and the rest having 0.
*

View File

@@ -93,7 +93,9 @@ public class ZKDataMigrator {
* Gets table state from ZK.
* @param zkw ZKWatcher instance to use
* @param tableName table we're checking
* @return Null or {@link ZooKeeperProtos.DeprecatedTableState.State} found in znode.
* @return Null or
* {@link org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos.DeprecatedTableState.State}
* found in znode.
* @throws KeeperException
* @deprecated Since 2.0.0. To be removed in hbase-3.0.0.
*/

View File

@@ -176,7 +176,7 @@ public abstract class HBaseTestCase extends TestCase {
}
/**
* Create a table of name <code>name</code> with {@link COLUMNS} for
* Create a table of name {@code name} with {@link #COLUMNS} for
* families.
* @param name Name to give table.
* @return Column descriptor.
@@ -186,7 +186,7 @@ public abstract class HBaseTestCase extends TestCase {
}
/**
* Create a table of name <code>name</code> with {@link COLUMNS} for
* Create a table of name {@code name} with {@link #COLUMNS} for
* families.
* @param name Name to give table.
* @param versions How many versions to allow per column.
@@ -199,7 +199,7 @@ public abstract class HBaseTestCase extends TestCase {
}
/**
* Create a table of name <code>name</code> with {@link COLUMNS} for
* Create a table of name {@code name} with {@link #COLUMNS} for
* families.
* @param name Name to give table.
* @param versions How many versions to allow per column.

View File

@@ -22,7 +22,7 @@ import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.mockito.Mockito;
/**
* {@link ConnectionImplementation} testing utility.
* {@link Connection} testing utility.
*/
public class HConnectionTestingUtility {
@@ -32,9 +32,9 @@ public class HConnectionTestingUtility {
* to HBaseTestingUtility to give it access.
*/
/**
* Get a Mocked {@link ConnectionImplementation} that goes with the passed <code>conf</code>
* Get a Mocked {@link Connection} that goes with the passed <code>conf</code>
* configuration instance. Minimally the mock will return &lt;code>conf&lt;/conf> when
* {@link ConnectionImplementation#getConfiguration()} is invoked. Be sure to shutdown the
* {@link Connection#getConfiguration()} is invoked. Be sure to shutdown the
* connection when done by calling {@link Connection#close()} else it will stick around; this is
* probably not what you want.
* @param conf configuration

View File

@@ -53,7 +53,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Run Increment tests that use the HBase clients; {@link HTable}.
* Run Increment tests that use the HBase clients; {@link TableBuilder}.
*
* Test is parameterized to run the slow and fast increment code paths. If fast, in the @before, we
* do a rolling restart of the single regionserver so that it can pick up the go fast configuration.

View File

@@ -48,7 +48,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.HTable}.
* Run tests that use the HBase clients; {@link org.apache.hadoop.hbase.client.TableBuilder}.
* Sets up the HBase mini cluster once at start and runs through all client tests.
* Each creates a table named for the method and does its stuff against that.
*/

View File

@@ -42,7 +42,7 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
/**
* Test the {@link MemStoreChunkPool} class
* Test the {@link org.apache.hadoop.hbase.regionserver.ChunkCreator.MemStoreChunkPool} class
*/
@Category({RegionServerTests.class, SmallTests.class})
public class TestMemStoreChunkPool {

View File

@@ -146,7 +146,7 @@ public class HBaseZKTestingUtility extends HBaseCommonTestingUtility {
}
/**
* Shuts down zk cluster created by call to {@link #startMiniZKCluster(File)} or does nothing.
* Shuts down zk cluster created by call to {@link #startMiniZKCluster()} or does nothing.
* @see #startMiniZKCluster()
*/
public void shutdownMiniZKCluster() throws IOException {