diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index 568bf04ce16..b2f0f472f51 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -315,7 +315,7 @@ public class FileContext {
*
* @throws UnsupportedFileSystemException If the file system for
* <code>absOrFqPath</code> is not supported.
- * @throws IOExcepton If the file system for <code>absOrFqPath</code> could
+ * @throws IOException If the file system for <code>absOrFqPath</code> could
* not be instantiated.
*/
protected AbstractFileSystem getFSofPath(final Path absOrFqPath)
@@ -2725,7 +2725,7 @@ public class FileContext {
/**
* Query the effective storage policy ID for the given file or directory.
*
- * @param src file or directory path.
+ * @param path file or directory path.
* @return storage policy for the given file.
* @throws IOException
*/
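
For reference, a minimal sketch (not part of this patch) of the storage-policy query this hunk documents, assuming a default Configuration; the class name StoragePolicyExample and the path "/tmp/example" are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;

public class StoragePolicyExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // getStoragePolicy is the method whose @param tag this hunk renames
    // from "src" to "path", matching the actual parameter name.
    BlockStoragePolicySpi policy = fc.getStoragePolicy(new Path("/tmp/example"));
    System.out.println("Effective storage policy: " + policy.getName());
  }
}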
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java
index ac343fe3478..33e044f661b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java
@@ -52,7 +52,7 @@ public interface FenceMethod {
/**
* Attempt to fence the target node.
- * @param serviceAddr the address (host:ipcport) of the service to fence
+ * @param target the address (host:ipcport) of the service to fence
* @param args the configured arguments, which were checked at startup by
* {@link #checkArgs(String)}
* @return true if fencing was successful, false if unsuccessful or
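
For reference, a minimal sketch (not part of this patch) of a custom fencer against this interface, showing the renamed target parameter; the class LoggingFenceMethod and its log-only behavior are hypothetical:

import org.apache.hadoop.ha.BadFencingConfigurationException;
import org.apache.hadoop.ha.FenceMethod;
import org.apache.hadoop.ha.HAServiceTarget;

public class LoggingFenceMethod implements FenceMethod {
  @Override
  public void checkArgs(String args) throws BadFencingConfigurationException {
    // This fencer takes no arguments; reject anything else at startup,
    // per the checkArgs(String) contract referenced above.
    if (args != null && !args.isEmpty()) {
      throw new BadFencingConfigurationException("no arguments supported: " + args);
    }
  }

  @Override
  public boolean tryFence(HAServiceTarget target, String args) {
    // "target" (formerly documented as "serviceAddr") identifies the node to fence.
    System.out.println("Would fence " + target.getAddress());
    return true; // report success so failover can proceed
  }
}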
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
index f383d4ce3b4..83239d0cc01 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
@@ -151,12 +151,13 @@ public interface HAServiceProtocol {
/**
* Return the current status of the service. The status indicates
* the current state (e.g. ACTIVE/STANDBY) as well as
- * some additional information. {@see HAServiceStatus}
- *
+ * some additional information.
+ *
* @throws AccessControlException
* if access is denied.
* @throws IOException
* if other errors happen
+ * @see HAServiceStatus
*/
@Idempotent
public HAServiceStatus getServiceStatus() throws AccessControlException,
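
For reference, a minimal sketch (not part of this patch) of a caller using getServiceStatus() and the HAServiceStatus type the new @see tag points to; obtaining the proxy is deployment-specific and elided here:

import java.io.IOException;
import org.apache.hadoop.ha.HAServiceProtocol;
import org.apache.hadoop.ha.HAServiceStatus;

public class StatusCheck {
  static void printStatus(HAServiceProtocol proxy) throws IOException {
    // May throw AccessControlException (an IOException subclass) if access is denied.
    HAServiceStatus status = proxy.getServiceStatus();
    System.out.println("State: " + status.getState()
        + ", ready to become active: " + status.isReadyToBecomeActive());
  }
}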
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
index 693065f35b2..089cf6f54c7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package-info.java
@@ -15,6 +15,28 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * A mechanism for selectively retrying methods that throw exceptions under
+ * certain circumstances.
+ * Typical usage is
+ * UnreliableImplementation unreliableImpl = new UnreliableImplementation();
+ * UnreliableInterface unreliable = (UnreliableInterface)
+ * RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+ * RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10,
+ * TimeUnit.SECONDS));
+ * unreliable.call();
+ *
+ * This will retry any method called on <code>unreliable</code> four times -
+ * in this case the <code>call()</code> method - sleeping 10 seconds between
+ * each retry. There are a number of
+ * {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
+ * available, or you can implement a custom one by implementing
+ * {@link org.apache.hadoop.io.retry.RetryPolicy}.
+ * It is also possible to specify retry policies on a
+ * {@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map)
+ * per-method basis}.
+ */
@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
@InterfaceStability.Evolving
package org.apache.hadoop.io.retry;
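
For reference, a self-contained sketch (not part of this patch) of the usage the new javadoc describes; UnreliableInterface and UnreliableImplementation are the hypothetical names the javadoc itself uses, stubbed here so the example compiles:

import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryExample {
  interface UnreliableInterface {
    void call() throws Exception;
  }

  static class UnreliableImplementation implements UnreliableInterface {
    private int attempts = 0;
    @Override
    public void call() throws Exception {
      // Fail twice, then succeed, to exercise the retry policy.
      if (++attempts < 3) {
        throw new Exception("transient failure " + attempts);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    UnreliableInterface unreliable = (UnreliableInterface) RetryProxy.create(
        UnreliableInterface.class, new UnreliableImplementation(),
        RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS));
    unreliable.call(); // retried up to four times, sleeping 10 seconds between tries
  }
}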
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html
deleted file mode 100644
index ae553fc7a62..00000000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/package.html
+++ /dev/null
@@ -1,48 +0,0 @@
-<html>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<body>
-
-<p>
-A mechanism for selectively retrying methods that throw exceptions under certain circumstances.
-</p>
-
-<p>
-Typical usage is
-</p>
-
-<pre>
-UnreliableImplementation unreliableImpl = new UnreliableImplementation();
-UnreliableInterface unreliable = (UnreliableInterface)
-  RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-    RetryPolicies.retryUpToMaximumCountWithFixedSleep(4, 10, TimeUnit.SECONDS));
-unreliable.call();
-</pre>
-
-<p>
-This will retry any method called on <code>unreliable</code> four times - in this case the <code>call()</code>
-method - sleeping 10 seconds between
-each retry. There are a number of {@link org.apache.hadoop.io.retry.RetryPolicies retry policies}
-available, or you can implement a custom one by implementing {@link org.apache.hadoop.io.retry.RetryPolicy}.
-It is also possible to specify retry policies on a
-{@link org.apache.hadoop.io.retry.RetryProxy#create(Class, Object, Map) per-method basis}.
-</p>
-
-</body>
-</html>