HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal tags in hadoop-tools. Contributed by Akira AJISAKA.

(cherry picked from commit ef9946cd52)
Tsuyoshi Ozawa 2015-03-17 16:09:21 +09:00
parent bb828565f8
commit 77297017d8
12 changed files with 51 additions and 58 deletions

@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD
     and Solaris in addition to Linux. (Kiran Kumar M R via cnauroth)
 
+    HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal
+    tags in hadoop-tools. (Akira AJISAKA via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
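For context, the errors this change fixes come from JDK8's doclint, which, unlike JDK7, rejects bare HTML metacharacters, self-closing tags such as <p/>, and unresolvable {@link} targets in javadoc. A minimal before/after illustration of the escaping pattern applied throughout this patch (illustrative Java, not lines from the patch):

// Before: fails under JDK8 javadoc, which parses bare '<' and '>' as malformed HTML.
/**
 * For -112 <= i <= 127, only one byte is used with the actual value.
 */

// After: {@literal} emits the text verbatim and passes doclint.
/**
 * For {@literal -112 <= i <= 127}, only one byte is used with the actual value.
 */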

@@ -25,10 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * A byte sequence that is used as a Java native type for buffer.
- * It is resizable and distinguishes between the count of the seqeunce and
+ * It is resizable and distinguishes between the count of the sequence and
  * the current capacity.
  *
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
+ * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -124,7 +124,7 @@ public class Buffer implements Comparable, Cloneable {
 
   /**
    * Change the capacity of the backing storage.
-   * The data is preserved if newCapacity >= getCount().
+   * The data is preserved if newCapacity {@literal >=} getCount().
    * @param newCapacity The new capacity in bytes.
    */
   public void setCapacity(int newCapacity) {
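As a concrete reading of that contract, here is a minimal sketch of a capacity change that keeps the sequence intact when newCapacity >= getCount() and truncates it otherwise. The field names bytes and count are assumptions for illustration, not the actual Buffer internals:

  public void setCapacity(int newCapacity) {
    if (newCapacity < 0) {
      throw new IllegalArgumentException("Invalid capacity argument " + newCapacity);
    }
    if (bytes == null || newCapacity != bytes.length) {
      byte[] data = new byte[newCapacity];
      if (newCapacity < count) {
        count = newCapacity;  // shrinking below the count truncates the sequence
      }
      if (count > 0) {
        System.arraycopy(bytes, 0, data, 0, count);  // contents preserved up to count
      }
      bytes = data;
    }
  }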
@@ -162,7 +162,7 @@ public class Buffer implements Comparable, Cloneable {
   public void truncate() {
     setCapacity(count);
   }
 
   /**
    * Append specified bytes to the buffer.
    *

@@ -28,9 +28,9 @@ import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
 
 /**
- * Various utility functions for Hadooop record I/O runtime.
+ * Various utility functions for Hadoop record I/O runtime.
  *
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
+ * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
  */
 @Deprecated
 @InterfaceAudience.Public
@@ -462,8 +462,8 @@ public class Utils {
 
   /**
    * Serializes a long to a binary stream with zero-compressed encoding.
-   * For -112 <= i <= 127, only one byte is used with the actual value.
-   * For other values of i, the first byte value indicates whether the
+   * For {@literal -112 <= i <= 127}, only one byte is used with the actual
+   * value. For other values of i, the first byte value indicates whether the
    * long is positive or negative, and the number of bytes that follow.
    * If the first byte value v is between -113 and -120, the following long
    * is positive, with number of bytes that follow are -(v+112).
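The scheme described here is Hadoop's zero-compressed vlong encoding, shared with org.apache.hadoop.io.WritableUtils. A sketch of a serializer matching the javadoc above (essentially WritableUtils.writeVLong; assumes java.io.DataOutput and java.io.IOException imports):

  public static void writeVLong(DataOutput stream, long i) throws IOException {
    if (i >= -112 && i <= 127) {
      stream.writeByte((byte) i);  // small values fit in a single byte
      return;
    }
    int len = -112;                // positive marker base
    if (i < 0) {
      i ^= -1L;                    // one's complement, so the loop below sees a positive value
      len = -120;                  // negative marker base
    }
    for (long tmp = i; tmp != 0; tmp >>= 8) {
      len--;                       // one step per payload byte; v ends in [-113,-120] or [-121,-128]
    }
    stream.writeByte((byte) len);  // first byte encodes sign and byte count
    len = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = len; idx != 0; idx--) {
      int shiftbits = (idx - 1) * 8;
      stream.writeByte((byte) ((i >> shiftbits) & 0xFF));  // payload, most significant byte first
    }
  }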

@@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 public class DfsTask extends Task {
 
   /**
-   * Default sink for {@link java.lang.System.out System.out}
-   * and {@link java.lang.System.err System.err}.
+   * Default sink for {@link java.lang.System#out}
+   * and {@link java.lang.System#err}.
    */
   private static final OutputStream nullOut = new OutputStream() {
       public void write(int b) { /* ignore */ }
@@ -171,7 +171,7 @@ public class DfsTask extends Task {
   }
 
   /**
-   * Invoke {@link org.apache.hadoop.fs.FsShell#doMain FsShell.doMain} after a
+   * Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
    * few cursory checks of the configuration.
    */
   public void execute() throws BuildException {

@@ -44,10 +44,9 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.Progressable;
 
 /**
- * <p>
  * A block-based {@link FileSystem} backed by
  * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
- * </p>
+ *
  * @see NativeS3FileSystem
  */
 @InterfaceAudience.Public
@@ -70,7 +69,6 @@ public class S3FileSystem extends FileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
    *
    * @return <code>s3</code>
    */

@@ -57,11 +57,11 @@ import java.util.concurrent.ThreadPoolExecutor;
 /**
  * Upload files/parts asap directly from a memory buffer (instead of buffering
  * to a file).
- * <p/>
+ * <p>
  * Uploads are managed low-level rather than through the AWS TransferManager.
  * This allows for uploading each part of a multi-part upload as soon as
  * the bytes are in memory, rather than waiting until the file is closed.
- * <p/>
+ * <p>
  * Unstable: statistics and error handling might evolve
 */
 @InterfaceStability.Unstable

@@ -62,24 +62,29 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * <p>
  * A {@link FileSystem} for reading and writing files stored on
 * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
 * Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
 * stores files on S3 in their
 * native form so they can be read by other S3 tools.
- *
+ * <p>
 * A note about directories. S3 of course has no "native" support for them.
 * The idiom we choose then is: for any directory created by this class,
 * we use an empty object "#{dirpath}_$folder$" as a marker.
 * Further, to interoperate with other S3 tools, we also accept the following:
- * - an object "#{dirpath}/' denoting a directory marker
- * - if there exists any objects with the prefix "#{dirpath}/", then the
- * directory is said to exist
- * - if both a file with the name of a directory and a marker for that
- * directory exists, then the *file masks the directory*, and the directory
- * is never returned.
- * </p>
+ * <ul>
+ * <li>an object "#{dirpath}/' denoting a directory marker</li>
+ * <li>
+ * if there exists any objects with the prefix "#{dirpath}/", then the
+ * directory is said to exist
+ * </li>
+ * <li>
+ * if both a file with the name of a directory and a marker for that
+ * directory exists, then the *file masks the directory*, and the directory
+ * is never returned.
+ * </li>
+ * </ul>
+ *
 * @see org.apache.hadoop.fs.s3.S3FileSystem
 */
 @InterfaceAudience.Public
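A sketch of the lookup order those rules imply, written against the s3native store interface; this helper is a simplified illustration, not the method NativeS3FileSystem actually uses:

  // Assumes a NativeFileSystemStore field named store; "_$folder$" is the
  // marker suffix described in the class javadoc above.
  private boolean isDirectory(String key) throws IOException {
    if (store.retrieveMetadata(key) != null) {
      return false;  // a plain file with the same name masks the directory
    }
    if (store.retrieveMetadata(key + "_$folder$") != null) {
      return true;   // explicit directory marker object
    }
    // The directory also "exists" if any object carries the "key/" prefix.
    PartialListing listing = store.list(key + "/", 1);
    return listing.getFiles().length > 0 || listing.getCommonPrefixes().length > 0;
  }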
@@ -308,7 +313,6 @@ public class NativeS3FileSystem extends FileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
    *
    * @return <code>s3n</code>
    */

@@ -600,8 +600,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * Azure.
    *
    * @throws AzureException
-   * @throws ConfigurationException
-   *
    */
   private void configureAzureStorageSession() throws AzureException {
@@ -705,7 +703,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    *           raised on errors communicating with Azure storage.
    * @throws IOException
    *           raised on errors performing I/O or setting up the session.
-   * @throws URISyntaxExceptions
+   * @throws URISyntaxException
    *           raised on creating mal-formed URI's.
    */
   private void connectUsingAnonymousCredentials(final URI uri)
@@ -1036,7 +1034,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Checks if the given key in Azure Storage should be stored as a page
    * blob instead of block blob.
-   * @throws URISyntaxException
    */
   public boolean isPageBlobKey(String key) {
     return isKeyForDirectorySet(key, pageBlobDirs);
@@ -1755,7 +1752,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * the path and returns a path relative to the root directory of the
    * container.
    *
-   * @param blob
+   * @param directory
    *          - adjust the key to this directory to a path relative to the root
    *            directory
    *
@@ -2142,14 +2139,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * uses a in-order first traversal of blob directory structures to maintain
    * the sorted order of the blob names.
    *
-   * @param dir
-   *          -- Azure blob directory
-   *
-   * @param list
-   *          -- a list of file metadata objects for each non-directory blob.
-   *
-   * @param maxListingLength
-   *          -- maximum length of the built up list.
+   * @param aCloudBlobDirectory Azure blob directory
+   * @param aFileMetadataList a list of file metadata objects for each
+   * non-directory blob.
+   * @param maxListingCount maximum length of the built up list.
    */
   private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
       ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount,
@@ -2320,8 +2313,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * swallow the error since what most probably happened is that
    * the first operation succeeded on the server.
    * @param blob The blob to delete.
-   * @param leaseID A string identifying the lease, or null if no
-   * lease is to be used.
+   * @param lease Azure blob lease, or null if no lease is to be used.
    * @throws StorageException
    */
   private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {

@@ -81,12 +81,10 @@ import com.microsoft.azure.storage.blob.CloudBlob;
 import com.microsoft.azure.storage.core.*;
 
 /**
- * <p>
  * A {@link FileSystem} for reading and writing files stored on <a
 * href="http://store.azure.com/">Windows Azure</a>. This implementation is
 * blob-based and stores files on Azure in their native form so they can be read
 * by other Azure tools.
- * </p>
 */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -218,9 +216,11 @@ public class NativeAzureFileSystem extends FileSystem {
     }
 
     /**
-     * Write to disk the information needed to redo folder rename, in JSON format.
-     * The file name will be wasb://<sourceFolderPrefix>/folderName-RenamePending.json
+     * Write to disk the information needed to redo folder rename,
+     * in JSON format. The file name will be
+     * {@code wasb://<sourceFolderPrefix>/folderName-RenamePending.json}
      * The file format will be:
+     * <pre>{@code
      * {
      *   FormatVersion: "1.0",
      *   OperationTime: "<YYYY-MM-DD HH:MM:SS.MMM>",
@@ -239,7 +239,7 @@ public class NativeAzureFileSystem extends FileSystem {
      *     "innerFile",
      *     "innerFile2"
      *   ]
-     * }
+     * } }</pre>
      * @throws IOException
      */
     public void writeFile(FileSystem fs) throws IOException {
@@ -913,9 +913,6 @@ public class NativeAzureFileSystem extends FileSystem {
      * The create also includes the name of the original key value which is
      * stored in the m_key member variable. This method should only be called
      * when the stream is closed.
-     *
-     * @param anEncodedKey
-     *          Encoding of the original key stored in m_key member.
      */
     private void restoreKey() throws IOException {
       store.rename(getEncodedKey(), getKey());
@@ -1796,7 +1793,7 @@ public class NativeAzureFileSystem extends FileSystem {
    *
    * @param permission
    *          The permission to mask.
-   * @param applyDefaultUmask
+   * @param applyMode
    *          Whether to also apply the default umask.
    * @return The masked persmission.
    */
@@ -2409,7 +2406,6 @@ public class NativeAzureFileSystem extends FileSystem {
    * recover the original key.
    *
    * @param aKey
-   * @param numBuckets
    * @return Encoded version of the original key.
    */
   private static String encodeKey(String aKey) {

@@ -79,7 +79,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Returns the full logical ACL.
    *
-   * @return List<AclEntry> containing full logical ACL
+   * @return List containing full logical ACL
    */
   public List<AclEntry> getAclEntries() {
     return AclUtil.getAclFromPermAndEntries(getPermission(),
@@ -89,7 +89,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Sets optional ACL entries.
    *
-   * @param aclEntries List<AclEntry> containing all ACL entries
+   * @param aclEntries List containing all ACL entries
    */
   public void setAclEntries(List<AclEntry> aclEntries) {
     this.aclEntries = aclEntries;
@@ -98,7 +98,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Returns all xAttrs.
    *
-   * @return Map<String, byte[]> containing all xAttrs
+   * @return Map containing all xAttrs
    */
   public Map<String, byte[]> getXAttrs() {
     return xAttrs != null ? xAttrs : Collections.<String, byte[]>emptyMap();
@@ -107,7 +107,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Sets optional xAttrs.
    *
-   * @param xAttrs Map<String, byte[]> containing all xAttrs
+   * @param xAttrs Map containing all xAttrs
    */
   public void setXAttrs(Map<String, byte[]> xAttrs) {
     this.xAttrs = xAttrs;

@@ -141,7 +141,7 @@ public class SimpleCopyListing extends CopyListing {
   }
 
   /**
    * Collect the list of
-   * <sourceRelativePath, sourceFileStatus>
+   * {@literal <sourceRelativePath, sourceFileStatus>}
    * to be copied and write to the sequence file. In essence, any file or
    * directory that need to be copied or sync-ed is written as an entry to the
    * sequence file, with the possible exception of the source root:
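For readers unfamiliar with the listing file: each entry maps a source-relative path (a Text key) to its CopyListingFileStatus. A simplified sketch of writing such entries, assuming conf, listingPath, sourceRoot and sourceStatuses are in scope (the real doBuildListing additionally handles the source root and sync semantics):

  try (SequenceFile.Writer writer = SequenceFile.createWriter(conf,
      SequenceFile.Writer.file(listingPath),
      SequenceFile.Writer.keyClass(Text.class),
      SequenceFile.Writer.valueClass(CopyListingFileStatus.class))) {
    for (FileStatus status : sourceStatuses) {
      String relPath = DistCpUtils.getRelativePath(sourceRoot, status.getPath());
      writer.append(new Text(relPath), new CopyListingFileStatus(status));
    }
  }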

@@ -270,7 +270,7 @@ public class DistCpUtils {
    *
    * @param fileSystem FileSystem containing the file
    * @param fileStatus FileStatus of file
-   * @return List<AclEntry> containing full logical ACL
+   * @return List containing full logical ACL
    * @throws IOException if there is an I/O error
    */
   public static List<AclEntry> getAcl(FileSystem fileSystem,
@@ -285,7 +285,7 @@ public class DistCpUtils {
    *
    * @param fileSystem FileSystem containing the file
    * @param path file path
-   * @return Map<String, byte[]> containing all xAttrs
+   * @return Map containing all xAttrs
    * @throws IOException if there is an I/O error
    */
   public static Map<String, byte[]> getXAttrs(FileSystem fileSystem,