HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal tags in hadoop-tools. Contributed by Akira AJISAKA.

(cherry picked from commit ef9946cd52)
This commit is contained in:
Tsuyoshi Ozawa 2015-03-17 16:09:21 +09:00
parent bb828565f8
commit 77297017d8
12 changed files with 51 additions and 58 deletions

View File

@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support FreeBSD
and Solaris in addition to Linux. (Kiran Kumar M R via cnauroth)
HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal
tags in hadoop-tools. (Akira AJISAKA via ozawa)
Release 2.6.1 - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -25,10 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* A byte sequence that is used as a Java native type for buffer.
* It is resizable and distinguishes between the count of the seqeunce and
* It is resizable and distinguishes between the count of the sequence and
* the current capacity.
*
* @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
* @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
*/
@Deprecated
@InterfaceAudience.Public
@ -124,7 +124,7 @@ public class Buffer implements Comparable, Cloneable {
/**
* Change the capacity of the backing storage.
* The data is preserved if newCapacity >= getCount().
* The data is preserved if newCapacity {@literal >=} getCount().
* @param newCapacity The new capacity in bytes.
*/
public void setCapacity(int newCapacity) {
@ -162,7 +162,7 @@ public class Buffer implements Comparable, Cloneable {
public void truncate() {
setCapacity(count);
}
/**
* Append specified bytes to the buffer.
*

View File

@ -28,9 +28,9 @@ import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
/**
* Various utility functions for Hadooop record I/O runtime.
* Various utility functions for Hadoop record I/O runtime.
*
* @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
* @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
*/
@Deprecated
@InterfaceAudience.Public
@ -462,8 +462,8 @@ public class Utils {
/**
* Serializes a long to a binary stream with zero-compressed encoding.
* For -112 <= i <= 127, only one byte is used with the actual value.
* For other values of i, the first byte value indicates whether the
* For {@literal -112 <= i <= 127}, only one byte is used with the actual
* value. For other values of i, the first byte value indicates whether the
* long is positive or negative, and the number of bytes that follow.
* If the first byte value v is between -113 and -120, the following long
* is positive, with number of bytes that follow are -(v+112).

View File

@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
public class DfsTask extends Task {
/**
* Default sink for {@link java.lang.System.out System.out}
* and {@link java.lang.System.err System.err}.
* Default sink for {@link java.lang.System#out}
* and {@link java.lang.System#err}.
*/
private static final OutputStream nullOut = new OutputStream() {
public void write(int b) { /* ignore */ }
@ -171,7 +171,7 @@ public class DfsTask extends Task {
}
/**
* Invoke {@link org.apache.hadoop.fs.FsShell#doMain FsShell.doMain} after a
* Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
* few cursory checks of the configuration.
*/
public void execute() throws BuildException {

View File

@ -44,10 +44,9 @@ import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.util.Progressable;
/**
* <p>
* A block-based {@link FileSystem} backed by
* <a href="http://aws.amazon.com/s3">Amazon S3</a>.
* </p>
*
* @see NativeS3FileSystem
*/
@InterfaceAudience.Public
@ -70,7 +69,6 @@ public class S3FileSystem extends FileSystem {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>s3</code>
*/

View File

@ -57,11 +57,11 @@ import java.util.concurrent.ThreadPoolExecutor;
/**
* Upload files/parts asap directly from a memory buffer (instead of buffering
* to a file).
* <p/>
* <p>
* Uploads are managed low-level rather than through the AWS TransferManager.
* This allows for uploading each part of a multi-part upload as soon as
* the bytes are in memory, rather than waiting until the file is closed.
* <p/>
* <p>
* Unstable: statistics and error handling might evolve
*/
@InterfaceStability.Unstable

View File

@ -62,24 +62,29 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* <p>
* A {@link FileSystem} for reading and writing files stored on
* <a href="http://aws.amazon.com/s3">Amazon S3</a>.
* Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
* stores files on S3 in their
* native form so they can be read by other S3 tools.
*
* <p>
* A note about directories. S3 of course has no "native" support for them.
* The idiom we choose then is: for any directory created by this class,
* we use an empty object "#{dirpath}_$folder$" as a marker.
* Further, to interoperate with other S3 tools, we also accept the following:
* - an object "#{dirpath}/' denoting a directory marker
* - if there exists any objects with the prefix "#{dirpath}/", then the
* directory is said to exist
* - if both a file with the name of a directory and a marker for that
* directory exists, then the *file masks the directory*, and the directory
* is never returned.
* </p>
* <ul>
* <li>an object "#{dirpath}/" denoting a directory marker</li>
* <li>
* if there exists any objects with the prefix "#{dirpath}/", then the
* directory is said to exist
* </li>
* <li>
* if both a file with the name of a directory and a marker for that
* directory exists, then the *file masks the directory*, and the directory
* is never returned.
* </li>
* </ul>
*
* @see org.apache.hadoop.fs.s3.S3FileSystem
*/
@InterfaceAudience.Public
@ -308,7 +313,6 @@ public class NativeS3FileSystem extends FileSystem {
/**
* Return the protocol scheme for the FileSystem.
* <p/>
*
* @return <code>s3n</code>
*/

View File

@ -600,8 +600,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* Azure.
*
* @throws AzureException
* @throws ConfigurationException
*
*/
private void configureAzureStorageSession() throws AzureException {
@ -705,7 +703,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* raised on errors communicating with Azure storage.
* @throws IOException
* raised on errors performing I/O or setting up the session.
* @throws URISyntaxExceptions
* @throws URISyntaxException
* raised on creating mal-formed URI's.
*/
private void connectUsingAnonymousCredentials(final URI uri)
@ -1036,7 +1034,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Checks if the given key in Azure Storage should be stored as a page
* blob instead of block blob.
* @throws URISyntaxException
*/
public boolean isPageBlobKey(String key) {
return isKeyForDirectorySet(key, pageBlobDirs);
@ -1755,7 +1752,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* the path and returns a path relative to the root directory of the
* container.
*
* @param blob
* @param directory
* - adjust the key to this directory to a path relative to the root
* directory
*
@ -2142,14 +2139,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* uses an in-order first traversal of blob directory structures to maintain
* the sorted order of the blob names.
*
* @param dir
* -- Azure blob directory
*
* @param list
* -- a list of file metadata objects for each non-directory blob.
*
* @param maxListingLength
* -- maximum length of the built up list.
* @param aCloudBlobDirectory Azure blob directory
* @param aFileMetadataList a list of file metadata objects for each
* non-directory blob.
* @param maxListingCount maximum length of the built up list.
*/
private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount,
@ -2320,8 +2313,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* swallow the error since what most probably happened is that
* the first operation succeeded on the server.
* @param blob The blob to delete.
* @param leaseID A string identifying the lease, or null if no
* lease is to be used.
* @param lease Azure blob lease, or null if no lease is to be used.
* @throws StorageException
*/
private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease) throws StorageException {

View File

@ -81,12 +81,10 @@ import com.microsoft.azure.storage.blob.CloudBlob;
import com.microsoft.azure.storage.core.*;
/**
* <p>
* A {@link FileSystem} for reading and writing files stored on <a
* href="http://store.azure.com/">Windows Azure</a>. This implementation is
* blob-based and stores files on Azure in their native form so they can be read
* by other Azure tools.
* </p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
@ -218,9 +216,11 @@ public class NativeAzureFileSystem extends FileSystem {
}
/**
* Write to disk the information needed to redo folder rename, in JSON format.
* The file name will be wasb://<sourceFolderPrefix>/folderName-RenamePending.json
* Write to disk the information needed to redo folder rename,
* in JSON format. The file name will be
* {@code wasb://<sourceFolderPrefix>/folderName-RenamePending.json}
* The file format will be:
* <pre>{@code
* {
* FormatVersion: "1.0",
* OperationTime: "<YYYY-MM-DD HH:MM:SS.MMM>",
@ -239,7 +239,7 @@ public class NativeAzureFileSystem extends FileSystem {
* "innerFile",
* "innerFile2"
* ]
* }
* } }</pre>
* @throws IOException
*/
public void writeFile(FileSystem fs) throws IOException {
@ -913,9 +913,6 @@ public class NativeAzureFileSystem extends FileSystem {
* The create also includes the name of the original key value which is
* stored in the m_key member variable. This method should only be called
* when the stream is closed.
*
* @param anEncodedKey
* Encoding of the original key stored in m_key member.
*/
private void restoreKey() throws IOException {
store.rename(getEncodedKey(), getKey());
@ -1796,7 +1793,7 @@ public class NativeAzureFileSystem extends FileSystem {
*
* @param permission
* The permission to mask.
* @param applyDefaultUmask
* @param applyMode
* Whether to also apply the default umask.
* @return The masked permission.
*/
@ -2409,7 +2406,6 @@ public class NativeAzureFileSystem extends FileSystem {
* recover the original key.
*
* @param aKey
* @param numBuckets
* @return Encoded version of the original key.
*/
private static String encodeKey(String aKey) {

View File

@ -79,7 +79,7 @@ public final class CopyListingFileStatus extends FileStatus {
/**
* Returns the full logical ACL.
*
* @return List<AclEntry> containing full logical ACL
* @return List containing full logical ACL
*/
public List<AclEntry> getAclEntries() {
return AclUtil.getAclFromPermAndEntries(getPermission(),
@ -89,7 +89,7 @@ public final class CopyListingFileStatus extends FileStatus {
/**
* Sets optional ACL entries.
*
* @param aclEntries List<AclEntry> containing all ACL entries
* @param aclEntries List containing all ACL entries
*/
public void setAclEntries(List<AclEntry> aclEntries) {
this.aclEntries = aclEntries;
@ -98,7 +98,7 @@ public final class CopyListingFileStatus extends FileStatus {
/**
* Returns all xAttrs.
*
* @return Map<String, byte[]> containing all xAttrs
* @return Map containing all xAttrs
*/
public Map<String, byte[]> getXAttrs() {
return xAttrs != null ? xAttrs : Collections.<String, byte[]>emptyMap();
@ -107,7 +107,7 @@ public final class CopyListingFileStatus extends FileStatus {
/**
* Sets optional xAttrs.
*
* @param xAttrs Map<String, byte[]> containing all xAttrs
* @param xAttrs Map containing all xAttrs
*/
public void setXAttrs(Map<String, byte[]> xAttrs) {
this.xAttrs = xAttrs;

View File

@ -141,7 +141,7 @@ public class SimpleCopyListing extends CopyListing {
}
/**
* Collect the list of
* <sourceRelativePath, sourceFileStatus>
* {@literal <sourceRelativePath, sourceFileStatus>}
* to be copied and write to the sequence file. In essence, any file or
* directory that need to be copied or sync-ed is written as an entry to the
* sequence file, with the possible exception of the source root:

View File

@ -270,7 +270,7 @@ public class DistCpUtils {
*
* @param fileSystem FileSystem containing the file
* @param fileStatus FileStatus of file
* @return List<AclEntry> containing full logical ACL
* @return List containing full logical ACL
* @throws IOException if there is an I/O error
*/
public static List<AclEntry> getAcl(FileSystem fileSystem,
@ -285,7 +285,7 @@ public class DistCpUtils {
*
* @param fileSystem FileSystem containing the file
* @param path file path
* @return Map<String, byte[]> containing all xAttrs
* @return Map containing all xAttrs
* @throws IOException if there is an I/O error
*/
public static Map<String, byte[]> getXAttrs(FileSystem fileSystem,