diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2e04cc196f3..381705457a9 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1108,6 +1108,9 @@ Release 2.7.0 - UNRELEASED
     HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support
     FreeBSD and Solaris in addition to Linux. (Kiran Kumar M R via cnauroth)
 
+    HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal
+    tags in hadoop-tools. (Akira AJISAKA via ozawa)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
index 78cb360c253..9d0b3a42c22 100644
--- a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
+++ b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
@@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 public class DfsTask extends Task {
 
   /**
-   * Default sink for {@link java.lang.System.out System.out}
-   * and {@link java.lang.System.err System.err}.
+   * Default sink for {@link java.lang.System#out}
+   * and {@link java.lang.System#err}.
    */
   private static final OutputStream nullOut = new OutputStream() {
       public void write(int b)    { /* ignore */ }
@@ -171,7 +171,7 @@ public class DfsTask extends Task {
   }
 
   /**
-   * Invoke {@link org.apache.hadoop.fs.FsShell#doMain FsShell.doMain} after a
+   * Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
    * few cursory checks of the configuration.
    */
   public void execute() throws BuildException {
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
index dda3cf683fa..8bdfe9ac077 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
@@ -44,10 +44,9 @@ import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.util.Progressable;
 
 /**
- * <p>
  * A block-based {@link FileSystem} backed by
  * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
- * </p>
+ *
  * @see NativeS3FileSystem
 */
@InterfaceAudience.Public
@@ -70,7 +69,6 @@ public class S3FileSystem extends FileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
    *
    * @return <code>s3</code>
    */
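A note on the recurring pattern in the hunks above and below: JDK8 runs javadoc with
doclint enabled, so markup that JDK7 tolerated now fails the build. The usual
offenders in this patch are the self-closing <p/> ("self-closing element not
allowed"), bare < and > in javadoc text, and {@link} targets that name a member
with a dot rather than '#'. A minimal sketch of the legal forms (illustrative only;
the Names interface below is invented for the example, not part of the patch):

    import java.util.List;

    public interface Names {
      /**
       * Returns the stored names.
       * <p>
       * Generics in javadoc text must be escaped: either as entities,
       * List&lt;String&gt;, or inside inline tags such as {@code List<String>}
       * and {@literal a < b}.
       *
       * @return a list of names, never null
       */
      List<String> getNames();
    }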
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
index a29c47b99f6..68195817ee4 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
@@ -57,11 +57,11 @@ import java.util.concurrent.ThreadPoolExecutor;
 /**
  * Upload files/parts asap directly from a memory buffer (instead of buffering
  * to a file).
- * <p/>
+ * <p>
  * Uploads are managed low-level rather than through the AWS TransferManager.
  * This allows for uploading each part of a multi-part upload as soon as
  * the bytes are in memory, rather than waiting until the file is closed.
- * <p/>
+ * <p>
  * Unstable: statistics and error handling might evolve
 */
@InterfaceStability.Unstable
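For context on the class described above: S3AFastOutputStream initiates a multipart
upload and ships each buffered part the moment the buffer fills, rather than
staging the whole file on disk first. The following is a simplified, synchronous
sketch of that idea against the AWS SDK for Java 1.x (illustrative only: the
EagerMultipartUpload class is invented here, and the real stream adds buffer
management, a ThreadPoolExecutor, and error handling):

    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
    import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
    import com.amazonaws.services.s3.model.PartETag;
    import com.amazonaws.services.s3.model.UploadPartRequest;
    import java.io.ByteArrayInputStream;
    import java.util.ArrayList;
    import java.util.List;

    class EagerMultipartUpload {
      private final AmazonS3 s3;
      private final String bucket;
      private final String key;
      private final String uploadId;
      private final List<PartETag> etags = new ArrayList<PartETag>();
      private int partNumber = 1;

      EagerMultipartUpload(AmazonS3 s3, String bucket, String key) {
        this.s3 = s3;
        this.bucket = bucket;
        this.key = key;
        // Start the multipart upload up front so parts can be sent eagerly.
        this.uploadId = s3.initiateMultipartUpload(
            new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
      }

      /** Ship one full memory buffer immediately; no file staging. */
      void uploadPart(byte[] buffer, int len) {
        UploadPartRequest request = new UploadPartRequest()
            .withBucketName(bucket).withKey(key).withUploadId(uploadId)
            .withPartNumber(partNumber++).withPartSize(len)
            .withInputStream(new ByteArrayInputStream(buffer, 0, len));
        etags.add(s3.uploadPart(request).getPartETag());
      }

      /** On close(), stitch the already-uploaded parts together. */
      void complete() {
        s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, uploadId, etags));
      }
    }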
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
index acc5500d7f5..a2f9805ffaa 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
@@ -62,24 +62,29 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 /**
- * <p>
  * A {@link FileSystem} for reading and writing files stored on
 * <a href="http://aws.amazon.com/s3">Amazon S3</a>.
 * Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
 * stores files on S3 in their
 * native form so they can be read by other S3 tools.
- *
+ * <p>
 * A note about directories. S3 of course has no "native" support for them.
 * The idiom we choose then is: for any directory created by this class,
 * we use an empty object "#{dirpath}_$folder$" as a marker.
 * Further, to interoperate with other S3 tools, we also accept the following:
- *  - an object "#{dirpath}/' denoting a directory marker
- *  - if there exists any objects with the prefix "#{dirpath}/", then the
- *    directory is said to exist
- *  - if both a file with the name of a directory and a marker for that
- *    directory exists, then the *file masks the directory*, and the directory
- *    is never returned.
- * </p>
+ * <ul>
+ *   <li>an object "#{dirpath}/' denoting a directory marker</li>
+ *   <li>
+ *     if there exists any objects with the prefix "#{dirpath}/", then the
+ *     directory is said to exist
+ *   </li>
+ *   <li>
+ *     if both a file with the name of a directory and a marker for that
+ *     directory exists, then the *file masks the directory*, and the
+ *     directory is never returned
+ *   </li>
+ * </ul>
+ *
 * @see org.apache.hadoop.fs.s3.S3FileSystem
 */
@InterfaceAudience.Public
@@ -308,7 +313,6 @@ public class NativeS3FileSystem extends FileSystem {
 
   /**
    * Return the protocol scheme for the FileSystem.
-   * <p/>
    *
    * @return <code>s3n</code>
   */
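The directory idiom documented in the class javadoc above reduces to a small
decision procedure. A hedged sketch (the Store interface is hypothetical, standing
in for the key and prefix lookups the real store provides; this is not the class's
actual code):

    interface Store {
      boolean objectExists(String key);           // exact-key lookup
      boolean anyObjectWithPrefix(String prefix); // is the listing non-empty?
    }

    final class DirectoryRules {
      /** Apply the rules from the NativeS3FileSystem javadoc. */
      static boolean isDirectory(Store store, String dirpath) {
        // A file with the directory's exact name masks the directory,
        // so the directory is never returned.
        if (store.objectExists(dirpath)) {
          return false;
        }
        // Marker written by NativeS3FileSystem itself.
        if (store.objectExists(dirpath + "_$folder$")) {
          return true;
        }
        // Marker written by other S3 tools, or any object under the prefix.
        return store.objectExists(dirpath + "/")
            || store.anyObjectWithPrefix(dirpath + "/");
      }
    }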
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index c6ba84fc479..5dc096397d2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -600,8 +600,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * Azure.
    *
    * @throws AzureException
-   * @throws ConfigurationException
-   *
    */
   private void configureAzureStorageSession() throws AzureException {
 
@@ -705,7 +703,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    *           raised on errors communicating with Azure storage.
    * @throws IOException
    *           raised on errors performing I/O or setting up the session.
-   * @throws URISyntaxExceptions
+   * @throws URISyntaxException
    *           raised on creating mal-formed URI's.
    */
   private void connectUsingAnonymousCredentials(final URI uri)
@@ -1036,7 +1034,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
   /**
    * Checks if the given key in Azure Storage should be stored as a page
    * blob instead of block blob.
-   * @throws URISyntaxException
    */
   public boolean isPageBlobKey(String key) {
     return isKeyForDirectorySet(key, pageBlobDirs);
@@ -1755,7 +1752,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * the path and returns a path relative to the root directory of the
    * container.
    *
-   * @param blob
+   * @param directory
    *          - adjust the key to this directory to a path relative to the root
    *            directory
    *
@@ -2142,14 +2139,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * uses a in-order first traversal of blob directory structures to maintain
    * the sorted order of the blob names.
    *
-   * @param dir
-   *          -- Azure blob directory
-   *
-   * @param list
-   *          -- a list of file metadata objects for each non-directory blob.
-   *
-   * @param maxListingLength
-   *          -- maximum length of the built up list.
+   * @param aCloudBlobDirectory Azure blob directory
+   * @param aFileMetadataList a list of file metadata objects for each
+   *                          non-directory blob.
+   * @param maxListingCount maximum length of the built up list.
    */
   private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
       ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount,
@@ -2320,8 +2313,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
    * swallow the error since what most probably happened is that
    * the first operation succeeded on the server.
    * @param blob The blob to delete.
-   * @param leaseID A string identifying the lease, or null if no
-   *                lease is to be used.
+   * @param lease Azure blob lease, or null if no lease is to be used.
    * @throws StorageException
    */
   private void safeDelete(CloudBlobWrapper blob, SelfRenewingLease lease)
       throws StorageException {
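The safeDelete contract spelled out above (delete, but swallow the failure when a
retried operation already succeeded server-side) can be sketched against the Azure
Storage SDK types this file uses. Illustrative only: the real method goes through
CloudBlobWrapper, and treating HTTP 404 as "already deleted" is an assumption of
this sketch, not code from the patch:

    import com.microsoft.azure.storage.AccessCondition;
    import com.microsoft.azure.storage.StorageException;
    import com.microsoft.azure.storage.blob.CloudBlockBlob;
    import com.microsoft.azure.storage.blob.DeleteSnapshotsOption;

    final class SafeDeleteSketch {
      static void safeDelete(CloudBlockBlob blob, String leaseId)
          throws StorageException {
        AccessCondition lease = (leaseId == null)
            ? null : AccessCondition.generateLeaseCondition(leaseId);
        try {
          blob.delete(DeleteSnapshotsOption.NONE, lease, null, null);
        } catch (StorageException e) {
          // A retry can race its own first attempt: if the blob is already
          // gone, the first delete most probably succeeded on the server.
          if (e.getHttpStatusCode() != 404) {
            throw e;
          }
        }
      }
    }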
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
index e39b37d5e49..623645a414d 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -81,12 +81,10 @@ import com.microsoft.azure.storage.blob.CloudBlob;
 import com.microsoft.azure.storage.core.*;
 
 /**
- * <p>
 * A {@link FileSystem} for reading and writing files stored on <a
 * href="http://store.azure.com/">Windows Azure</a>. This implementation is
 * blob-based and stores files on Azure in their native form so they can be read
 * by other Azure tools.
- * </p>
 */
@InterfaceAudience.Public
@InterfaceStability.Stable
@@ -218,9 +216,11 @@ public class NativeAzureFileSystem extends FileSystem {
  }

  /**
-     * Write to disk the information needed to redo folder rename, in JSON format.
-     * The file name will be wasb://<sourceFolderPrefix>/folderName-RenamePending.json
+     * Write to disk the information needed to redo folder rename,
+     * in JSON format. The file name will be
+     * {@code wasb://<sourceFolderPrefix>/folderName-RenamePending.json}
      * The file format will be:
+     * <pre>{@code
      * {
      *   FormatVersion: "1.0",
      *   OperationTime: "<YYYY-MM-DD HH:MM:SS.MMM>",
@@ -239,7 +239,7 @@ public class NativeAzureFileSystem extends FileSystem {
      *    "innerFile",
      *    "innerFile2"
      *  ]
-     * }
+     * } }</pre>
      * @throws IOException
      */
     public void writeFile(FileSystem fs) throws IOException {
@@ -913,9 +913,6 @@ public class NativeAzureFileSystem extends FileSystem {
      * The create also includes the name of the original key value which is
      * stored in the m_key member variable. This method should only be called
      * when the stream is closed.
-     *
-     * @param anEncodedKey
-     *          Encoding of the original key stored in m_key member.
      */
     private void restoreKey() throws IOException {
       store.rename(getEncodedKey(), getKey());
@@ -1796,7 +1793,7 @@ public class NativeAzureFileSystem extends FileSystem {
    *
    * @param permission
    *          The permission to mask.
-   * @param applyDefaultUmask
+   * @param applyMode
    *          Whether to also apply the default umask.
    * @return The masked persmission.
    */
@@ -2409,7 +2406,6 @@ public class NativeAzureFileSystem extends FileSystem {
    * recover the original key.
    *
    * @param aKey
-   * @param numBuckets
    * @return Encoded version of the original key.
    */
   private static String encodeKey(String aKey) {
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
index 9b31dfbdd8f..8af799a48fe 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
@@ -79,7 +79,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Returns the full logical ACL.
    *
-   * @return List<AclEntry> containing full logical ACL
+   * @return List&lt;AclEntry&gt; containing full logical ACL
    */
   public List<AclEntry> getAclEntries() {
     return AclUtil.getAclFromPermAndEntries(getPermission(),
@@ -89,7 +89,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Sets optional ACL entries.
    *
-   * @param aclEntries List<AclEntry> containing all ACL entries
+   * @param aclEntries List&lt;AclEntry&gt; containing all ACL entries
    */
   public void setAclEntries(List<AclEntry> aclEntries) {
     this.aclEntries = aclEntries;
@@ -98,7 +98,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Returns all xAttrs.
    *
-   * @return Map<String, byte[]> containing all xAttrs
+   * @return Map&lt;String, byte[]&gt; containing all xAttrs
    */
   public Map<String, byte[]> getXAttrs() {
     return xAttrs != null ? xAttrs : Collections.<String, byte[]>emptyMap();
@@ -107,7 +107,7 @@ public final class CopyListingFileStatus extends FileStatus {
   /**
    * Sets optional xAttrs.
    *
-   * @param xAttrs Map<String, byte[]> containing all xAttrs
+   * @param xAttrs Map&lt;String, byte[]&gt; containing all xAttrs
    */
   public void setXAttrs(Map<String, byte[]> xAttrs) {
     this.xAttrs = xAttrs;
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index f9cfc86d9ea..6dc827a92e3 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -141,7 +141,7 @@ public class SimpleCopyListing extends CopyListing {
   }
   /**
    * Collect the list of
-   *   <sourceRelativePath, sourceFileStatus>
+   *   {@literal <sourceRelativePath, sourceFileStatus>}
    * to be copied and write to the sequence file. In essence, any file or
    * directory that need to be copied or sync-ed is written as an entry to the
    * sequence file, with the possible exception of the source root:
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
index 20fdf112e1c..d34faafba87 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/util/DistCpUtils.java
@@ -270,7 +270,7 @@ public class DistCpUtils {
    *
    * @param fileSystem FileSystem containing the file
    * @param fileStatus FileStatus of file
-   * @return List<AclEntry> containing full logical ACL
+   * @return List&lt;AclEntry&gt; containing full logical ACL
    * @throws IOException if there is an I/O error
    */
   public static List<AclEntry> getAcl(FileSystem fileSystem,
@@ -285,7 +285,7 @@ public class DistCpUtils {
    *
    * @param fileSystem FileSystem containing the file
    * @param path file path
-   * @return Map<String, byte[]> containing all xAttrs
+   * @return Map&lt;String, byte[]&gt; containing all xAttrs
    * @throws IOException if there is an I/O error
    */
   public static Map<String, byte[]> getXAttrs(FileSystem fileSystem,
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Buffer.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Buffer.java
index 50cc1a1912f..737d63d11d6 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Buffer.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Buffer.java
@@ -25,10 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * A byte sequence that is used as a Java native type for buffer.
- * It is resizable and distinguishes between the count of the seqeunce and
+ * It is resizable and distinguishes between the count of the sequence and
  * the current capacity.
  *
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
+ * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
 */
@Deprecated
@InterfaceAudience.Public
@@ -124,7 +124,7 @@ public class Buffer implements Comparable, Cloneable {
 
   /**
    * Change the capacity of the backing storage.
-   * The data is preserved if newCapacity >= getCount().
+   * The data is preserved if newCapacity {@literal >=} getCount().
    * @param newCapacity The new capacity in bytes.
    */
   public void setCapacity(int newCapacity) {
@@ -162,7 +162,7 @@ public class Buffer implements Comparable, Cloneable {
   public void truncate() {
     setCapacity(count);
   }
-  
+
   /**
    * Append specified bytes to the buffer.
    *
diff --git a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Utils.java b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Utils.java
index d5be59c92c1..59e2080c3ed 100644
--- a/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Utils.java
+++ b/hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/record/Utils.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.io.WritableComparator;
 import org.apache.hadoop.io.WritableUtils;
 
 /**
- * Various utility functions for Hadooop record I/O runtime.
+ * Various utility functions for Hadoop record I/O runtime.
 *
- * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
+ * @deprecated Replaced by <a href="http://avro.apache.org/">Avro</a>.
 */
@Deprecated
@InterfaceAudience.Public
@@ -462,8 +462,8 @@ public class Utils {
 
   /**
    * Serializes a long to a binary stream with zero-compressed encoding.
-   * For -112 <= i <= 127, only one byte is used with the actual value.
-   * For other values of i, the first byte value indicates whether the
+   * For {@literal -112 <= i <= 127}, only one byte is used with the actual
+   * value. For other values of i, the first byte value indicates whether the
    * long is positive or negative, and the number of bytes that follow.
    * If the first byte value v is between -113 and -120, the following long
    * is positive, with number of bytes that follow are -(v+112).
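For reference, the zero-compressed encoding this javadoc describes matches the
logic of org.apache.hadoop.io.WritableUtils#writeVLong, which Utils already
imports. A self-contained sketch (mirroring that well-known implementation, not
code added by the patch):

    import java.io.DataOutput;
    import java.io.IOException;

    final class VLong {
      static void writeVLong(DataOutput stream, long i) throws IOException {
        // Small values fit in a single byte holding the value itself.
        if (i >= -112 && i <= 127) {
          stream.writeByte((byte) i);
          return;
        }
        int len = -112;
        if (i < 0) {
          i ^= -1L;   // take one's complement
          len = -120; // negative range marker starts at -121
        }
        long tmp = i;
        while (tmp != 0) {
          tmp = tmp >> 8;
          len--;      // one marker step per payload byte
        }
        // First byte encodes sign and byte count, e.g. -114 = positive, 2 bytes.
        stream.writeByte((byte) len);
        len = (len < -120) ? -(len + 120) : -(len + 112);
        for (int idx = len; idx != 0; idx--) {
          int shift = (idx - 1) * 8;
          stream.writeByte((byte) ((i & (0xFFL << shift)) >> shift));
        }
      }
    }

As a worked example, 300 serializes as three bytes: -114 (positive, two payload
bytes), 0x01, 0x2C.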