diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index a1d2125bf17..7f47197aca4 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -697,6 +697,9 @@ Release 2.7.0 - UNRELEASED
    HADOOP-11638. OpensslSecureRandom.c pthreads_thread_id should support
    FreeBSD and Solaris in addition to Linux. (Kiran Kumar M R via cnauroth)

+    HADOOP-11720. [JDK8] Fix javadoc errors caused by incorrect or illegal
+    tags in hadoop-tools. (Akira AJISAKA via ozawa)
+
Release 2.6.1 - UNRELEASED

  INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
index 50cc1a1912f..737d63d11d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java
@@ -25,10 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;

/**
* A byte sequence that is used as a Java native type for buffer.
- * It is resizable and distinguishes between the count of the seqeunce and
+ * It is resizable and distinguishes between the count of the sequence and
* the current capacity.
*
- * @deprecated Replaced by Avro.
+ * @deprecated Replaced by Avro.
*/
@Deprecated
@InterfaceAudience.Public
@@ -124,7 +124,7 @@ public class Buffer implements Comparable, Cloneable {

/**
* Change the capacity of the backing storage.
- * The data is preserved if newCapacity >= getCount().
+ * The data is preserved if newCapacity {@literal >=} getCount().
* @param newCapacity The new capacity in bytes.
*/
public void setCapacity(int newCapacity) {
@@ -162,7 +162,7 @@ public class Buffer implements Comparable, Cloneable {
public void truncate() {
setCapacity(count);
}
-
+
/**
* Append specified bytes to the buffer.
*
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Utils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Utils.java
index d5be59c92c1..59e2080c3ed 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Utils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Utils.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;

/**
- * Various utility functions for Hadooop record I/O runtime.
+ * Various utility functions for Hadoop record I/O runtime.
*
- * @deprecated Replaced by Avro.
+ * @deprecated Replaced by Avro.
*/
@Deprecated
@InterfaceAudience.Public
@@ -462,8 +462,8 @@ public class Utils {

/**
* Serializes a long to a binary stream with zero-compressed encoding.
- * For -112 <= i <= 127, only one byte is used with the actual value.
- * For other values of i, the first byte value indicates whether the
+ * For {@literal -112 <= i <= 127}, only one byte is used with the actual
+ * value. For other values of i, the first byte value indicates whether the
* long is positive or negative, and the number of bytes that follow.
* If the first byte value v is between -113 and -120, the following long
* is positive, with number of bytes that follow are -(v+112).
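The encoding the corrected javadoc describes is the zero-compressed vlong format used throughout Hadoop record I/O (see the WritableUtils import in the hunk above). A minimal sketch of a writer that follows exactly the rules the javadoc states; this is an illustration of the scheme, not the patched Utils class itself:

```java
import java.io.DataOutput;
import java.io.IOException;

public final class ZeroCompressedLong {
  /**
   * Values in {@literal -112 <= i <= 127} take one byte; otherwise a length
   * byte (-113..-120 for positive, -121..-128 for negative values) is
   * followed by the value's non-zero big-endian bytes.
   */
  public static void writeVLong(DataOutput out, long i) throws IOException {
    if (i >= -112 && i <= 127) {
      out.writeByte((byte) i);            // single-byte case from the javadoc
      return;
    }
    int len = -112;
    if (i < 0) {
      i ^= -1L;                           // one's complement; sign moves into len
      len = -120;
    }
    for (long tmp = i; tmp != 0; tmp >>= 8) {
      len--;                              // one step per payload byte
    }
    out.writeByte((byte) len);            // e.g. -113 means one positive byte follows
    int bytes = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = bytes; idx != 0; idx--) {
      int shift = (idx - 1) * 8;
      out.writeByte((byte) ((i >> shift) & 0xFF));
    }
  }
}
```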
diff --git a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
index 78cb360c253..9d0b3a42c22 100644
--- a/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
+++ b/hadoop-tools/hadoop-ant/src/main/java/org/apache/hadoop/ant/DfsTask.java
@@ -41,8 +41,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
public class DfsTask extends Task {

/**
- * Default sink for {@link java.lang.System.out System.out}
- * and {@link java.lang.System.err System.err}.
+ * Default sink for {@link java.lang.System#out}
+ * and {@link java.lang.System#err}.
*/
private static final OutputStream nullOut = new OutputStream() {
public void write(int b) { /* ignore */ }
@@ -171,7 +171,7 @@ public class DfsTask extends Task {
}

/**
- * Invoke {@link org.apache.hadoop.fs.FsShell#doMain FsShell.doMain} after a
+ * Invoke {@link org.apache.hadoop.fs.FsShell#main} after a
* few cursory checks of the configuration.
*/
public void execute() throws BuildException {
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
index 5afc9001cbf..5f0ad83a5da 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3/S3FileSystem.java
@@ -44,10 +44,9 @@ import org.apache.hadoop.io.retry.RetryProxy;
import org.apache.hadoop.util.Progressable;

/**
- * <p>
* A block-based {@link FileSystem} backed by
* Amazon S3.
- * </p>
+ *
* @see NativeS3FileSystem
*/
@InterfaceAudience.Public
@@ -70,7 +69,6 @@ public class S3FileSystem extends FileSystem {

/**
* Return the protocol scheme for the FileSystem.
- * <p/>
- *
+ *
* @return <code>s3</code>
*/
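The DfsTask and S3FileSystem hunks above show the most common fixes in this patch: JDK8 runs javadoc with doclint enabled, which rejects raw angle brackets, self-closing tags such as <p/>, and {@link} targets written in dotted form. A compact, illustrative example of the accepted forms (assumed typical usage, not code taken from the patch):

```java
/**
 * Comparisons are wrapped so the angle brackets are not parsed as HTML:
 * {@literal -112 <= i <= 127} renders literally and passes doclint.
 * <p>
 * Paragraph breaks use the open tag only; doclint rejects the self-closing
 * form. Member references put the member after '#', as in
 * {@link java.lang.System#out} and {@link java.lang.System#err}.
 */
public class DoclintExamples {
}
```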
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
index a29c47b99f6..68195817ee4 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/S3AFastOutputStream.java
@@ -57,11 +57,11 @@ import java.util.concurrent.ThreadPoolExecutor;
/**
* Upload files/parts asap directly from a memory buffer (instead of buffering
* to a file).
- * <p/>
+ * <p>
* Uploads are managed low-level rather than through the AWS TransferManager.
* This allows for uploading each part of a multi-part upload as soon as
* the bytes are in memory, rather than waiting until the file is closed.
- * <p/>
+ * <p>
* Unstable: statistics and error handling might evolve
*/
@InterfaceStability.Unstable
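The class comment above describes uploading each part of a multi-part upload as soon as its bytes are buffered, instead of staging the whole file first. A self-contained sketch of that idea; the PartUploader interface and the buffering details are invented for illustration, and this is not the actual S3AFastOutputStream code:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

class EagerMultipartWriter {
  /** Stand-in for the S3 client call that uploads one part and returns its ETag. */
  interface PartUploader {
    String upload(int partNumber, byte[] data);
  }

  private final PartUploader uploader;
  private final ExecutorService pool = Executors.newFixedThreadPool(4);
  private final List<Future<String>> pendingParts = new ArrayList<>();
  private int nextPartNumber = 1;

  EagerMultipartWriter(PartUploader uploader) {
    this.uploader = uploader;
  }

  /** Called whenever the in-memory buffer reaches the part size. */
  void flushPart(byte[] buffer) {
    final int partNumber = nextPartNumber++;
    final byte[] copy = buffer.clone();   // detach from the live write buffer
    pendingParts.add(pool.submit(() -> uploader.upload(partNumber, copy)));
  }

  /** On close(), wait only for parts still in flight, then complete the upload. */
  List<String> complete() throws Exception {
    List<String> etags = new ArrayList<>();
    for (Future<String> part : pendingParts) {
      etags.add(part.get());
    }
    pool.shutdown();
    return etags;   // would be passed to CompleteMultipartUpload
  }
}
```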
diff --git a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
index e490edf8a22..c34d53e4d08 100644
--- a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
+++ b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
@@ -62,24 +62,29 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

/**
- * <p>
* A {@link FileSystem} for reading and writing files stored on
* Amazon S3.
* Unlike {@link org.apache.hadoop.fs.s3.S3FileSystem} this implementation
* stores files on S3 in their
* native form so they can be read by other S3 tools.
- *
+ * <p>
* A note about directories. S3 of course has no "native" support for them.
* The idiom we choose then is: for any directory created by this class,
* we use an empty object "#{dirpath}_$folder$" as a marker.
* Further, to interoperate with other S3 tools, we also accept the following:
- * - an object "#{dirpath}/' denoting a directory marker
- * - if there exists any objects with the prefix "#{dirpath}/", then the
- * directory is said to exist
- * - if both a file with the name of a directory and a marker for that
- * directory exists, then the *file masks the directory*, and the directory
- * is never returned.
- * </p>
+ * <ul>
+ * <li>an object "#{dirpath}/' denoting a directory marker</li>
+ * <li>if there exists any objects with the prefix "#{dirpath}/", then the
+ * directory is said to exist</li>
+ * <li>if both a file with the name of a directory and a marker for that
+ * directory exists, then the *file masks the directory*, and the directory
+ * is never returned</li>
+ * </ul>
*/
@@ -82,7 +87,6 @@ public class NativeS3FileSystem extends FileSystem {

/**
* Return the protocol scheme for the FileSystem.
- * <p/>
- *
+ *
* @return <code>s3n</code>
*/
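The directory idiom spelled out in the rewritten javadoc can be made concrete. A sketch against an invented ObjectStore interface (the real class works through its NativeFileSystemStore); only the key conventions come from the javadoc above:

```java
import java.util.List;

interface ObjectStore {
  boolean exists(String key);               // is there an object with this exact key?
  List<String> listByPrefix(String prefix); // keys that start with the prefix
}

class S3DirectoryIdiom {
  private static final String FOLDER_SUFFIX = "_$folder$";

  /** A directory exists if any of the accepted markers is present. */
  static boolean isDirectory(ObjectStore store, String dirpath) {
    if (store.exists(dirpath + FOLDER_SUFFIX)) {
      return true;   // marker written by this FileSystem: "#{dirpath}_$folder$"
    }
    if (store.exists(dirpath + "/")) {
      return true;   // marker written by other S3 tools: "#{dirpath}/"
    }
    // any object under the prefix "#{dirpath}/" also implies the directory
    return !store.listByPrefix(dirpath + "/").isEmpty();
  }

  /** If a plain file and a directory marker share a name, the file wins. */
  static boolean fileMasksDirectory(ObjectStore store, String path) {
    return store.exists(path) && isDirectory(store, path);
  }
}
```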
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
index c6ba84fc479..5dc096397d2 100644
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/AzureNativeFileSystemStore.java
@@ -600,8 +600,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* Azure.
*
* @throws AzureException
- * @throws ConfigurationException
- *
*/
private void configureAzureStorageSession() throws AzureException {
@@ -705,7 +703,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* raised on errors communicating with Azure storage.
* @throws IOException
* raised on errors performing I/O or setting up the session.
- * @throws URISyntaxExceptions
+ * @throws URISyntaxException
* raised on creating mal-formed URI's.
*/
private void connectUsingAnonymousCredentials(final URI uri)
@@ -1036,7 +1034,6 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
/**
* Checks if the given key in Azure Storage should be stored as a page
* blob instead of block blob.
- * @throws URISyntaxException
*/
public boolean isPageBlobKey(String key) {
return isKeyForDirectorySet(key, pageBlobDirs);
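The method above delegates to isKeyForDirectorySet, which decides page-blob versus block-blob placement from a configured set of directories. A simplified guess at what such a prefix test looks like; the real AzureNativeFileSystemStore logic may differ in details such as key normalization:

```java
import java.util.Set;

class DirectorySetCheck {
  /** True if the key sits inside any directory of the configured set. */
  static boolean isKeyForDirectorySet(String key, Set<String> dirSet) {
    for (String dir : dirSet) {
      String prefix = dir.endsWith("/") ? dir : dir + "/";
      if (key.equals(dir) || key.startsWith(prefix)) {
        return true;   // e.g. the key lives under a configured page-blob directory
      }
    }
    return false;
  }
}
```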
@@ -1755,7 +1752,7 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* the path and returns a path relative to the root directory of the
* container.
*
- * @param blob
+ * @param directory
* - adjust the key to this directory to a path relative to the root
* directory
*
@@ -2142,14 +2139,10 @@ public class AzureNativeFileSystemStore implements NativeFileSystemStore {
* uses a in-order first traversal of blob directory structures to maintain
* the sorted order of the blob names.
*
- * @param dir
- * -- Azure blob directory
- *
- * @param list
- * -- a list of file metadata objects for each non-directory blob.
- *
- * @param maxListingLength
- * -- maximum length of the built up list.
+ * @param aCloudBlobDirectory Azure blob directory
+ * @param aFileMetadataList a list of file metadata objects for each
+ * non-directory blob.
+ * @param maxListingCount maximum length of the built up list.
*/
private void buildUpList(CloudBlobDirectoryWrapper aCloudBlobDirectory,
ArrayList<FileMetadata> aFileMetadataList, final int maxListingCount)
diff --git a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
--- a/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
+++ b/hadoop-tools/hadoop-azure/src/main/java/org/apache/hadoop/fs/azure/NativeAzureFileSystem.java
@@ -53,9 +53,7 @@
/**
- * <p>
* A {@link FileSystem} for reading and writing files stored on Windows Azure. This implementation is
* blob-based and stores files on Azure in their native form so they can be read
* by other Azure tools.
- * </p>
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
@@ -218,9 +216,11 @@ public class NativeAzureFileSystem extends FileSystem {
}

/**
- * Write to disk the information needed to redo folder rename, in JSON format.
- * The file name will be wasb://<sourceFolderPrefix>/folderName-RenamePending.json
+ * Write to disk the information needed to redo folder rename, in JSON
+ * format. The file name will be
+ * {@code wasb://<sourceFolderPrefix>/folderName-RenamePending.json}
* The file format will be:
+ * <pre>{@code
* {
*   FormatVersion: "1.0",
*   OperationTime: "<YYYY-MM-DD HH:MM:SS.MMM>",
@@ -239,7 +239,7 @@ public class NativeAzureFileSystem extends FileSystem {
*     "innerFile",
*     "innerFile2"
*   ]
- * }
+ * } }</pre>
* @throws IOException
*/
public void writeFile(FileSystem fs) throws IOException {
@@ -913,9 +913,6 @@ public class NativeAzureFileSystem extends FileSystem {
* The create also includes the name of the original key value which is
* stored in the m_key member variable. This method should only be called
* when the stream is closed.
- *
- * @param anEncodedKey
- *          Encoding of the original key stored in m_key member.
*/
private void restoreKey() throws IOException {
store.rename(getEncodedKey(), getKey());
@@ -1796,7 +1793,7 @@ public class NativeAzureFileSystem extends FileSystem {
*
* @param permission
*          The permission to mask.
- * @param applyDefaultUmask
+ * @param applyMode
*          Whether to also apply the default umask.
* @return The masked persmission.
*/
@@ -2409,7 +2406,6 @@ public class NativeAzureFileSystem extends FileSystem {
* recover the original key.
*
* @param aKey
- * @param numBuckets
* @return Encoded version of the original key.
*/
private static String encodeKey(String aKey) {
diff --git a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
index 9b31dfbdd8f..8af799a48fe 100644
--- a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
+++ b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/CopyListingFileStatus.java
@@ -79,7 +79,7 @@ public final class CopyListingFileStatus extends FileStatus {
/**
* Returns the full logical ACL.
*
- * @return List<AclEntry> containing full logical ACL
+ * @return List containing full logical ACL
*/
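The writeFile and CopyListingFileStatus hunks above show the remaining two doclint fixes in this patch: wrapping a multi-line sample in <pre>{@code ...}</pre>, and dropping generic types such as List<AclEntry> from @return text, where the angle brackets would be parsed as HTML. An illustrative skeleton using only the fields visible in the hunks; the full rename-pending record has more fields that the diff elides:

```java
import java.util.Arrays;
import java.util.List;

public class RenameRecordDocExample {
  /**
   * Returns the file names from the sample below, embedded the way the
   * patch embeds the rename-pending format:
   * <pre>{@code
   * {
   *   FormatVersion: "1.0",
   *   FileList: [
   *     "innerFile",
   *     "innerFile2"
   *   ]
   * } }</pre>
   * The second closing brace ends the {@code ...} inline tag itself;
   * without it the sample's own braces would leave the tag unterminated.
   *
   * @return List containing the sample file names
   */
  public List<String> sampleFileList() {
    return Arrays.asList("innerFile", "innerFile2");
  }
}
```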