diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
index 13632fb1bcd..cb99c88112a 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/AuthenticationException.java
@@ -14,7 +14,7 @@
package org.apache.hadoop.security.authentication.client;
/**
- * Exception thrown when an authentication error occurrs.
+ * Exception thrown when an authentication error occurs.
*/
public class AuthenticationException extends Exception {
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
index 46d94b88dec..29ca9cf93ec 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/PseudoAuthenticator.java
@@ -57,7 +57,7 @@ public class PseudoAuthenticator implements Authenticator {
* If the response is successful it will update the authentication token.
*
* @param url the URl to authenticate against.
- * @param token the authencation token being used for the user.
+ * @param token the authentication token being used for the user.
*
* @throws IOException if an IO error occurred.
* @throws AuthenticationException if an authentication error occurred.
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 1234ea08a5b..3c636ecd4ca 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -198,6 +198,9 @@ Trunk (Unreleased)
HADOOP-11781. fix race conditions and add URL support to
smart-apply-patch.sh (Raymie Stata via aw)
+ HADOOP-11850. Typos in hadoop-common java docs. (Surendra Singh Lilhore
+ via jghoman)
+
BUG FIXES
HADOOP-11473. test-patch says "-1 overall" even when all checks are +1
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
index 4855e0c3d2d..2eb8b959b2e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BufferedFSInputStream.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
- * A class optimizes reading from FSInputStream by bufferring
+ * A class that optimizes reading from FSInputStream by buffering
*/
@InterfaceAudience.Private
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
index dddf0ce815f..3b8ecea5a90 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java
@@ -662,7 +662,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
* @param inPos the position of the beginning of the bad data in the file
* @param sums the stream open on the checksum file
* @param sumsPos the position of the beginning of the bad data in the checksum file
- * @return if retry is neccessary
+ * @return if retry is necessary
*/
public boolean reportChecksumFailure(Path f, FSDataInputStream in,
long inPos, FSDataInputStream sums, long sumsPos) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
index 7dc4a809cd9..ba0f1dd3677 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFs.java
@@ -507,7 +507,7 @@ public abstract class ChecksumFs extends FilterFs {
* @param sums the stream open on the checksum file
* @param sumsPos the position of the beginning of the bad data in the
* checksum file
- * @return if retry is neccessary
+ * @return if retry is necessary
*/
public boolean reportChecksumFailure(Path f, FSDataInputStream in,
long inPos, FSDataInputStream sums, long sumsPos) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
index ccd6960dd0f..678ce7f37dd 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ContentSummary.java
@@ -175,7 +175,7 @@ public class ContentSummary implements Writable{
/** Return the directory quota */
public long getQuota() {return quota;}
- /** Retuns storage space consumed */
+ /** Returns storage space consumed */
public long getSpaceConsumed() {return spaceConsumed;}
/** Returns storage space quota */
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
index a88938e6f36..889ccc1486e 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSInputChecker.java
@@ -112,7 +112,7 @@ abstract public class FSInputChecker extends FSInputStream {
* for sequential reading.
*
* @param pos chunkPos
- * @param buf desitination buffer
+ * @param buf destination buffer
* @param offset offset in buf at which to store data
* @param len maximum number of bytes to read
* @param checksum the data buffer into which to write checksums
@@ -123,7 +123,7 @@ abstract public class FSInputChecker extends FSInputStream {
/** Return position of beginning of chunk containing pos.
*
- * @param pos a postion in the file
+ * @param pos a position in the file
* @return the starting position of the chunk which contains the byte
*/
abstract protected long getChunkPosition(long pos);
@@ -388,7 +388,7 @@ abstract public class FSInputChecker extends FSInputStream {
* This produces no exception and an attempt to read from
* the stream will result in -1 indicating the end of the file.
*
- * @param pos the postion to seek to.
+ * @param pos the position to seek to.
* @exception IOException if an I/O error occurs.
* ChecksumException if the chunk to seek to is corrupted
*/
@@ -423,7 +423,7 @@ abstract public class FSInputChecker extends FSInputStream {
* stm
*
* @param stm an input stream
- * @param buf destiniation buffer
+ * @param buf destination buffer
* @param offset offset at which to store data
* @param len number of bytes to read
* @return actual number of bytes read
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index d2998b6e9d1..bdc55853d8b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -92,7 +92,7 @@ abstract public class FSOutputSummer extends OutputStream {
* in a checksum chunk are in the buffer. If the buffer is empty and
* requested length is at least as large as the size of next checksum chunk
* size, this method will checksum and write the chunk directly
- * to the underlying output stream. Thus it avoids uneccessary data copy.
+ * to the underlying output stream. Thus it avoids unnecessary data copy.
*
* @param b the data.
* @param off the start offset in the data.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index aad8be9e2cf..0b5863b9764 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -342,7 +342,7 @@ public class FileContext {
*
* @param defFS
* @param aConf
- * @return new FileContext with specifed FS as default.
+ * @return new FileContext with specified FS as default.
*/
public static FileContext getFileContext(final AbstractFileSystem defFS,
final Configuration aConf) {
@@ -612,7 +612,7 @@ public class FileContext {
* @param opts file creation options; see {@link Options.CreateOpts}.
*
- * Finds any occurence of <code>what</code> in the backing
+ * Finds any occurrence of <code>what</code> in the backing
* buffer, starting as position <code>start</code>. The starting
* position is measured in bytes and the return value is in
* terms of byte position in the buffer. The backing buffer is
* not converted to a string for this operation.
- * @return byte position of the first occurence of the search
+ * @return byte position of the first occurrence of the search
* string in the UTF-8 buffer or -1 if not found
*/
public int find(String what, int start) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
index 35c6ef9a52a..1754b8d06f6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableComparator.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.util.ReflectionUtils;
/** A Comparator for {@link WritableComparable}s.
*
- * This base implemenation uses the natural ordering. To define alternate
+ * This base implementation uses the natural ordering. To define alternate
* orderings, override {@link #compare(WritableComparable,WritableComparable)}.
*
* One may optimize compare-intensive operations by overriding
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
index a11a86008d9..ae615dde6f0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/WritableUtils.java
@@ -223,7 +223,7 @@ public final class WritableUtils {
}
/**
- * Make a copy of the writable object using serialiation to a buffer
+ * Make a copy of the writable object using serialization to a buffer
* @param dst the object to copy from
* @param src the object to copy into, which is destroyed
* @throws IOException
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
index 57fc07b617a..1f7632b047b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/bzip2/CBZip2InputStream.java
@@ -1219,7 +1219,7 @@ public class CBZip2InputStream extends InputStream implements BZip2Constants {
* Initializes the {@link #tt} array.
*
* This method is called when the required length of the array is known.
- * I don't initialize it at construction time to avoid unneccessary
+ * I don't initialize it at construction time to avoid unnecessary
* memory allocation when compressing small files.
*/
final int[] initTT(int length) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
index 5fbd508c58d..e7f4c8319e3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/file/tfile/BoundedRangeFileInputStream.java
@@ -42,7 +42,7 @@ class BoundedRangeFileInputStream extends InputStream {
* @param in
* The FSDataInputStream we connect to.
* @param offset
- * Begining offset of the region.
+ * Beginning offset of the region.
* @param length
* Length of the region.
*
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java
index bd68ecb1ad3..715459ab2f3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/FairCallQueueMXBean.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.ipc;
public interface FairCallQueueMXBean {
- // Get the size of each subqueue, the index corrosponding to the priority
+ // Get the size of each subqueue, the index corresponding to the priority
// level.
int[] getQueueSizes();
long[] getOverflowedCalls();