HADOOP-9323. Merge change 1449977 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1449979 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2013-02-26 00:16:37 +00:00
parent dcc8325c12
commit a3e4b7a6fb
10 changed files with 16 additions and 34 deletions

View File

@@ -48,6 +48,8 @@ Release 2.0.4-beta - UNRELEASED
HADOOP-8569. CMakeLists.txt: define _GNU_SOURCE and _LARGEFILE_SOURCE.
(Colin Patrick McCabe via atm)
HADOOP-9323. Fix typos in API documentation. (suresh)
Release 2.0.3-alpha - 2013-02-06
INCOMPATIBLE CHANGES

View File

@@ -21,8 +21,6 @@ package org.apache.hadoop.fs;
import java.io.*;
import java.util.Arrays;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -32,7 +30,7 @@ import org.apache.hadoop.util.PureJavaCrc32;
/****************************************************************
* Abstract Checksumed FileSystem.
* It provide a basice implementation of a Checksumed FileSystem,
* It provide a basic implementation of a Checksumed FileSystem,
* which creates a checksum file for each raw file.
* It generates & verifies checksums at the client side.
*
@@ -118,9 +116,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
* It verifies that data matches checksums.
*******************************************************/
private static class ChecksumFSInputChecker extends FSInputChecker {
public static final Log LOG
= LogFactory.getLog(FSInputChecker.class);
private ChecksumFileSystem fs;
private FSDataInputStream datas;
private FSDataInputStream sums;
@@ -374,19 +369,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
private FSDataOutputStream sums;
private static final float CHKSUM_AS_FRACTION = 0.01f;
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
Path file,
boolean overwrite,
short replication,
long blockSize,
Configuration conf)
throws IOException {
this(fs, file, overwrite,
conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT),
replication, blockSize, null);
}
public ChecksumFSOutputSummer(ChecksumFileSystem fs,
Path file,
boolean overwrite,

View File

@@ -1336,7 +1336,7 @@ public final class FileContext {
*
* 2. Partially qualified URIs (eg scheme but no host)
*
* fs:///A/B/file Resolved according to the target file sytem. Eg resolving
* fs:///A/B/file Resolved according to the target file system. Eg resolving
* a symlink to hdfs:///A results in an exception because
* HDFS URIs must be fully qualified, while a symlink to
* file:///A will not since Hadoop's local file systems

View File

@@ -1865,7 +1865,7 @@ public abstract class FileSystem extends Configured implements Closeable {
*
* Some file systems like LocalFileSystem have an initial workingDir
* that we use as the starting workingDir. For other file systems
* like HDFS there is no built in notion of an inital workingDir.
* like HDFS there is no built in notion of an initial workingDir.
*
* @return if there is built in notion of workingDir then it
* is returned; else a null is returned.

View File

@@ -43,7 +43,7 @@ public interface PositionedReadable {
throws IOException;
/**
* Read number of bytes equalt to the length of the buffer, from a given
* Read number of bytes equal to the length of the buffer, from a given
* position within a file. This does not
* change the current offset of a file, and is thread-safe.
*/

View File

@@ -79,19 +79,17 @@ public abstract class TrashPolicy extends Configured {
/**
* Get an instance of the configured TrashPolicy based on the value
* of the configuration paramater fs.trash.classname.
* of the configuration parameter fs.trash.classname.
*
* @param conf the configuration to be used
* @param fs the file system to be used
* @param home the home directory
* @return an instance of TrashPolicy
*/
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
throws IOException {
Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname",
TrashPolicyDefault.class,
TrashPolicy.class);
TrashPolicy trash = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
Class<? extends TrashPolicy> trashClass = conf.getClass(
"fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
trash.initialize(conf, fs, home); // initialize TrashPolicy
return trash;
}

View File

@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
/**
* A byte sequence that is usable as a key or value.
* It is resizable and distinguishes between the size of the seqeunce and
* It is resizable and distinguishes between the size of the sequence and
* the current capacity. The hash function is the front of the md5 of the
* buffer. The sort order is the same as memcmp.
*/

View File

@@ -128,7 +128,7 @@ public class Text extends BinaryComparable
/**
* Returns the Unicode Scalar Value (32-bit integer value)
* for the character at <code>position</code>. Note that this
* method avoids using the converter or doing String instatiation
* method avoids using the converter or doing String instantiation
* @return the Unicode scalar value at position or -1
* if the position is invalid or points to a
* trailing byte
@@ -527,7 +527,7 @@ public class Text extends BinaryComparable
int length = 0;
int state = LEAD_BYTE;
while (count < start+len) {
int aByte = ((int) utf8[count] & 0xFF);
int aByte = utf8[count] & 0xFF;
switch (state) {
case LEAD_BYTE:

View File

@@ -192,7 +192,7 @@ public class Buffer implements Comparable, Cloneable {
int hash = 1;
byte[] b = this.get();
for (int i = 0; i < count; i++)
hash = (31 * hash) + (int)b[i];
hash = (31 * hash) + b[i];
return hash;
}

View File

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
/**
* Interface that alll the serializers have to implement.
* Interface that all the serializers have to implement.
*
* @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
*/