HADOOP-6840. svn merge -c 1212062 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1212072 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2011-12-08 19:45:09 +00:00
parent 24e3d83445
commit 8ebf00f90f
4 changed files with 125 additions and 1 deletion


@@ -30,6 +30,9 @@ Release 0.23.1 - Unreleased
    HADOOP-7877. Update balancer CLI usage documentation to include the new
    -policy option. (szetszwo)

    HADOOP-6840. Support non-recursive create() in FileSystem and
    SequenceFile.Writer. (jitendra and eli via eli)

  OPTIMIZATIONS

  BUG FIXES


@@ -829,6 +829,53 @@ public abstract class FileSystem extends Configured implements Closeable {
      }
    }
  /**
   * Opens an FSDataOutputStream at the indicated Path with write-progress
   * reporting. Same as create(), except fails if the parent directory doesn't
   * already exist.
   * @param f the file name to open
   * @param overwrite if a file with this name already exists, then if true,
   * the file will be overwritten, and if false an error will be thrown.
   * @param bufferSize the size of the buffer to be used.
   * @param replication required block replication for the file.
   * @param blockSize block size to use for the file.
   * @param progress callback for reporting write progress.
   * @throws IOException if the file cannot be created.
   * @see #setPermission(Path, FsPermission)
   * @deprecated API only for 0.20-append
   */
  @Deprecated
  public FSDataOutputStream createNonRecursive(Path f,
      boolean overwrite,
      int bufferSize, short replication, long blockSize,
      Progressable progress) throws IOException {
    return this.createNonRecursive(f, FsPermission.getDefault(),
        overwrite, bufferSize, replication, blockSize, progress);
  }

  /**
   * Opens an FSDataOutputStream at the indicated Path with write-progress
   * reporting. Same as create(), except fails if the parent directory doesn't
   * already exist.
   * @param f the file name to open
   * @param permission file permission to apply.
   * @param overwrite if a file with this name already exists, then if true,
   * the file will be overwritten, and if false an error will be thrown.
   * @param bufferSize the size of the buffer to be used.
   * @param replication required block replication for the file.
   * @param blockSize block size to use for the file.
   * @param progress callback for reporting write progress.
   * @throws IOException if the file cannot be created.
   * @see #setPermission(Path, FsPermission)
   * @deprecated API only for 0.20-append
   */
  @Deprecated
  public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
      boolean overwrite,
      int bufferSize, short replication, long blockSize,
      Progressable progress) throws IOException {
    throw new IOException("createNonRecursive unsupported for this filesystem");
  }
  /**
   * Creates the given Path as a brand-new zero-length file. If
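
A minimal usage sketch of the new createNonRecursive() API (not part of this commit). It assumes an HDFS-backed FileSystem, since the default implementation added above only throws IOException; the class name and path are placeholders, and the parent directory of the target path must already exist.

// Usage sketch only -- not part of the commit.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class CreateNonRecursiveExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    // Unlike create(), createNonRecursive() fails if /existing/dir is
    // missing instead of creating it.
    Path file = new Path("/existing/dir/part-00000");
    FSDataOutputStream out = fs.createNonRecursive(
        file, FsPermission.getDefault(),
        false,                        // do not overwrite an existing file
        4096,                         // buffer size in bytes
        fs.getDefaultReplication(),   // block replication
        fs.getDefaultBlockSize(),     // block size in bytes
        null);                        // no Progressable callback
    try {
      out.writeBytes("hello\n");
    } finally {
      out.close();
    }
  }
}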


@@ -25,6 +25,7 @@ import java.security.MessageDigest;
import org.apache.commons.logging.*;
import org.apache.hadoop.util.Options;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.io.compress.CodecPool;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionInputStream;
@@ -440,6 +441,67 @@ public class SequenceFile {
                        Writer.metadata(metadata));
  }
  /**
   * Construct the preferred type of SequenceFile Writer.
   * @param fs The configured filesystem.
   * @param conf The configuration.
   * @param name The name of the file.
   * @param keyClass The 'key' type.
   * @param valClass The 'value' type.
   * @param bufferSize buffer size for the underlying output stream.
   * @param replication replication factor for the file.
   * @param blockSize block size for the file.
   * @param createParent create parent directory if non-existent
   * @param compressionType The compression type.
   * @param codec The compression codec.
   * @param metadata The metadata of the file.
   * @return Returns the handle to the constructed SequenceFile Writer.
   * @throws IOException
   */
  @Deprecated
  public static Writer
    createWriter(FileSystem fs, Configuration conf, Path name,
                 Class keyClass, Class valClass, int bufferSize,
                 short replication, long blockSize, boolean createParent,
                 CompressionType compressionType, CompressionCodec codec,
                 Metadata metadata) throws IOException {
    return createWriter(FileContext.getFileContext(fs.getUri(), conf),
        conf, name, keyClass, valClass, compressionType, codec,
        metadata, EnumSet.of(CreateFlag.CREATE),
        CreateOpts.bufferSize(bufferSize),
        createParent ? CreateOpts.createParent()
                     : CreateOpts.donotCreateParent(),
        CreateOpts.repFac(replication),
        CreateOpts.blockSize(blockSize)
      );
  }

  /**
   * Construct the preferred type of SequenceFile Writer.
   * @param fc The context for the specified file.
   * @param conf The configuration.
   * @param name The name of the file.
   * @param keyClass The 'key' type.
   * @param valClass The 'value' type.
   * @param compressionType The compression type.
   * @param codec The compression codec.
   * @param metadata The metadata of the file.
   * @param createFlag gives the semantics of create: overwrite, append etc.
   * @param opts file creation options; see {@link CreateOpts}.
   * @return Returns the handle to the constructed SequenceFile Writer.
   * @throws IOException
   */
  public static Writer
    createWriter(FileContext fc, Configuration conf, Path name,
                 Class keyClass, Class valClass,
                 CompressionType compressionType, CompressionCodec codec,
                 Metadata metadata,
                 final EnumSet<CreateFlag> createFlag, CreateOpts... opts)
      throws IOException {
    return createWriter(conf, fc.create(name, createFlag, opts),
        keyClass, valClass, compressionType, codec, metadata).ownStream();
  }

  /**
   * Construct the preferred type of SequenceFile Writer.
   * @param fs The configured filesystem.
@@ -1063,6 +1125,8 @@ public class SequenceFile {
    boolean isCompressed() { return compress != CompressionType.NONE; }
    boolean isBlockCompressed() { return compress == CompressionType.BLOCK; }

    Writer ownStream() { this.ownOutputStream = true; return this; }
    /** Write and flush the file header. */
    private void writeFileHeader()
        throws IOException {
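
A minimal sketch (not part of this commit) of writing a SequenceFile through the new FileContext-based createWriter() overload. The file path and key/value types are placeholders; CreateOpts.donotCreateParent() makes the create fail if the parent directory is missing, which is the non-recursive behavior this change introduces.

// Usage sketch only -- not part of the commit.
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.compress.DefaultCodec;

public class NonRecursiveSequenceFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileContext fc = FileContext.getFileContext(conf);
    Path file = new Path("/existing/dir/data.seq");
    SequenceFile.Writer writer = SequenceFile.createWriter(
        fc, conf, file,
        LongWritable.class, Text.class,
        SequenceFile.CompressionType.RECORD, new DefaultCodec(),
        new SequenceFile.Metadata(),
        EnumSet.of(CreateFlag.CREATE),     // fail if the file already exists
        CreateOpts.donotCreateParent());   // fail if /existing/dir is missing
    try {
      writer.append(new LongWritable(1L), new Text("first record"));
    } finally {
      writer.close();
    }
  }
}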


@@ -29,7 +29,6 @@ import org.apache.commons.logging.Log;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.Options.CreateOpts;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -49,6 +48,17 @@ public class TestFilterFileSystem extends TestCase {
    public boolean isDirectory(Path f) { return false; }
    public boolean isFile(Path f) { return false; }
    public boolean createNewFile(Path f) { return false; }
    public FSDataOutputStream createNonRecursive(Path f,
        boolean overwrite,
        int bufferSize, short replication, long blockSize,
        Progressable progress) throws IOException {
      return null;
    }
    public FSDataOutputStream createNonRecursive(Path f, FsPermission permission,
        boolean overwrite, int bufferSize, short replication, long blockSize,
        Progressable progress) throws IOException {
      return null;
    }
    public boolean mkdirs(Path f) { return false; }
    public FSDataInputStream open(Path f) { return null; }
    public FSDataOutputStream create(Path f) { return null; }