svn merge -c 1310507 from trunk. FIXES: HADOOP-8014. ViewFileSystem does not correctly implement getDefaultBlockSize, getDefaultReplication, getContentSummary (John George via bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1310510 13f79535-47bb-0310-9956-ffa450edef68
Robert Joseph Evans 2012-04-06 17:56:41 +00:00
parent 089f6ca742
commit 4d7187cd1d
7 changed files with 207 additions and 29 deletions
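
For background (not part of the diff below): a minimal sketch of how a client sees this fix through a viewfs mount. The mount-table entry, the hdfs://nn1 target, and the ViewFsDefaultsExample class and file path are hypothetical; getDefaultBlockSize(Path), getDefaultReplication(Path), and getContentSummary(Path) are the path-aware methods this change wires through ViewFileSystem and ChRootedFileSystem.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ViewFsDefaultsExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical mount point: the viewfs path /data is backed by hdfs://nn1/data.
    conf.set("fs.viewfs.mounttable.default.link./data", "hdfs://nn1/data");
    FileSystem viewFs = FileSystem.get(URI.create("viewfs:///"), conf);

    Path p = new Path("/data/part-00000");
    // With this fix, the path-based overloads resolve p to the backing
    // filesystem (hdfs://nn1) and return its defaults, instead of answering
    // from ViewFileSystem itself.
    long blockSize = viewFs.getDefaultBlockSize(p);
    short replication = viewFs.getDefaultReplication(p);
    System.out.println("blockSize=" + blockSize + " replication=" + replication);

    // getContentSummary(p) is delegated the same way. The no-argument
    // variants cannot name a mount point, so ViewFileSystem now throws
    // NotInMountpointException for them.
  }
}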

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -298,6 +298,9 @@ Release 0.23.3 - UNRELEASED
HADOOP-8180. Remove hsqldb since its not needed from pom.xml (Ravi Prakash
via tgraves)
HADOOP-8014. ViewFileSystem does not correctly implement getDefaultBlockSize,
getDefaultReplication, getContentSummary (John George via bobby)
Release 0.23.2 - UNRELEASED
NEW FEATURES

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -579,7 +579,8 @@ public abstract class FileSystem extends Configured implements Closeable {
*
* The FileSystem will simply return an elt containing 'localhost'.
*
* @param p path of file to get locations for
* @param p path is used to identify an FS since an FS could have
* another FS that it could be delegating the call to
* @param start offset into the given file
* @param len length for which to get locations for
*/
@@ -606,6 +607,17 @@ public abstract class FileSystem extends Configured implements Closeable {
conf.getInt("io.file.buffer.size", 4096));
}
/**
* Return a set of server default configuration values
* @param p path is used to identify an FS since an FS could have
* another FS that it could be delegating the call to
* @return server default configuration values
* @throws IOException
*/
public FsServerDefaults getServerDefaults(Path p) throws IOException {
return getServerDefaults();
}
/**
* Return the fully-qualified path of path f resolving the path
* through any symlinks or mount point
@@ -653,8 +665,8 @@ public abstract class FileSystem extends Configured implements Closeable {
throws IOException {
return create(f, overwrite,
getConf().getInt("io.file.buffer.size", 4096),
getDefaultReplication(),
getDefaultBlockSize());
getDefaultReplication(f),
getDefaultBlockSize(f));
}
/**
@@ -668,8 +680,8 @@ public abstract class FileSystem extends Configured implements Closeable {
throws IOException {
return create(f, true,
getConf().getInt("io.file.buffer.size", 4096),
getDefaultReplication(),
getDefaultBlockSize(), progress);
getDefaultReplication(f),
getDefaultBlockSize(f), progress);
}
/**
@@ -683,7 +695,7 @@ public abstract class FileSystem extends Configured implements Closeable {
return create(f, true,
getConf().getInt("io.file.buffer.size", 4096),
replication,
getDefaultBlockSize());
getDefaultBlockSize(f));
}
/**
@@ -699,7 +711,7 @@ public abstract class FileSystem extends Configured implements Closeable {
return create(f, true,
getConf().getInt("io.file.buffer.size", 4096),
replication,
getDefaultBlockSize(), progress);
getDefaultBlockSize(f), progress);
}
@@ -715,8 +727,8 @@ public abstract class FileSystem extends Configured implements Closeable {
int bufferSize
) throws IOException {
return create(f, overwrite, bufferSize,
getDefaultReplication(),
getDefaultBlockSize());
getDefaultReplication(f),
getDefaultBlockSize(f));
}
/**
@@ -733,8 +745,8 @@ public abstract class FileSystem extends Configured implements Closeable {
Progressable progress
) throws IOException {
return create(f, overwrite, bufferSize,
getDefaultReplication(),
getDefaultBlockSize(), progress);
getDefaultReplication(f),
getDefaultBlockSize(f), progress);
}
@@ -1916,11 +1928,31 @@ public abstract class FileSystem extends Configured implements Closeable {
return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
}
/** Return the number of bytes that large input files should optimally
* be split into to minimize i/o time. The given path will be used to
* locate the actual filesystem. The full path does not have to exist.
* @param f path of file
* @return the default block size for the path's filesystem
*/
public long getDefaultBlockSize(Path f) {
return getDefaultBlockSize();
}
/**
* Get the default replication.
*/
public short getDefaultReplication() { return 1; }
/**
* Get the default replication for a path. The given path will be used to
* locate the actual filesystem. The full path does not have to exist.
* @param path of the file
* @return default replication for the path's filesystem
*/
public short getDefaultReplication(Path path) {
return getDefaultReplication();
}
/**
* Return a file status object that represents the path.
* @param f The path we want information from

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.Progressable;
@@ -267,6 +268,7 @@ public class FilterFileSystem extends FileSystem {
return fs.mkdirs(f, permission);
}
/**
* The src file is on the local disk. Add it to FS at
* the given dst name.
@@ -336,19 +338,42 @@ public class FilterFileSystem extends FileSystem {
return fs.getUsed();
}
/** Return the number of bytes that large input files should optimally
* be split into to minimize i/o time. */
@Override
public long getDefaultBlockSize() {
return fs.getDefaultBlockSize();
}
/**
* Get the default replication.
*/
@Override
public short getDefaultReplication() {
return fs.getDefaultReplication();
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return fs.getServerDefaults();
}
// path variants delegate to underlying filesystem
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
return fs.getContentSummary(f);
}
@Override
public long getDefaultBlockSize(Path f) {
return fs.getDefaultBlockSize(f);
}
@Override
public short getDefaultReplication(Path f) {
return fs.getDefaultReplication(f);
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
return fs.getServerDefaults(f);
}
/**
* Get file status.
*/

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
@@ -208,11 +209,6 @@ class ChRootedFileSystem extends FilterFileSystem {
return super.getStatus(fullPath(p));
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return super.getServerDefaults();
}
@Override
public FileStatus[] listStatus(final Path f)
throws IOException {
@@ -273,4 +269,42 @@ class ChRootedFileSystem extends FilterFileSystem {
public Path resolvePath(final Path p) throws IOException {
return super.resolvePath(fullPath(p));
}
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
return super.getContentSummary(fullPath(f));
}
private static Path rootPath = new Path(Path.SEPARATOR);
@Override
public long getDefaultBlockSize() {
return getDefaultBlockSize(fullPath(rootPath));
}
@Override
public long getDefaultBlockSize(Path f) {
return super.getDefaultBlockSize(fullPath(f));
}
@Override
public short getDefaultReplication() {
return getDefaultReplication(fullPath(rootPath));
}
@Override
public short getDefaultReplication(Path f) {
return super.getDefaultReplication(fullPath(f));
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
return getServerDefaults(fullPath(rootPath));
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
return super.getServerDefaults(fullPath(f));
}
}

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ViewFileSystem.java

@@ -34,6 +34,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -41,6 +42,7 @@ import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
@@ -470,6 +472,57 @@ public class ViewFileSystem extends FileSystem {
}
}
@Override
public long getDefaultBlockSize() {
throw new NotInMountpointException("getDefaultBlockSize");
}
@Override
public short getDefaultReplication() {
throw new NotInMountpointException("getDefaultReplication");
}
@Override
public FsServerDefaults getServerDefaults() throws IOException {
throw new NotInMountpointException("getServerDefaults");
}
@Override
public long getDefaultBlockSize(Path f) {
try {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getDefaultBlockSize(res.remainingPath);
} catch (FileNotFoundException e) {
throw new NotInMountpointException(f, "getDefaultBlockSize");
}
}
@Override
public short getDefaultReplication(Path f) {
try {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getDefaultReplication(res.remainingPath);
} catch (FileNotFoundException e) {
throw new NotInMountpointException(f, "getDefaultReplication");
}
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getServerDefaults(res.remainingPath);
}
@Override
public ContentSummary getContentSummary(Path f) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getContentSummary(res.remainingPath);
}
@Override
public void setWriteChecksum(final boolean writeChecksum) {
List<InodeTree.MountPoint<FileSystem>> mountPoints =
@@ -742,5 +795,20 @@ public class ViewFileSystem extends FileSystem {
public void setVerifyChecksum(boolean verifyChecksum) {
// Noop for viewfs
}
@Override
public FsServerDefaults getServerDefaults(Path f) throws IOException {
throw new NotInMountpointException(f, "getServerDefaults");
}
@Override
public long getDefaultBlockSize(Path f) {
throw new NotInMountpointException(f, "getDefaultBlockSize");
}
@Override
public short getDefaultReplication(Path f) {
throw new NotInMountpointException(f, "getDefaultReplication");
}
}
}

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileSystemTestHelper.java

@@ -36,6 +36,7 @@ public final class FileSystemTestHelper {
System.getProperty("test.build.data", "target/test/data") + "/test";
private static final int DEFAULT_BLOCK_SIZE = 1024;
private static final int DEFAULT_NUM_BLOCKS = 2;
private static final short DEFAULT_NUM_REPL = 1;
private static String absTestRootDir = null;
/** Hidden constructor */
@@ -99,9 +100,9 @@ public final class FileSystemTestHelper {
* Create files with numBlocks blocks each with block size blockSize.
*/
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize, boolean createParent) throws IOException {
int blockSize, short numRepl, boolean createParent) throws IOException {
FSDataOutputStream out =
fSys.create(path, false, 4096, fSys.getDefaultReplication(), blockSize );
fSys.create(path, false, 4096, numRepl, blockSize );
byte[] data = getFileData(numBlocks, blockSize);
out.write(data, 0, data.length);
@@ -109,13 +110,19 @@ public final class FileSystemTestHelper {
return data.length;
}
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize, boolean createParent) throws IOException {
return createFile(fSys, path, numBlocks, blockSize, fSys.getDefaultReplication(), true);
}
public static long createFile(FileSystem fSys, Path path, int numBlocks,
int blockSize) throws IOException {
return createFile(fSys, path, numBlocks, blockSize, true);
}
}
public static long createFile(FileSystem fSys, Path path) throws IOException {
return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, true);
return createFile(fSys, path, DEFAULT_NUM_BLOCKS, DEFAULT_BLOCK_SIZE, DEFAULT_NUM_REPL, true);
}
public static long createFile(FileSystem fSys, String name) throws IOException {

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/TestChRootedFileSystem.java

@@ -23,6 +23,7 @@ import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
@@ -171,6 +172,14 @@ public class TestChRootedFileSystem {
Assert.assertTrue(fSysTarget.isDirectory(new Path(chrootedTo,"newDir/dirFooBar")));
}
@Test
public void testGetContentSummary() throws IOException {
// GetContentSummary of a dir
fSys.mkdirs(new Path("/newDir/dirFoo"));
ContentSummary cs = fSys.getContentSummary(new Path("/newDir/dirFoo"));
Assert.assertEquals(-1L, cs.getQuota());
Assert.assertEquals(-1L, cs.getSpaceQuota());
}
/**
* We would have liked renames across file system to fail but