HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)

yliu 2015-10-22 10:42:18 +08:00
parent 2d577115a5
commit 7579ec5bf5
12 changed files with 307 additions and 14 deletions
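In short: listing the exact path /.reserved now succeeds and returns its two virtual children, .inodes and raw, while mutating operations on that exact path are rejected. A minimal client-side sketch of the new behavior (the Configuration/cluster setup and class name here are assumptions for illustration, not part of the patch):

// Hypothetical sketch: lists the two reserved children after this patch.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListReservedExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Before this commit, listing /.reserved threw FileNotFoundException;
    // now it returns the two reserved children, .inodes and raw.
    for (FileStatus stat : fs.listStatus(new Path("/.reserved"))) {
      System.out.println(stat.getPath().getName());
    }
  }
}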

CHANGES.txt

@@ -719,6 +719,8 @@ Release 2.8.0 - UNRELEASED
       TestBlockManager.testBlocksAreNotUnderreplicatedInSingleRack.
       (Masatake Iwasaki via wang)
 
+    HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

FSDirAttrOp.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -50,6 +51,9 @@ public class FSDirAttrOp {
       FSDirectory fsd, final String srcArg, FsPermission permission)
       throws IOException {
     String src = srcArg;
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
@@ -69,6 +73,9 @@ public class FSDirAttrOp {
   static HdfsFileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;

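These guards reject the exact path /.reserved before any permission checking is done. A hedged client-side sketch of what a caller sees (cluster setup and class name assumed; the error text matches what the tests added later in this patch expect):

// Hypothetical sketch: setPermission on the exact reserved path is refused.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;

public class ReservedChmodExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    try {
      fs.setPermission(new Path("/.reserved"), new FsPermission((short) 0777));
    } catch (Exception e) {
      // Expected: the NameNode throws InvalidPathException for the exact
      // path, surfacing as "Invalid path name /.reserved".
      System.out.println("rejected: " + e.getMessage());
    }
  }
}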
FSDirDeleteOp.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -177,6 +178,10 @@ class FSDirDeleteOp {
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
     }
+
+    if (FSDirectory.isExactReservedName(src)) {
+      throw new InvalidPathException(src);
+    }
     FSDirectory fsd = fsn.getFSDirectory();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();

FSDirRenameOp.java

@@ -499,6 +499,12 @@ class FSDirRenameOp {
           + error);
       throw new IOException(error);
     }
+
+    if (FSDirectory.isExactReservedName(src)
+        || FSDirectory.isExactReservedName(dst)) {
+      error = "Cannot rename to or from /.reserved";
+      throw new InvalidPathException(error);
+    }
   }
 
   private static void validateOverwrite(

FSDirStatAndListingOp.java

@@ -226,6 +226,9 @@ class FSDirStatAndListingOp {
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
+    if (FSDirectory.isExactReservedName(srcs)) {
+      return getReservedListing(fsd);
+    }
     fsd.readLock();
     try {
@@ -335,6 +338,15 @@ class FSDirStatAndListingOp {
         listing, snapshots.size() - skipSize - numOfListing);
   }
 
+  /**
+   * Get a listing of the /.reserved directory.
+   * @param fsd FSDirectory
+   * @return listing containing child directories of /.reserved
+   */
+  private static DirectoryListing getReservedListing(FSDirectory fsd) {
+    return new DirectoryListing(fsd.getReservedStatuses(), 0);
+  }
+
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
    * @param src The string representation of the path to the file
@@ -371,6 +383,10 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath)
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
+    if (FSDirectory.isExactReservedName(src)) {
+      return FSDirectory.DOT_RESERVED_STATUS;
+    }
+
     if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
       if (fsd.getINode4DotSnapshot(srcs) != null) {
         return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,

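With these changes, the listing path serves /.reserved from the synthetic reserved statuses, and the file-info path returns FSDirectory.DOT_RESERVED_STATUS for the exact path without touching the inode tree. A small sketch of the client view (setup and class name assumed):

// Hypothetical sketch: stat of /.reserved is served from a constant status.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReservedStatExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus st = fs.getFileStatus(new Path("/.reserved"));
    // Expected: name ".reserved", reported as a directory.
    System.out.println(st.getPath().getName() + " dir=" + st.isDirectory());
  }
}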
FSDirSymlinkOp.java

@@ -42,7 +42,8 @@ class FSDirSymlinkOp {
     if (!DFSUtil.isValidName(link)) {
       throw new InvalidPathException("Invalid link name: " + link);
     }
-    if (FSDirectory.isReservedName(target) || target.isEmpty()) {
+    if (FSDirectory.isReservedName(target) || target.isEmpty()
+        || FSDirectory.isExactReservedName(target)) {
       throw new InvalidPathException("Invalid target name: " + target);
     }

FSDirWriteFileOp.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -347,6 +348,12 @@ class FSDirWriteFileOp {
           " already exists as a directory");
     }
+
+    if (FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src)
+        && !FSDirectory.isReservedRawName(src)
+        && !FSDirectory.isReservedInodesName(src))) {
+      throw new InvalidPathException(src);
+    }
     final INodeFile myFile = INodeFile.valueOf(inode, src, true);
     if (fsd.isPermissionEnabled()) {
       if (overwrite && myFile != null) {

FSDirectory.java

@@ -122,6 +122,11 @@ public class FSDirectory implements Closeable {
   public final static byte[] DOT_INODES =
       DFSUtil.string2Bytes(DOT_INODES_STRING);
 
+  public final static HdfsFileStatus DOT_RESERVED_STATUS =
+      new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
+          null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
+          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+
   INodeDirectory rootDir;
   private final FSNamesystem namesystem;
   private volatile boolean skipQuotaCheck = false; //skip while consuming edits
@@ -169,6 +174,8 @@ public class FSDirectory implements Closeable {
   private final FSEditLog editLog;
 
+  private HdfsFileStatus[] reservedStatuses;
+
   private INodeAttributeProvider attributeProvider;
 
   public void setINodeAttributeProvider(INodeAttributeProvider provider) {
@@ -313,6 +320,43 @@ public class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
   }
 
+  /**
+   * Get HdfsFileStatuses of the reserved paths: .inodes and raw.
+   *
+   * @return Array of HdfsFileStatus
+   */
+  HdfsFileStatus[] getReservedStatuses() {
+    Preconditions.checkNotNull(reservedStatuses, "reservedStatuses should"
+        + " not be null. It is populated when FSNamesystem loads FS image."
+        + " It has to be set at this time instead of initialization time"
+        + " because CTime is loaded during FSNamesystem#loadFromDisk.");
+    return reservedStatuses;
+  }
+
+  /**
+   * Create HdfsFileStatuses of the reserved paths: .inodes and raw.
+   * These statuses are solely for listing purposes. All other operations
+   * on the reserved dirs are disallowed.
+   * Operations on sub directories are resolved by
+   * {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
+   * and conducted directly, without the need to check the reserved dirs.
+   *
+   * This method should only be invoked once during namenode initialization.
+   *
+   * @param cTime CTime of the file system
+   */
+  void createReservedStatuses(long cTime) {
+    HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
+        new FsPermission((short) 0770), null, supergroup, null,
+        DOT_INODES, -1L, 0, null,
+        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+    HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
+        new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
+        0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
+    reservedStatuses = new HdfsFileStatus[] {inodes, raw};
+  }
+
   FSNamesystem getFSNamesystem() {
     return namesystem;
   }
@@ -1263,11 +1307,20 @@ public class FSDirectory implements Closeable {
     return src.startsWith(DOT_RESERVED_PATH_PREFIX + Path.SEPARATOR);
   }
 
+  public static boolean isExactReservedName(String src) {
+    return CHECK_RESERVED_FILE_NAMES && src.equals(DOT_RESERVED_PATH_PREFIX);
+  }
+
   static boolean isReservedRawName(String src) {
     return src.startsWith(DOT_RESERVED_PATH_PREFIX +
         Path.SEPARATOR + RAW_STRING);
   }
 
+  static boolean isReservedInodesName(String src) {
+    return src.startsWith(DOT_RESERVED_PATH_PREFIX +
+        Path.SEPARATOR + DOT_INODES_STRING);
+  }
+
   /**
    * Resolve a /.reserved/... path to a non-reserved path.
    * <p/>
@@ -1318,9 +1371,15 @@ public class FSDirectory implements Closeable {
       /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
       if (nComponents == 3) {
         return Path.SEPARATOR;
+      } else {
+        if (nComponents == 4
+            && Arrays.equals(DOT_RESERVED, pathComponents[3])) {
+          /* It's /.reserved/raw/.reserved so don't strip */
+          return src;
       } else {
         return constructRemainingPath("", pathComponents, 3);
       }
+      }
     } else {
       /* It's some sort of /.reserved/<unknown> path. Ignore it. */
       return src;

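For orientation, a standalone mirror of the three predicates above; the inlined "/.reserved" prefix and the always-on check stand in for the real DOT_RESERVED_PATH_PREFIX and CHECK_RESERVED_FILE_NAMES fields, so this is a sketch, not the actual FSDirectory code:

// Hypothetical mirror of the reserved-path predicates, for illustration only.
public class ReservedNames {
  static final String PREFIX = "/.reserved";

  static boolean isExactReservedName(String src) {
    return src.equals(PREFIX);                  // exactly "/.reserved"
  }

  static boolean isReservedRawName(String src) {
    return src.startsWith(PREFIX + "/raw");     // "/.reserved/raw/..."
  }

  static boolean isReservedInodesName(String src) {
    return src.startsWith(PREFIX + "/.inodes"); // "/.reserved/.inodes/..."
  }

  public static void main(String[] args) {
    System.out.println(isExactReservedName("/.reserved"));             // true
    System.out.println(isReservedRawName("/.reserved/raw/a"));         // true
    System.out.println(isReservedInodesName("/.reserved/.inodes/1"));  // true
    System.out.println(isExactReservedName("/.reserved/raw"));         // false
  }
}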
FSNamesystem.java

@@ -672,6 +672,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (nnMetrics != null) {
       nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
     }
+    namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime());
     return namesystem;
   }

TestGlobPaths.java

@@ -1193,10 +1193,6 @@ public class TestGlobPaths {
       Assert.assertEquals(reservedRoot,
           TestPath.mergeStatuses(wrap.
               globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
-      // These inodes don't show up via listStatus.
-      Assert.assertEquals("",
-          TestPath.mergeStatuses(wrap.
-              globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter())));
     }
   }
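The deleted assertion pinned down the old behavior: globbing under /.reserved matched nothing. With listing enabled, the same glob is expected to expand. A hedged sketch (mini-cluster setup and class name assumed, as in the tests in this patch):

// Hypothetical sketch: a glob under /.reserved should now expand.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ReservedGlobExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Expected with this patch: two matches, .inodes and raw.
    for (FileStatus m : fs.globStatus(new Path("/.reserved/*"))) {
      System.out.println(m.getPath().getName());
    }
  }
}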

TestDFSShell.java

@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica; import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage; import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
import org.apache.hadoop.hdfs.tools.DFSAdmin; import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.IOUtils;
@ -3025,4 +3026,197 @@ public class TestDFSShell {
public void testNoTrashConfig() throws Exception { public void testNoTrashConfig() throws Exception {
deleteFileUsingTrash(false, false); deleteFileUsingTrash(false, false);
} }
@Test (timeout = 30000)
public void testListReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
FsShell shell = new FsShell();
shell.setConf(conf);
FileStatus test = fs.getFileStatus(new Path("/.reserved"));
assertEquals(FSDirectory.DOT_RESERVED_STRING, test.getPath().getName());
// Listing /.reserved/ should show 2 items: raw and .inodes
FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
assertEquals(2, stats.length);
assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
stats[0].getGroup());
assertEquals("raw", stats[1].getPath().getName());
assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
stats[1].getGroup());
// Listing / should not show /.reserved
stats = fs.listStatus(new Path("/"));
assertEquals(0, stats.length);
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
runCmd(shell, "-ls", "/.reserved");
assertEquals(0, baos.toString().length());
runCmd(shell, "-ls", "/.reserved/raw/.reserved");
assertTrue(baos.toString().contains("No such file or directory"));
System.setErr(syserr);
cluster.shutdown();
}
@Test (timeout = 30000)
public void testMkdirReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
try {
fs.mkdirs(new Path("/.reserved"));
fail("Can't mkdir /.reserved");
} catch (Exception e) {
// Expected, HadoopIllegalArgumentException thrown from remote
assertTrue(e.getMessage().contains("\".reserved\" is reserved"));
}
cluster.shutdown();
}
@Test (timeout = 30000)
public void testRmReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
try {
fs.delete(new Path("/.reserved"), true);
fail("Can't delete /.reserved");
} catch (Exception e) {
// Expected, InvalidPathException thrown from remote
assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
}
cluster.shutdown();
}
@Test //(timeout = 30000)
public void testCopyReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
localFile.createNewFile();
final String localfilepath =
new Path(localFile.getAbsolutePath()).toUri().toString();
try {
fs.copyFromLocalFile(new Path(localfilepath), new Path("/.reserved"));
fail("Can't copyFromLocal to /.reserved");
} catch (Exception e) {
// Expected, InvalidPathException thrown from remote
assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
}
final String testdir = System.getProperty("test.build.data")
+ "/TestDFSShell-testCopyReserved";
final Path hdfsTestDir = new Path(testdir);
writeFile(fs, new Path(testdir, "testFileForPut"));
final Path src = new Path(hdfsTestDir, "srcfile");
fs.create(src).close();
assertTrue(fs.exists(src));
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
FsShell shell = new FsShell();
shell.setConf(conf);
runCmd(shell, "-cp", src.toString(), "/.reserved");
assertTrue(baos.toString().contains("Invalid path name /.reserved"));
System.setErr(syserr);
cluster.shutdown();
}
@Test (timeout = 30000)
public void testChmodReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
FsShell shell = new FsShell();
shell.setConf(conf);
runCmd(shell, "-chmod", "777", "/.reserved");
assertTrue(baos.toString().contains("Invalid path name /.reserved"));
System.setErr(syserr);
cluster.shutdown();
}
@Test (timeout = 30000)
public void testChownReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
// runCmd prints error into System.err, thus verify from there.
PrintStream syserr = System.err;
final ByteArrayOutputStream baos = new ByteArrayOutputStream();
PrintStream ps = new PrintStream(baos);
System.setErr(ps);
FsShell shell = new FsShell();
shell.setConf(conf);
runCmd(shell, "-chown", "user1", "/.reserved");
assertTrue(baos.toString().contains("Invalid path name /.reserved"));
System.setErr(syserr);
cluster.shutdown();
}
@Test (timeout = 30000)
public void testSymLinkReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fs = cluster.getFileSystem();
try {
fs.createSymlink(new Path("/.reserved"), new Path("/rl1"), false);
fail("Can't create symlink to /.reserved");
} catch (Exception e) {
// Expected, InvalidPathException thrown from remote
assertTrue(e.getMessage().contains("Invalid target name: /.reserved"));
}
cluster.shutdown();
}
@Test (timeout = 30000)
public void testSnapshotReserved() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster =
new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
DistributedFileSystem fs = cluster.getFileSystem();
final Path reserved = new Path("/.reserved");
try {
fs.allowSnapshot(reserved);
fail("Can't allow snapshot on /.reserved");
} catch (FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory does not exist"));
}
try {
fs.createSnapshot(reserved, "snap");
fail("Can't create snapshot on /.reserved");
} catch (FileNotFoundException e) {
assertTrue(e.getMessage().contains("Directory does not exist"));
}
cluster.shutdown();
}
} }

TestReservedRawPaths.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -304,14 +305,12 @@ public class TestReservedRawPaths {
     DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);
     /*
-     * Ensure that you can't list /.reserved. Ever.
+     * Ensure that you can list /.reserved, with results: raw and .inodes
      */
-    try {
-      fs.listStatus(new Path("/.reserved"));
-      fail("expected FNFE");
-    } catch (FileNotFoundException e) {
-      assertExceptionContains("/.reserved does not exist", e);
-    }
+    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
+    assertEquals(2, stats.length);
+    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
+    assertEquals("raw", stats[1].getPath().getName());
     try {
       fs.listStatus(new Path("/.reserved/.inodes"));