Revert "HDFS-7087. Ability to list /.reserved. Contributed by Xiao Chen."
This reverts commit ed731f79d9.
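HDFS-7087 made the /.reserved root directory listable: FSDirectory gained
synthetic HdfsFileStatus entries for its two children (.inodes and raw),
FSDirStatAndListingOp returned them via getReservedListing(), and new
isExactReservedName() guards made create, delete, rename, setPermission,
setOwner, and symlink-target operations on /.reserved itself fail with
InvalidPathException. This revert removes all of that, including the
shell-level tests in TestDFSShell, and restores the pre-HDFS-7087 behavior
in which /.reserved resolves to nothing.

A rough before/after sketch, inferred from the test changes in this diff
(fs is a FileSystem handle on the cluster; the variable names are
illustrative):

    // With HDFS-7087 in place: two synthetic entries are listed.
    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
    // stats[0].getPath().getName() == ".inodes"
    // stats[1].getPath().getName() == "raw"

    // After this revert: /.reserved is invisible again.
    fs.listStatus(new Path("/.reserved")); // throws FileNotFoundException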
parent ed731f79d9
commit 2d577115a5
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -719,8 +719,6 @@ Release 2.8.0 - UNRELEASED
     TestBlockManager.testBlocksAreNotUnderreplicatedInSingleRack.
     (Masatake Iwasaki via wang)

-    HDFS-7087. Ability to list /.reserved. (Xiao Chen via wang)
-
   OPTIMIZATIONS

     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.server.namenode;

 import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -51,9 +50,6 @@ public class FSDirAttrOp {
       FSDirectory fsd, final String srcArg, FsPermission permission)
       throws IOException {
     String src = srcArg;
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
-    }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
@@ -73,9 +69,6 @@ public class FSDirAttrOp {
   static HdfsFileStatus setOwner(
       FSDirectory fsd, String src, String username, String group)
       throws IOException {
-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
-    }
     FSPermissionChecker pc = fsd.getPermissionChecker();
     byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
     INodesInPath iip;
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirDeleteOp.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;

-import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotEmptyDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
@@ -178,10 +177,6 @@ class FSDirDeleteOp {
       NameNode.stateChangeLog.debug("DIR* NameSystem.delete: " + src);
     }

-    if (FSDirectory.isExactReservedName(src)) {
-      throw new InvalidPathException(src);
-    }
-
     FSDirectory fsd = fsn.getFSDirectory();
     BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo();
     List<INode> removedINodes = new ChunkedArrayList<>();
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirRenameOp.java
@@ -499,12 +499,6 @@ class FSDirRenameOp {
           + error);
       throw new IOException(error);
     }
-
-    if (FSDirectory.isExactReservedName(src)
-        || FSDirectory.isExactReservedName(dst)) {
-      error = "Cannot rename to or from /.reserved";
-      throw new InvalidPathException(error);
-    }
   }

   private static void validateOverwrite(
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -226,9 +226,6 @@ class FSDirStatAndListingOp {
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
     final boolean isRawPath = FSDirectory.isReservedRawName(src);
-    if (FSDirectory.isExactReservedName(srcs)) {
-      return getReservedListing(fsd);
-    }

     fsd.readLock();
     try {
@@ -338,15 +335,6 @@ class FSDirStatAndListingOp {
         listing, snapshots.size() - skipSize - numOfListing);
   }

-  /**
-   * Get a listing of the /.reserved directory.
-   * @param fsd FSDirectory
-   * @return listing containing child directories of /.reserved
-   */
-  private static DirectoryListing getReservedListing(FSDirectory fsd) {
-    return new DirectoryListing(fsd.getReservedStatuses(), 0);
-  }
-
   /** Get the file info for a specific file.
    * @param fsd FSDirectory
    * @param src The string representation of the path to the file
@@ -383,10 +371,6 @@ class FSDirStatAndListingOp {
       FSDirectory fsd, String src, boolean resolveLink, boolean isRawPath)
       throws IOException {
     String srcs = FSDirectory.normalizePath(src);
-    if (FSDirectory.isExactReservedName(src)) {
-      return FSDirectory.DOT_RESERVED_STATUS;
-    }
-
     if (srcs.endsWith(HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR)) {
       if (fsd.getINode4DotSnapshot(srcs) != null) {
         return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSymlinkOp.java
@@ -42,8 +42,7 @@ class FSDirSymlinkOp {
     if (!DFSUtil.isValidName(link)) {
       throw new InvalidPathException("Invalid link name: " + link);
     }
-    if (FSDirectory.isReservedName(target) || target.isEmpty()
-        || FSDirectory.isExactReservedName(target)) {
+    if (FSDirectory.isReservedName(target) || target.isEmpty()) {
       throw new InvalidPathException("Invalid target name: " + target);
     }

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
-import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -348,12 +347,6 @@ class FSDirWriteFileOp {
           " already exists as a directory");
     }

-    if (FSDirectory.isExactReservedName(src) || (FSDirectory.isReservedName(src)
-        && !FSDirectory.isReservedRawName(src)
-        && !FSDirectory.isReservedInodesName(src))) {
-      throw new InvalidPathException(src);
-    }
-
     final INodeFile myFile = INodeFile.valueOf(inode, src, true);
     if (fsd.isPermissionEnabled()) {
       if (overwrite && myFile != null) {
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -122,11 +122,6 @@ public class FSDirectory implements Closeable {
   public final static byte[] DOT_INODES =
       DFSUtil.string2Bytes(DOT_INODES_STRING);

-  public final static HdfsFileStatus DOT_RESERVED_STATUS =
-      new HdfsFileStatus(0, true, 0, 0, 0, 0, new FsPermission((short) 01770),
-          null, null, null, HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-          HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
-
   INodeDirectory rootDir;
   private final FSNamesystem namesystem;
   private volatile boolean skipQuotaCheck = false; //skip while consuming edits
@@ -174,8 +169,6 @@ public class FSDirectory implements Closeable {

   private final FSEditLog editLog;

-  private HdfsFileStatus[] reservedStatuses;
-
   private INodeAttributeProvider attributeProvider;

   public void setINodeAttributeProvider(INodeAttributeProvider provider) {
@@ -320,43 +313,6 @@ public class FSDirectory implements Closeable {
         DFSConfigKeys.DFS_NAMENODE_QUOTA_INIT_THREADS_DEFAULT);
   }

-  /**
-   * Get HdfsFileStatuses of the reserved paths: .inodes and raw.
-   *
-   * @return Array of HdfsFileStatus
-   */
-  HdfsFileStatus[] getReservedStatuses() {
-    Preconditions.checkNotNull(reservedStatuses, "reservedStatuses should "
-        + " not be null. It is populated when FSNamesystem loads FS image."
-        + " It has to be set at this time instead of initialization time"
-        + " because CTime is loaded during FSNamesystem#loadFromDisk.");
-    return reservedStatuses;
-  }
-
-  /**
-   * Create HdfsFileStatuses of the reserved paths: .inodes and raw.
-   * These statuses are solely for listing purpose. All other operations
-   * on the reserved dirs are disallowed.
-   * Operations on sub directories are resolved by
-   * {@link FSDirectory#resolvePath(String, byte[][], FSDirectory)}
-   * and conducted directly, without the need to check the reserved dirs.
-   *
-   * This method should only be invoked once during namenode initialization.
-   *
-   * @param cTime CTime of the file system
-   * @return Array of HdfsFileStatus
-   */
-  void createReservedStatuses(long cTime) {
-    HdfsFileStatus inodes = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null,
-        DOT_INODES, -1L, 0, null,
-        HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
-    HdfsFileStatus raw = new HdfsFileStatus(0, true, 0, 0, cTime, cTime,
-        new FsPermission((short) 0770), null, supergroup, null, RAW, -1L,
-        0, null, HdfsConstants.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, null);
-    reservedStatuses = new HdfsFileStatus[] {inodes, raw};
-  }
-
   FSNamesystem getFSNamesystem() {
     return namesystem;
   }
@@ -1307,20 +1263,11 @@ public class FSDirectory implements Closeable {
     return src.startsWith(DOT_RESERVED_PATH_PREFIX + Path.SEPARATOR);
   }

-  public static boolean isExactReservedName(String src) {
-    return CHECK_RESERVED_FILE_NAMES && src.equals(DOT_RESERVED_PATH_PREFIX);
-  }
-
   static boolean isReservedRawName(String src) {
     return src.startsWith(DOT_RESERVED_PATH_PREFIX +
         Path.SEPARATOR + RAW_STRING);
   }

-  static boolean isReservedInodesName(String src) {
-    return src.startsWith(DOT_RESERVED_PATH_PREFIX +
-        Path.SEPARATOR + DOT_INODES_STRING);
-  }
-
   /**
    * Resolve a /.reserved/... path to a non-reserved path.
    * <p/>
@@ -1371,15 +1318,9 @@
       /* It's /.reserved/raw so strip off the /.reserved/raw prefix. */
       if (nComponents == 3) {
         return Path.SEPARATOR;
       } else {
-        if (nComponents == 4
-            && Arrays.equals(DOT_RESERVED, pathComponents[3])) {
-          /* It's /.reserved/raw/.reserved so don't strip */
-          return src;
-        } else {
         return constructRemainingPath("", pathComponents, 3);
-        }
       }
     } else {
       /* It's some sort of /.reserved/<unknown> path. Ignore it. */
       return src;
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -672,7 +672,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
     if (nnMetrics != null) {
       nnMetrics.setFsImageLoadTime((int) timeTakenToLoadFSImage);
     }
-    namesystem.getFSDirectory().createReservedStatuses(namesystem.getCTime());
     return namesystem;
   }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
@@ -1193,6 +1193,10 @@ public class TestGlobPaths {
       Assert.assertEquals(reservedRoot,
           TestPath.mergeStatuses(wrap.
               globStatus(new Path(reservedRoot), new AcceptAllPathFilter())));
+      // These inodes don't show up via listStatus.
+      Assert.assertEquals("",
+          TestPath.mergeStatuses(wrap.
+              globStatus(new Path("/.reserved/*"), new AcceptAllPathFilter())));
     }
   }

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java
@@ -41,7 +41,6 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.FsDatasetTestUtils.MaterializedReplica;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
 import org.apache.hadoop.io.IOUtils;
@@ -3026,197 +3025,4 @@ public class TestDFSShell {
   public void testNoTrashConfig() throws Exception {
     deleteFileUsingTrash(false, false);
   }
-
-  @Test (timeout = 30000)
-  public void testListReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    FsShell shell = new FsShell();
-    shell.setConf(conf);
-    FileStatus test = fs.getFileStatus(new Path("/.reserved"));
-    assertEquals(FSDirectory.DOT_RESERVED_STRING, test.getPath().getName());
-
-    // Listing /.reserved/ should show 2 items: raw and .inodes
-    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
-    assertEquals(2, stats.length);
-    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
-    assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
-        stats[0].getGroup());
-    assertEquals("raw", stats[1].getPath().getName());
-    assertEquals(conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY),
-        stats[1].getGroup());
-
-    // Listing / should not show /.reserved
-    stats = fs.listStatus(new Path("/"));
-    assertEquals(0, stats.length);
-
-    // runCmd prints error into System.err, thus verify from there.
-    PrintStream syserr = System.err;
-    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    PrintStream ps = new PrintStream(baos);
-    System.setErr(ps);
-
-    runCmd(shell, "-ls", "/.reserved");
-    assertEquals(0, baos.toString().length());
-
-    runCmd(shell, "-ls", "/.reserved/raw/.reserved");
-    assertTrue(baos.toString().contains("No such file or directory"));
-
-    System.setErr(syserr);
-    cluster.shutdown();
-  }
-
-  @Test (timeout = 30000)
-  public void testMkdirReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    try {
-      fs.mkdirs(new Path("/.reserved"));
-      fail("Can't mkdir /.reserved");
-    } catch (Exception e) {
-      // Expected, HadoopIllegalArgumentException thrown from remote
-      assertTrue(e.getMessage().contains("\".reserved\" is reserved"));
-    }
-    cluster.shutdown();
-  }
-
-  @Test (timeout = 30000)
-  public void testRmReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    try {
-      fs.delete(new Path("/.reserved"), true);
-      fail("Can't delete /.reserved");
-    } catch (Exception e) {
-      // Expected, InvalidPathException thrown from remote
-      assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
-    }
-    cluster.shutdown();
-  }
-
-  @Test //(timeout = 30000)
-  public void testCopyReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
-    localFile.createNewFile();
-    final String localfilepath =
-        new Path(localFile.getAbsolutePath()).toUri().toString();
-    try {
-      fs.copyFromLocalFile(new Path(localfilepath), new Path("/.reserved"));
-      fail("Can't copyFromLocal to /.reserved");
-    } catch (Exception e) {
-      // Expected, InvalidPathException thrown from remote
-      assertTrue(e.getMessage().contains("Invalid path name /.reserved"));
-    }
-
-    final String testdir = System.getProperty("test.build.data")
-        + "/TestDFSShell-testCopyReserved";
-    final Path hdfsTestDir = new Path(testdir);
-    writeFile(fs, new Path(testdir, "testFileForPut"));
-    final Path src = new Path(hdfsTestDir, "srcfile");
-    fs.create(src).close();
-    assertTrue(fs.exists(src));
-
-    // runCmd prints error into System.err, thus verify from there.
-    PrintStream syserr = System.err;
-    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    PrintStream ps = new PrintStream(baos);
-    System.setErr(ps);
-
-    FsShell shell = new FsShell();
-    shell.setConf(conf);
-    runCmd(shell, "-cp", src.toString(), "/.reserved");
-    assertTrue(baos.toString().contains("Invalid path name /.reserved"));
-    System.setErr(syserr);
-    cluster.shutdown();
-  }
-
-  @Test (timeout = 30000)
-  public void testChmodReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-
-    // runCmd prints error into System.err, thus verify from there.
-    PrintStream syserr = System.err;
-    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    PrintStream ps = new PrintStream(baos);
-    System.setErr(ps);
-
-    FsShell shell = new FsShell();
-    shell.setConf(conf);
-    runCmd(shell, "-chmod", "777", "/.reserved");
-    assertTrue(baos.toString().contains("Invalid path name /.reserved"));
-    System.setErr(syserr);
-    cluster.shutdown();
-  }
-
-  @Test (timeout = 30000)
-  public void testChownReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-
-    // runCmd prints error into System.err, thus verify from there.
-    PrintStream syserr = System.err;
-    final ByteArrayOutputStream baos = new ByteArrayOutputStream();
-    PrintStream ps = new PrintStream(baos);
-    System.setErr(ps);
-
-    FsShell shell = new FsShell();
-    shell.setConf(conf);
-    runCmd(shell, "-chown", "user1", "/.reserved");
-    assertTrue(baos.toString().contains("Invalid path name /.reserved"));
-    System.setErr(syserr);
-    cluster.shutdown();
-  }
-
-  @Test (timeout = 30000)
-  public void testSymLinkReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    FileSystem fs = cluster.getFileSystem();
-    try {
-      fs.createSymlink(new Path("/.reserved"), new Path("/rl1"), false);
-      fail("Can't create symlink to /.reserved");
-    } catch (Exception e) {
-      // Expected, InvalidPathException thrown from remote
-      assertTrue(e.getMessage().contains("Invalid target name: /.reserved"));
-    }
-    cluster.shutdown();
-  }
-
-  @Test (timeout = 30000)
-  public void testSnapshotReserved() throws IOException {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-    DistributedFileSystem fs = cluster.getFileSystem();
-    final Path reserved = new Path("/.reserved");
-    try {
-      fs.allowSnapshot(reserved);
-      fail("Can't allow snapshot on /.reserved");
-    } catch (FileNotFoundException e) {
-      assertTrue(e.getMessage().contains("Directory does not exist"));
-    }
-    try {
-      fs.createSnapshot(reserved, "snap");
-      fail("Can't create snapshot on /.reserved");
-    } catch (FileNotFoundException e) {
-      assertTrue(e.getMessage().contains("Directory does not exist"));
-    }
-    cluster.shutdown();
-  }
 }
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReservedRawPaths.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.server.namenode.EncryptionZoneManager;
-import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.log4j.Level;
@@ -305,12 +304,14 @@ public class TestReservedRawPaths {
     DFSTestUtil.createFile(fs, baseFileRaw, len, (short) 1, 0xFEED);

     /*
-     * Ensure that you can list /.reserved, with results: raw and .inodes
+     * Ensure that you can't list /.reserved. Ever.
      */
-    FileStatus[] stats = fs.listStatus(new Path("/.reserved"));
-    assertEquals(2, stats.length);
-    assertEquals(FSDirectory.DOT_INODES_STRING, stats[0].getPath().getName());
-    assertEquals("raw", stats[1].getPath().getName());
+    try {
+      fs.listStatus(new Path("/.reserved"));
+      fail("expected FNFE");
+    } catch (FileNotFoundException e) {
+      assertExceptionContains("/.reserved does not exist", e);
+    }

     try {
       fs.listStatus(new Path("/.reserved/.inodes"));