HADOOP-14726. Mark FileStatus::isDir as final
parent 4d7be1d857
commit 645a8f2a4d
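The change makes the deprecated FileStatus#isDir() final and routes it through isDirectory(), so the directory check has a single implementation; the rest of the patch migrates callers from isDir() to isDirectory() and drops subclass overrides of isDir(). Below is a minimal sketch of the caller-side migration; the helper class and method names are hypothetical and not part of the patch:

    import java.io.IOException;

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative only: the caller-side migration this commit applies across the tree.
    class IsDirMigrationExample {
      static boolean isDir(FileSystem fs, Path path) throws IOException {
        FileStatus status = fs.getFileStatus(path);
        // Before: status.isDir()        -- deprecated; now final and forwarding to isDirectory()
        // After:  status.isDirectory()  -- the explicit replacement, alongside isFile()/isSymlink()
        return status.isDirectory();
      }
    }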
@@ -172,7 +172,7 @@ public class FileStatus implements Writable, Comparable<Object>,
    * @return true if this is a file
    */
   public boolean isFile() {
-    return !isdir && !isSymlink();
+    return !isDirectory() && !isSymlink();
   }

   /**
@@ -182,20 +182,20 @@ public class FileStatus implements Writable, Comparable<Object>,
   public boolean isDirectory() {
     return isdir;
   }
-
+
   /**
-   * Old interface, instead use the explicit {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * Old interface, instead use the explicit {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
    * @return true if this is a directory.
-   * @deprecated Use {@link FileStatus#isFile()}, 
-   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()} 
+   * @deprecated Use {@link FileStatus#isFile()},
+   * {@link FileStatus#isDirectory()}, and {@link FileStatus#isSymlink()}
    * instead.
    */
   @Deprecated
-  public boolean isDir() {
-    return isdir;
+  public final boolean isDir() {
+    return isDirectory();
   }
-
+
   /**
    * Is this a symbolic link?
    * @return true if this is a symbolic link
@@ -448,7 +448,6 @@ public class FileStatus implements Writable, Comparable<Object>,
     FileStatus other = PBHelper.convert(proto);
-    isdir = other.isDirectory();
     length = other.getLen();
     isdir = other.isDirectory();
     block_replication = other.getReplication();
     blocksize = other.getBlockSize();
     modification_time = other.getModificationTime();

@@ -61,13 +61,7 @@ class ViewFsFileStatus extends FileStatus {
   public boolean isDirectory() {
     return myFs.isDirectory();
   }

-  @Override
-  @SuppressWarnings("deprecation")
-  public boolean isDir() {
-    return myFs.isDirectory();
-  }
-
   @Override
   public boolean isSymlink() {
     return myFs.isSymlink();

@@ -49,12 +49,6 @@ class ViewFsLocatedFileStatus extends LocatedFileStatus {
     return myFs.isDirectory();
   }

-  @Override
-  @SuppressWarnings("deprecation")
-  public boolean isDir() {
-    return myFs.isDirectory();
-  }
-
   @Override
   public boolean isSymlink() {
     return myFs.isSymlink();

@@ -2095,7 +2095,7 @@ public class PBHelperClient {
     if (fs == null)
       return null;
     FileType fType = FileType.IS_FILE;
-    if (fs.isDir()) {
+    if (fs.isDirectory()) {
       fType = FileType.IS_DIR;
     } else if (fs.isSymlink()) {
       fType = FileType.IS_SYMLINK;

@@ -65,7 +65,9 @@ public class Nfs3Utils {
      * client takes only the lower 32bit of the fileId and treats it as signed
      * int. When the 32th bit is 1, the client considers it invalid.
      */
-    NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG;
+    NfsFileType fileType = fs.isDirectory()
+        ? NfsFileType.NFSDIR
+        : NfsFileType.NFSREG;
     fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType;
     int nlink = (fileType == NfsFileType.NFSDIR) ? fs.getChildrenNum() + 2 : 1;
     long size = (fileType == NfsFileType.NFSDIR) ? getDirSize(fs
@@ -98,7 +100,7 @@ public class Nfs3Utils {
       return null;
     }

-    long size = fstat.isDir() ? getDirSize(fstat.getChildrenNum()) : fstat
+    long size = fstat.isDirectory() ? getDirSize(fstat.getChildrenNum()) : fstat
         .getLen();
     return new WccAttr(size, new NfsTime(fstat.getModificationTime()),
         new NfsTime(fstat.getModificationTime()));

@@ -1208,7 +1208,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       if (fstat == null) {
         return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
       }
-      if (fstat.isDir()) {
+      if (fstat.isDirectory()) {
         return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, errWcc);
       }

@@ -1289,7 +1289,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       if (fstat == null) {
         return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc);
       }
-      if (!fstat.isDir()) {
+      if (!fstat.isDirectory()) {
         return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc);
       }

@@ -1565,7 +1565,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       LOG.info("Can't get path for fileId: " + handle.getFileId());
       return new READDIR3Response(Nfs3Status.NFS3ERR_STALE);
     }
-    if (!dirStatus.isDir()) {
+    if (!dirStatus.isDirectory()) {
       LOG.error("Can't readdir for regular file, fileId: "
           + handle.getFileId());
       return new READDIR3Response(Nfs3Status.NFS3ERR_NOTDIR);
@@ -1732,7 +1732,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
       LOG.info("Can't get path for fileId: " + handle.getFileId());
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_STALE);
     }
-    if (!dirStatus.isDir()) {
+    if (!dirStatus.isDirectory()) {
       LOG.error("Can't readdirplus for regular file, fileId: "
           + handle.getFileId());
       return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_NOTDIR);

@@ -348,7 +348,7 @@ public class Mover {
     private void processRecursively(String parent, HdfsFileStatus status,
         Result result) {
       String fullPath = status.getFullName(parent);
-      if (status.isDir()) {
+      if (status.isDirectory()) {
         if (!fullPath.endsWith(Path.SEPARATOR)) {
           fullPath = fullPath + Path.SEPARATOR;
         }

@@ -471,7 +471,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
   void check(String parent, HdfsFileStatus file, Result replRes, Result ecRes)
       throws IOException {
     String path = file.getFullName(parent);
-    if (file.isDir()) {
+    if (file.isDirectory()) {
       checkDir(path, replRes, ecRes);
       return;
     }
@@ -1115,7 +1115,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
       if (lfStatus == null) { // not exists
         lfInitedOk = dfs.mkdirs(lfName, null, true);
         lostFound = lfName;
-      } else if (!lfStatus.isDir()) { // exists but not a directory
+      } else if (!lfStatus.isDirectory()) { // exists but not a directory
         LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
         lfInitedOk = false;
       } else { // exists and is a directory

@@ -51,7 +51,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.log4j.Logger;
 import org.junit.Test;

-import static org.apache.hadoop.hdfs.inotify.Event.CreateEvent;
 import static org.junit.Assert.*;

 /**
@@ -572,7 +571,7 @@ public class TestDFSUpgradeFromImage {
       Path path) throws IOException {
     String pathStr = path.toString();
     HdfsFileStatus status = dfs.getFileInfo(pathStr);
-    if (!status.isDir()) {
+    if (!status.isDirectory()) {
       for (int retries = 10; retries > 0; retries--) {
         if (dfs.recoverLease(pathStr)) {
           return;

@@ -283,7 +283,7 @@ public class TestStorageMover {

     private void verifyRecursively(final Path parent,
         final HdfsFileStatus status) throws Exception {
-      if (status.isDir()) {
+      if (status.isDirectory()) {
         Path fullPath = parent == null ?
             new Path("/") : status.getFullPath(parent);
         DirectoryListing children = dfs.getClient().listPaths(

@@ -450,7 +450,7 @@ public class TestStartup {
     namenode.getNamesystem().mkdirs("/test",
         new PermissionStatus("hairong", null, FsPermission.getDefault()), true);
     NamenodeProtocols nnRpc = namenode.getRpcServer();
-    assertTrue(nnRpc.getFileInfo("/test").isDir());
+    assertTrue(nnRpc.getFileInfo("/test").isDirectory());
     nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
     nnRpc.saveNamespace(0, 0);
     namenode.stop();
@@ -481,7 +481,7 @@ public class TestStartup {
   private void checkNameSpace(Configuration conf) throws IOException {
     NameNode namenode = new NameNode(conf);
     NamenodeProtocols nnRpc = namenode.getRpcServer();
-    assertTrue(nnRpc.getFileInfo("/test").isDir());
+    assertTrue(nnRpc.getFileInfo("/test").isDirectory());
     nnRpc.setSafeMode(SafeModeAction.SAFEMODE_ENTER, false);
     nnRpc.saveNamespace(0, 0);
     namenode.stop();

@@ -124,7 +124,7 @@ public class TestEditLogTailer {

       for (int i = 0; i < DIRS_TO_MAKE / 2; i++) {
         assertTrue(NameNodeAdapter.getFileInfo(nn2,
-            getDirPath(i), false).isDir());
+            getDirPath(i), false).isDirectory());
       }

       for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
@@ -137,7 +137,7 @@ public class TestEditLogTailer {

       for (int i = DIRS_TO_MAKE / 2; i < DIRS_TO_MAKE; i++) {
         assertTrue(NameNodeAdapter.getFileInfo(nn2,
-            getDirPath(i), false).isDir());
+            getDirPath(i), false).isDirectory());
       }
     } finally {
       cluster.shutdown();

@@ -205,7 +205,7 @@ public class TestFailureToReadEdits {
         TEST_DIR1, false));
     // Should have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR2, false).isDir());
+        TEST_DIR2, false).isDirectory());
     // Null because it hasn't been created yet.
     assertNull(NameNodeAdapter.getFileInfo(nn1,
         TEST_DIR3, false));
@@ -219,10 +219,10 @@ public class TestFailureToReadEdits {
         TEST_DIR1, false));
     // Should have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR2, false).isDir());
+        TEST_DIR2, false).isDirectory());
     // Should now have been successfully created.
     assertTrue(NameNodeAdapter.getFileInfo(nn1,
-        TEST_DIR3, false).isDir());
+        TEST_DIR3, false).isDirectory());
   }

   /**

@@ -128,7 +128,7 @@ public class TestInitializeSharedEdits {
       HATestUtil.waitForStandbyToCatchUp(cluster.getNameNode(0),
           cluster.getNameNode(1));
       assertTrue(NameNodeAdapter.getFileInfo(cluster.getNameNode(1),
-          newPath.toString(), false).isDir());
+          newPath.toString(), false).isDirectory());
     } finally {
       if (fs != null) {
         fs.close();

@@ -154,7 +154,7 @@ public class TestCombineFileInputFormat {
     @Override
     public BlockLocation[] getFileBlockLocations(
         FileStatus stat, long start, long len) throws IOException {
-      if (stat.isDir()) {
+      if (stat.isDirectory()) {
        return null;
      }
      System.out.println("File " + stat.getPath());

@@ -73,20 +73,20 @@ public class TestOutOfBandAzureBlobOperations {
     FileStatus[] obtained = fs.listStatus(new Path("/root/b"));
     assertNotNull(obtained);
     assertEquals(1, obtained.length);
-    assertFalse(obtained[0].isDir());
+    assertFalse(obtained[0].isDirectory());
     assertEquals("/root/b", obtained[0].getPath().toUri().getPath());

     // List the directory
     obtained = fs.listStatus(new Path("/root"));
     assertNotNull(obtained);
     assertEquals(1, obtained.length);
-    assertFalse(obtained[0].isDir());
+    assertFalse(obtained[0].isDirectory());
     assertEquals("/root/b", obtained[0].getPath().toUri().getPath());

     // Get the directory's file status
     FileStatus dirStatus = fs.getFileStatus(new Path("/root"));
     assertNotNull(dirStatus);
-    assertTrue(dirStatus.isDir());
+    assertTrue(dirStatus.isDirectory());
     assertEquals("/root", dirStatus.getPath().toUri().getPath());
   }

@@ -114,7 +114,7 @@ public class TestOutOfBandAzureBlobOperations {
     FileStatus[] listResult = fs.listStatus(new Path("/root/b"));
     // File should win.
     assertEquals(1, listResult.length);
-    assertFalse(listResult[0].isDir());
+    assertFalse(listResult[0].isDirectory());
     try {
       // Trying to delete root/b/c would cause a dilemma for WASB, so
       // it should throw.

@@ -71,7 +71,7 @@ public class SwiftFileStatus extends FileStatus {
    * @return true if the status is considered to be a file
    */
   @Override
-  public boolean isDir() {
+  public boolean isDirectory() {
     return super.isDirectory() || getLen() == 0;
   }

@@ -79,19 +79,11 @@ public class SwiftFileStatus extends FileStatus {
    * A entry is a file if it is not a directory.
    * By implementing it <i>and not marking as an override</i> this
    * subclass builds and runs in both Hadoop versions.
-   * @return the opposite value to {@link #isDir()}
+   * @return the opposite value to {@link #isDirectory()}
    */
   @Override
   public boolean isFile() {
-    return !isDir();
-  }
-
-  /**
-   * Directory test
-   * @return true if the file is considered to be a directory
-   */
-  public boolean isDirectory() {
-    return isDir();
+    return !this.isDirectory();
   }

   @Override
@@ -100,7 +92,7 @@ public class SwiftFileStatus extends FileStatus {
     sb.append(getClass().getSimpleName());
     sb.append("{ ");
     sb.append("path=").append(getPath());
-    sb.append("; isDirectory=").append(isDir());
+    sb.append("; isDirectory=").append(isDirectory());
     sb.append("; length=").append(getLen());
     sb.append("; blocksize=").append(getBlockSize());
     sb.append("; modification_time=").append(getModificationTime());

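As the SwiftFileStatus hunks above illustrate, once isDir() is final a FileStatus subclass can no longer override it; wrappers override isDirectory() instead, and the final isDir() simply reports whatever isDirectory() returns. A minimal sketch of that pattern, with a hypothetical subclass name that is not part of the patch:

    import org.apache.hadoop.fs.FileStatus;

    // Hypothetical subclass illustrating the constraint this commit enforces:
    // FileStatus#isDir() is final, so custom status types override isDirectory().
    class CachedDirFileStatus extends FileStatus {
      private final boolean dir;

      CachedDirFileStatus(boolean dir) {
        this.dir = dir;
      }

      @Override
      public boolean isDirectory() {
        return dir;   // the final isDir() delegates here; isFile() is derived from it too
      }
    }
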
@@ -578,7 +578,7 @@ public class SwiftNativeFileSystemStore {

     //enum the child entries and everything underneath
     List<FileStatus> childStats = listDirectory(srcObject, true, true);
-    boolean srcIsFile = !srcMetadata.isDir();
+    boolean srcIsFile = !srcMetadata.isDirectory();
     if (srcIsFile) {

       //source is a simple file OR a partitioned file
@@ -945,7 +945,7 @@ public class SwiftNativeFileSystemStore {
       //>1 entry implies directory with children. Run through them,
       // but first check for the recursive flag and reject it *unless it looks
       // like a partitioned file (len > 0 && has children)
-      if (!fileStatus.isDir()) {
+      if (!fileStatus.isDirectory()) {
         LOG.debug("Multiple child entries but entry has data: assume partitioned");
       } else if (!recursive) {
         //if there are children, unless this is a recursive operation, fail immediately

@@ -87,7 +87,7 @@ public class TestSwiftFileSystemDirectories extends SwiftFileSystemBaseTest {
     assertEquals("Wrong number of elements in file status " + statusString, 1,
         statuses.length);
     SwiftFileStatus stat = (SwiftFileStatus) statuses[0];
-    assertTrue("isDir(): Not a directory: " + stat, stat.isDir());
+    assertTrue("isDir(): Not a directory: " + stat, stat.isDirectory());
     extraStatusAssertions(stat);
   }

@@ -135,7 +135,7 @@ public class TestSwiftFileSystemDirectories extends SwiftFileSystemBaseTest {
     SwiftTestUtils.writeTextFile(fs, src, "testMultiByteFilesAreFiles", false);
     assertIsFile(src);
     FileStatus status = fs.getFileStatus(src);
-    assertFalse(status.isDir());
+    assertFalse(status.isDirectory());
   }

 }

@@ -228,7 +228,7 @@ public class TestSwiftFileSystemPartitionedUploads extends
         status.getLen());
     String fileInfo = qualifiedPath + " " + status;
     assertFalse("File claims to be a directory " + fileInfo,
-        status.isDir());
+        status.isDirectory());

     FileStatus listedFileStat = resolveChild(parentDirListing, qualifiedPath);
     assertNotNull("Did not find " + path + " in " + parentDirLS,