diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java index 78312b3213c..35f531696c9 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java @@ -60,7 +60,8 @@ public class FileStatus implements Writable, Comparable, HAS_ACL, HAS_CRYPT, HAS_EC, - }; + SNAPSHOT_ENABLED + } private static final Set NONE = Collections.emptySet(); private static Set flags(boolean acl, boolean crypt, boolean ec) { if (!(acl || crypt || ec)) { @@ -273,6 +274,15 @@ public class FileStatus implements Writable, Comparable, return attr.contains(AttrFlags.HAS_EC); } + /** + * Check if directory is Snapshot enabled or not. + * + * @return true if directory is snapshot enabled + */ + public boolean isSnapshotEnabled() { + return attr.contains(AttrFlags.SNAPSHOT_ENABLED); + } + /** * Get the owner of the file. * @return owner of the file. The string could be empty if there is no @@ -330,6 +340,19 @@ public class FileStatus implements Writable, Comparable, this.group = (group == null) ? "" : group; } + /** + * Sets Snapshot enabled flag. + * + * @param isSnapShotEnabled When true, SNAPSHOT_ENABLED flag is set + */ + public void setSnapShotEnabledFlag(boolean isSnapShotEnabled) { + if (isSnapShotEnabled) { + attr.add(AttrFlags.SNAPSHOT_ENABLED); + } else { + attr.remove(AttrFlags.SNAPSHOT_ENABLED); + } + } + /** * @return The contents of the symbolic link. 
*/ @@ -486,4 +509,6 @@ public class FileStatus implements Writable, Comparable, } } + + } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java index aa7d8f57663..23caf2e778b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/protocolPB/PBHelper.java @@ -94,11 +94,15 @@ public final class PBHelper { owner = proto.getOwner(); group = proto.getGroup(); int flags = proto.getFlags(); - return new FileStatus(length, isdir, blockReplication, blocksize, - mtime, atime, permission, owner, group, symlink, path, - (flags & FileStatusProto.Flags.HAS_ACL_VALUE) != 0, + FileStatus fileStatus = new FileStatus(length, isdir, blockReplication, + blocksize, mtime, atime, permission, owner, group, symlink, path, + (flags & FileStatusProto.Flags.HAS_ACL_VALUE) != 0, (flags & FileStatusProto.Flags.HAS_CRYPT_VALUE) != 0, - (flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0); + (flags & FileStatusProto.Flags.HAS_EC_VALUE) != 0); + + fileStatus.setSnapShotEnabledFlag((flags & FileStatusProto.Flags + .SNAPSHOT_ENABLED_VALUE) != 0); + return fileStatus; } public static FileStatusProto convert(FileStatus stat) throws IOException { @@ -124,6 +128,8 @@ public final class PBHelper { flags |= stat.hasAcl() ? FileStatusProto.Flags.HAS_ACL_VALUE : 0; flags |= stat.isEncrypted() ? FileStatusProto.Flags.HAS_CRYPT_VALUE : 0; flags |= stat.isErasureCoded() ? FileStatusProto.Flags.HAS_EC_VALUE : 0; + flags |= stat.isSnapshotEnabled() ? 
FileStatusProto.Flags + .SNAPSHOT_ENABLED_VALUE : 0; bld.setFlags(flags); return bld.build(); } diff --git a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto index 6fe79804450..5b8c45d0ad1 100644 --- a/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto +++ b/hadoop-common-project/hadoop-common/src/main/proto/FSProtos.proto @@ -44,9 +44,10 @@ message FileStatusProto { FT_SYMLINK = 3; } enum Flags { - HAS_ACL = 0x01; // has ACLs - HAS_CRYPT = 0x02; // encrypted - HAS_EC = 0x04; // erasure coded + HAS_ACL = 0x01; // has ACLs + HAS_CRYPT = 0x02; // encrypted + HAS_EC = 0x04; // erasure coded + SNAPSHOT_ENABLED = 0x08; // snapshot enabled } required FileType fileType = 1; required string path = 2; diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java index c8b1c26f238..0499f2e66b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java @@ -57,7 +57,8 @@ public class HdfsFileStatus extends FileStatus { public enum Flags { HAS_ACL, HAS_CRYPT, - HAS_EC; + HAS_EC, + SNAPSHOT_ENABLED } private final EnumSet flags; @@ -247,6 +248,15 @@ public class HdfsFileStatus extends FileStatus { return storagePolicy; } + /** + * Check if directory is Snapshot enabled or not. 
+ * + * @return true if directory is snapshot enabled + */ + public boolean isSnapshotEnabled() { + return flags.contains(Flags.SNAPSHOT_ENABLED); + } + @Override public boolean equals(Object o) { // satisfy findbugs diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java index 8d25d2c9eb7..7549da62d44 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java @@ -1606,6 +1606,9 @@ public class PBHelperClient { case HAS_EC: f.add(HdfsFileStatus.Flags.HAS_EC); break; + case SNAPSHOT_ENABLED: + f.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED); + break; default: // ignore unknown break; @@ -2159,6 +2162,8 @@ public class PBHelperClient { int flags = fs.hasAcl() ? HdfsFileStatusProto.Flags.HAS_ACL_VALUE : 0; flags |= fs.isEncrypted() ? HdfsFileStatusProto.Flags.HAS_CRYPT_VALUE : 0; flags |= fs.isErasureCoded() ? HdfsFileStatusProto.Flags.HAS_EC_VALUE : 0; + flags |= fs.isSnapshotEnabled() ? 
HdfsFileStatusProto.Flags + .SNAPSHOT_ENABLED_VALUE : 0; builder.setFlags(flags); return builder.build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto index 7769f6833d3..c38a9be828a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/hdfs.proto @@ -410,6 +410,7 @@ message HdfsFileStatusProto { HAS_ACL = 0x01; // has ACLs HAS_CRYPT = 0x02; // encrypted HAS_EC = 0x04; // erasure coded + SNAPSHOT_ENABLED = 0x08; // SNAPSHOT ENABLED } required FileType fileType = 1; required bytes path = 2; // local name of inode encoded java UTF8 diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java index 1d169cbd782..4f0dc66fc7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java @@ -199,6 +199,7 @@ public class HttpFSFileSystem extends FileSystem public static final String ENC_BIT_JSON = "encBit"; public static final String EC_BIT_JSON = "ecBit"; + public static final String SNAPSHOT_BIT_JSON = "seBit"; public static final String DIRECTORY_LISTING_JSON = "DirectoryListing"; public static final String PARTIAL_LISTING_JSON = "partialListing"; @@ -1067,19 +1068,27 @@ public class HttpFSFileSystem extends FileSystem final Boolean aclBit = (Boolean) json.get(ACL_BIT_JSON); final Boolean encBit = (Boolean) json.get(ENC_BIT_JSON); final Boolean erasureBit = (Boolean) json.get(EC_BIT_JSON); + final Boolean snapshotEnabledBit = (Boolean) json.get(SNAPSHOT_BIT_JSON); final boolean aBit = (aclBit != null) ? 
aclBit : false; final boolean eBit = (encBit != null) ? encBit : false; final boolean ecBit = (erasureBit != null) ? erasureBit : false; - if (aBit || eBit || ecBit) { + final boolean seBit = + (snapshotEnabledBit != null) ? snapshotEnabledBit : false; + if (aBit || eBit || ecBit || seBit) { // include this for compatibility with 2.x FsPermissionExtension deprecatedPerm = new FsPermissionExtension(permission, aBit, eBit, ecBit); - return new FileStatus(len, FILE_TYPE.DIRECTORY == type, + FileStatus fileStatus = new FileStatus(len, FILE_TYPE.DIRECTORY == type, replication, blockSize, mTime, aTime, deprecatedPerm, owner, group, null, path, aBit, eBit, ecBit); + if (seBit) { + fileStatus.setSnapShotEnabledFlag(seBit); + } + return fileStatus; + } else { + return new FileStatus(len, FILE_TYPE.DIRECTORY == type, + replication, blockSize, mTime, aTime, permission, owner, group, path); } - return new FileStatus(len, FILE_TYPE.DIRECTORY == type, - replication, blockSize, mTime, aTime, permission, owner, group, path); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index d31007b9d1f..889489f1270 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -950,6 +950,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } @Override + public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller, GetFileLinkInfoRequestProto req) throws ServiceException { try { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java index 73e5acbe70f..c5fa4c71a89 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java @@ -412,6 +412,8 @@ class FSDirStatAndListingOp { .unprotectedGetErasureCodingPolicy(fsd.getFSNamesystem(), iip); final boolean isErasureCoded = (ecPolicy != null); + boolean isSnapshottable = false; + if (node.isFile()) { final INodeFile fileNode = node.asFile(); size = fileNode.computeFileSize(snapshot); @@ -432,6 +434,8 @@ class FSDirStatAndListingOp { loc = new LocatedBlocks(); } } + } else if (node.isDirectory()) { + isSnapshottable = node.asDirectory().isSnapshottable(); } int childrenNum = node.isDirectory() ? @@ -450,6 +454,9 @@ class FSDirStatAndListingOp { if (isErasureCoded) { flags.add(HdfsFileStatus.Flags.HAS_EC); } + if (isSnapshottable) { + flags.add(HdfsFileStatus.Flags.SNAPSHOT_ENABLED); + } return createFileStatus( size, node.isDirectory(), diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java index b78b06335c8..83fbc6e807a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java @@ -138,6 +138,10 @@ public class JsonUtil { m.put("ecPolicy", status.getErasureCodingPolicy().getName()); } } + if (status.isSnapshotEnabled()) { + m.put("snapshotEnabled", status.isSnapshotEnabled()); + } + m.put("accessTime", status.getAccessTime()); m.put("modificationTime", status.getModificationTime()); m.put("blockSize", status.getBlockSize()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md index d9ce232bba3..96366a69ec6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md +++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md @@ -441,6 +441,7 @@ See also: [`newlength`](#New_Length), [FileSystem](../../api/org/apache/hadoop/f "pathSuffix" : "", "permission" : "777", "replication" : 0, + "snapshotEnabled" : true, "type" : "DIRECTORY" //enum {FILE, DIRECTORY, SYMLINK} } } @@ -486,6 +487,7 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileSt "pathSuffix" : "bar", "permission" : "711", "replication" : 0, + "snapshotEnabled" : true, "type" : "DIRECTORY" }, ... diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 288a17fef81..072ee9fcfd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -1283,6 +1283,25 @@ public class TestDistributedFileSystem { } } + @Test + public void testListStatusOfSnapshotDirs() throws IOException { + MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()) + .build(); + try { + DistributedFileSystem dfs = cluster.getFileSystem(); + dfs.create(new Path("/parent/test1/dfsclose/file-0")); + Path snapShotDir = new Path("/parent/test1/"); + dfs.allowSnapshot(snapShotDir); + + FileStatus status = dfs.getFileStatus(new Path("/parent/test1")); + assertTrue(status.isSnapshotEnabled()); + status = dfs.getFileStatus(new Path("/parent/")); + assertFalse(status.isSnapshotEnabled()); + } finally { + cluster.shutdown(); + } + } + @Test(timeout=10000) public void testDFSClientPeerReadTimeout() throws IOException { final int timeout = 1000;