From 2449795b8e296baba0b1c157c0ab3b856bd05f7e Mon Sep 17 00:00:00 2001
From: Siyao Meng
Date: Fri, 21 Sep 2018 12:37:43 -0700
Subject: [PATCH] HDFS-13830. Backport HDFS-13141 to branch-3.0: WebHDFS: Add
 support for getting snapshottable directory list. Contributed by Siyao Meng,
 Lokesh Jain.

Signed-off-by: Wei-Chiu Chuang
---
 .../java/org/apache/hadoop/fs/FileStatus.java | 26 +++++--
 .../hadoop/hdfs/DFSOpsCountStatistics.java    |  1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java  | 28 ++++++-
 .../hadoop/hdfs/web/JsonUtilClient.java       | 43 +++++++++++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 14 ++++
 .../hadoop/hdfs/web/resources/GetOpParam.java |  3 +-
 .../web/resources/NamenodeWebHdfsMethods.java |  7 ++
 .../org/apache/hadoop/hdfs/web/JsonUtil.java  | 20 +++++
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 73 ++++++++++++++++++-
 9 files changed, 205 insertions(+), 10 deletions(-)

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index 35f531696c9..bdfbd20be8c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -56,15 +56,25 @@ public class FileStatus implements Writable, Comparable<Object>,
   private Path symlink;
   private Set<AttrFlags> attr;
 
-  private enum AttrFlags {
+  public enum AttrFlags {
     HAS_ACL,
     HAS_CRYPT,
     HAS_EC,
     SNAPSHOT_ENABLED
   }
-  private static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
-  private static Set<AttrFlags> flags(boolean acl, boolean crypt, boolean ec) {
-    if (!(acl || crypt || ec)) {
+
+  public static final Set<AttrFlags> NONE = Collections.<AttrFlags>emptySet();
+
+  /**
+   * Convert boolean attributes to a set of flags.
+   * @param acl See {@link AttrFlags#HAS_ACL}.
+   * @param crypt See {@link AttrFlags#HAS_CRYPT}.
+   * @param ec See {@link AttrFlags#HAS_EC}.
+   * @param sn See {@link AttrFlags#SNAPSHOT_ENABLED}.
+   * @return converted set of flags.
+   */
+  public static Set<AttrFlags> flags(boolean acl, boolean crypt,
+      boolean ec, boolean sn) {
+    if (!(acl || crypt || ec || sn)) {
       return NONE;
     }
     EnumSet<AttrFlags> ret = EnumSet.noneOf(AttrFlags.class);
@@ -77,6 +87,9 @@ public class FileStatus implements Writable, Comparable<Object>,
     if (ec) {
       ret.add(AttrFlags.HAS_EC);
     }
+    if (sn) {
+      ret.add(AttrFlags.SNAPSHOT_ENABLED);
+    }
     return ret;
   }
 
@@ -136,7 +149,7 @@ public class FileStatus implements Writable, Comparable<Object>,
     this.group = (group == null) ? "" : group;
     this.symlink = symlink;
     this.path = path;
-    attr = flags(hasAcl, isEncrypted, isErasureCoded);
+    attr = flags(hasAcl, isEncrypted, isErasureCoded, false);
 
     // The variables isdir and symlink indicate the type:
     // 1. isdir implies directory, in which case symlink must be null.
@@ -480,7 +493,8 @@ public class FileStatus implements Writable, Comparable<Object>,
     setGroup(other.getGroup());
     setSymlink((other.isSymlink() ? other.getSymlink() : null));
     setPath(other.getPath());
-    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded());
+    attr = flags(other.hasAcl(), other.isEncrypted(), other.isErasureCoded(),
+        other.isSnapshotEnabled());
     assert !(isDirectory() && isSymlink()) : "A directory cannot be a symlink";
   }
 
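With the widened flags() factory above, callers supply the snapshot bit
alongside the ACL, encryption, and erasure-coding bits. A minimal caller-side
sketch (the class name and attribute values are made up for illustration):

    import java.util.Set;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileStatus.AttrFlags;

    public class AttrFlagsExample {
      public static void main(String[] args) {
        // A directory that has an ACL and is snapshot-enabled, but is
        // neither encrypted nor erasure-coded.
        Set<AttrFlags> attrs = FileStatus.flags(true, false, false, true);
        System.out.println(attrs.contains(AttrFlags.SNAPSHOT_ENABLED)); // true
        System.out.println(attrs.contains(AttrFlags.HAS_EC));           // false
      }
    }
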
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
index bbd1bd71565..3dcf13b080a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOpsCountStatistics.java
@@ -88,6 +88,7 @@ public class DFSOpsCountStatistics extends StorageStatistics {
     SET_TIMES(CommonStatisticNames.OP_SET_TIMES),
     SET_XATTR("op_set_xattr"),
     GET_SNAPSHOT_DIFF("op_get_snapshot_diff"),
+    GET_SNAPSHOTTABLE_DIRECTORY_LIST("op_get_snapshottable_directory_list"),
     TRUNCATE(CommonStatisticNames.OP_TRUNCATE),
     UNSET_STORAGE_POLICY("op_unset_storage_policy");
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
index 0499f2e66b0..f6faba0808a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java
@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Arrays;
 import java.util.EnumSet;
+import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -58,7 +59,32 @@ public class HdfsFileStatus extends FileStatus {
     HAS_ACL,
     HAS_CRYPT,
     HAS_EC,
-    SNAPSHOT_ENABLED
+    SNAPSHOT_ENABLED;
+
+    /**
+     * Generates an enum set of Flags from a set of attr flags.
+     * @param attr Set of attr flags
+     * @return EnumSet of Flags
+     */
+    public static EnumSet<Flags> convert(Set<AttrFlags> attr) {
+      if (attr.isEmpty()) {
+        return EnumSet.noneOf(Flags.class);
+      }
+      EnumSet<Flags> flags = EnumSet.noneOf(Flags.class);
+      if (attr.contains(AttrFlags.HAS_ACL)) {
+        flags.add(Flags.HAS_ACL);
+      }
+      if (attr.contains(AttrFlags.HAS_EC)) {
+        flags.add(Flags.HAS_EC);
+      }
+      if (attr.contains(AttrFlags.HAS_CRYPT)) {
+        flags.add(Flags.HAS_CRYPT);
+      }
+      if (attr.contains(AttrFlags.SNAPSHOT_ENABLED)) {
+        flags.add(Flags.SNAPSHOT_ENABLED);
+      }
+      return flags;
+    }
   }
 
   private final EnumSet<Flags> flags;
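Flags.convert() bridges the common-side AttrFlags set to the HDFS-side Flags
enum. A quick sketch of the round trip; the class is placed in the same
package as HdfsFileStatus so the nested Flags enum is visible regardless of
its access modifier (the class name and values are made up):

    package org.apache.hadoop.hdfs.protocol;

    import java.util.EnumSet;
    import java.util.Set;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileStatus.AttrFlags;

    public class FlagsConvertExample {
      public static void main(String[] args) {
        // Encrypted and snapshot-enabled; no ACL, no erasure coding.
        Set<AttrFlags> attrs = FileStatus.flags(false, true, false, true);
        EnumSet<HdfsFileStatus.Flags> flags =
            HdfsFileStatus.Flags.convert(attrs);
        System.out.println(flags); // [HAS_CRYPT, SNAPSHOT_ENABLED]
      }
    }
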
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
index d8a3135a402..91eaae0fbfc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/JsonUtilClient.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -64,6 +65,7 @@ import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 class JsonUtilClient {
   static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
@@ -746,4 +748,45 @@ class JsonUtilClient {
     }
     return DFSUtilClient.string2Bytes(str);
   }
+
+  public static SnapshottableDirectoryStatus[] toSnapshottableDirectoryList(
+      final Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    List<?> list = (List<?>) json.get("SnapshottableDirectoryList");
+    if (list == null) {
+      return null;
+    }
+    SnapshottableDirectoryStatus[] statuses =
+        new SnapshottableDirectoryStatus[list.size()];
+    for (int i = 0; i < list.size(); i++) {
+      statuses[i] = toSnapshottableDirectoryStatus((Map<?, ?>) list.get(i));
+    }
+    return statuses;
+  }
+
+  private static SnapshottableDirectoryStatus toSnapshottableDirectoryStatus(
+      Map<?, ?> json) {
+    if (json == null) {
+      return null;
+    }
+    int snapshotNumber = getInt(json, "snapshotNumber", 0);
+    int snapshotQuota = getInt(json, "snapshotQuota", 0);
+    byte[] parentFullPath = toByteArray((String) json.get("parentFullPath"));
+    HdfsFileStatus dirStatus =
+        toFileStatus((Map<?, ?>) json.get("dirStatus"), false);
+    Set<FileStatus.AttrFlags> attrFlags = FileStatus
+        .flags(dirStatus.hasAcl(), dirStatus.isEncrypted(),
+            dirStatus.isErasureCoded(), dirStatus.isSnapshotEnabled());
+    SnapshottableDirectoryStatus snapshottableDirectoryStatus =
+        new SnapshottableDirectoryStatus(dirStatus.getModificationTime(),
+            dirStatus.getAccessTime(), dirStatus.getPermission(),
+            HdfsFileStatus.Flags.convert(attrFlags), dirStatus.getOwner(),
+            dirStatus.getGroup(), dirStatus.getLocalNameInBytes(),
+            dirStatus.getFileId(), dirStatus.getChildrenNum(), snapshotNumber,
+            snapshotQuota, parentFullPath);
+    return snapshottableDirectoryStatus;
+  }
+
 }
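toSnapshottableDirectoryList() expects a top-level "SnapshottableDirectoryList"
array whose entries carry the two snapshot counters, the parent path, and a
nested "dirStatus" map; a missing snapshotNumber or snapshotQuota falls back
to 0 via getInt(). An illustrative payload (the values are made up, and
dirStatus is abbreviated to a few of the usual WebHDFS FileStatus properties):

    {
      "SnapshottableDirectoryList": [
        {
          "dirStatus": {
            "group": "supergroup",
            "owner": "hdfs",
            "pathSuffix": "foo",
            "permission": "755",
            "type": "DIRECTORY"
          },
          "parentFullPath": "/",
          "snapshotNumber": 0,
          "snapshotQuota": 65536
        }
      ]
    }
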
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 7f04b830d08..bd20315ef34 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -97,6 +97,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.FileEncryptionInfoProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
@@ -1331,6 +1332,19 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  public SnapshottableDirectoryStatus[] getSnapshottableDirectoryList()
+      throws IOException {
+    storageStatistics
+        .incrementOpCounter(OpType.GET_SNAPSHOTTABLE_DIRECTORY_LIST);
+    final HttpOpParam.Op op = GetOpParam.Op.GETSNAPSHOTTABLEDIRECTORYLIST;
+    return new FsPathResponseRunner<SnapshottableDirectoryStatus[]>(op, null) {
+      @Override
+      SnapshottableDirectoryStatus[] decodeResponse(Map<?, ?> json) {
+        return JsonUtilClient.toSnapshottableDirectoryList(json);
+      }
+    }.run();
+  }
+
   @Override
   public boolean setReplication(final Path p, final short replication
       ) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
index 7061d8265d2..85bb11d94b7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/GetOpParam.java
@@ -48,7 +48,8 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
     CHECKACCESS(false, HttpURLConnection.HTTP_OK),
     LISTSTATUS_BATCH(false, HttpURLConnection.HTTP_OK),
     GETSERVERDEFAULTS(false, HttpURLConnection.HTTP_OK),
-    GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK);
+    GETSNAPSHOTDIFF(false, HttpURLConnection.HTTP_OK),
+    GETSNAPSHOTTABLEDIRECTORYLIST(false, HttpURLConnection.HTTP_OK);
 
     final boolean redirect;
     final int expectedHttpResponseCode;
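From the application side, the new client API can be used as below once the
filesystem is obtained through the webhdfs:// scheme (the host, port, and
class name are illustrative; 9870 is the usual NameNode HTTP port in Hadoop 3):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
    import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

    public class ListSnapshottableExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        WebHdfsFileSystem webHdfs = (WebHdfsFileSystem) FileSystem.get(
            URI.create("webhdfs://namenode.example.com:9870/"), conf);
        // Fetch all snapshottable directories visible to the caller.
        for (SnapshottableDirectoryStatus s
            : webHdfs.getSnapshottableDirectoryList()) {
          System.out.println(s.getFullPath() + " snapshots="
              + s.getSnapshotNumber() + " quota=" + s.getSnapshotQuota());
        }
      }
    }
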
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index adfd74078e8..e9f56289f62 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -79,6 +79,7 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
@@ -1221,6 +1222,12 @@ public class NamenodeWebHdfsMethods {
       final String js = JsonUtil.toJsonString(diffReport);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
+    case GETSNAPSHOTTABLEDIRECTORYLIST: {
+      SnapshottableDirectoryStatus[] snapshottableDirectoryList =
+          cp.getSnapshottableDirListing();
+      final String js = JsonUtil.toJsonString(snapshottableDirectoryList);
+      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 83fbc6e807a..bdb2c07b644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -530,4 +530,24 @@ public class JsonUtil {
     }
     return m;
   }
+
+  public static String toJsonString(
+      SnapshottableDirectoryStatus[] snapshottableDirectoryList) {
+    Object[] a = new Object[snapshottableDirectoryList.length];
+    for (int i = 0; i < snapshottableDirectoryList.length; i++) {
+      a[i] = toJsonMap(snapshottableDirectoryList[i]);
+    }
+    return toJsonString("SnapshottableDirectoryList", a);
+  }
+
+  private static Object toJsonMap(
+      SnapshottableDirectoryStatus snapshottableDirectoryStatus) {
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("snapshotNumber", snapshottableDirectoryStatus.getSnapshotNumber());
+    m.put("snapshotQuota", snapshottableDirectoryStatus.getSnapshotQuota());
+    m.put("parentFullPath", DFSUtilClient
+        .bytes2String(snapshottableDirectoryStatus.getParentFullPath()));
+    m.put("dirStatus", toJsonMap(snapshottableDirectoryStatus.getDirStatus()));
+    return m;
+  }
 }
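On the wire this is a plain GET against the WebHDFS REST API, answered with
200 and Content-Type application/json carrying the "SnapshottableDirectoryList"
document sketched earlier (the host and user name below are illustrative):

    GET /webhdfs/v1/?op=GETSNAPSHOTTABLEDIRECTORYLIST&user.name=hdfs HTTP/1.1
    Host: namenode.example.com:9870
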
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 1984c0ef218..8f63060a0f7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -640,7 +640,7 @@ public class TestWebHDFS {
   }
 
   /**
-   * Test snapshot deletion through WebHdfs
+   * Test snapshot deletion through WebHdfs.
    */
   @Test
   public void testWebHdfsDeleteSnapshot() throws Exception {
@@ -685,7 +685,7 @@ public class TestWebHDFS {
   }
 
   /**
-   * Test snapshot diff through WebHdfs
+   * Test snapshot diff through WebHdfs.
    */
   @Test
   public void testWebHdfsSnapshotDiff() throws Exception {
@@ -756,6 +756,75 @@ public class TestWebHDFS {
     }
   }
 
+  /**
+   * Test snapshottable directory list through WebHdfs.
+   */
+  @Test
+  public void testWebHdfsSnapshottableDirectoryList() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final Path foo = new Path("/foo");
+      final Path bar = new Path("/bar");
+      dfs.mkdirs(foo);
+      dfs.mkdirs(bar);
+      dfs.allowSnapshot(foo);
+      dfs.allowSnapshot(bar);
+      Path file0 = new Path(foo, "file0");
+      DFSTestUtil.createFile(dfs, file0, 100, (short) 1, 0);
+      Path file1 = new Path(bar, "file1");
+      DFSTestUtil.createFile(dfs, file1, 100, (short) 1, 0);
+      SnapshottableDirectoryStatus[] statuses =
+          webHdfs.getSnapshottableDirectoryList();
+      SnapshottableDirectoryStatus[] dfsStatuses =
+          dfs.getSnapshottableDirListing();
+
+      for (int i = 0; i < dfsStatuses.length; i++) {
+        Assert.assertEquals(statuses[i].getSnapshotNumber(),
+            dfsStatuses[i].getSnapshotNumber());
+        Assert.assertEquals(statuses[i].getSnapshotQuota(),
+            dfsStatuses[i].getSnapshotQuota());
+        Assert.assertTrue(Arrays.equals(statuses[i].getParentFullPath(),
+            dfsStatuses[i].getParentFullPath()));
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getChildrenNum(),
+            statuses[i].getDirStatus().getChildrenNum());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getModificationTime(),
+            statuses[i].getDirStatus().getModificationTime());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isDirectory(),
+            statuses[i].getDirStatus().isDirectory());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getAccessTime(),
+            statuses[i].getDirStatus().getAccessTime());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPermission(),
+            statuses[i].getDirStatus().getPermission());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getOwner(),
+            statuses[i].getDirStatus().getOwner());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getGroup(),
+            statuses[i].getDirStatus().getGroup());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getPath(),
+            statuses[i].getDirStatus().getPath());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().getFileId(),
+            statuses[i].getDirStatus().getFileId());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().hasAcl(),
+            statuses[i].getDirStatus().hasAcl());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isEncrypted(),
+            statuses[i].getDirStatus().isEncrypted());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isErasureCoded(),
+            statuses[i].getDirStatus().isErasureCoded());
+        Assert.assertEquals(dfsStatuses[i].getDirStatus().isSnapshotEnabled(),
+            statuses[i].getDirStatus().isSnapshotEnabled());
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsCreateNonRecursive() throws IOException, URISyntaxException {
     MiniDFSCluster cluster = null;