From 88aece40831a695c0871a056a986c04edac6ea44 Mon Sep 17 00:00:00 2001
From: Siyao Meng <50227127+smengcl@users.noreply.github.com>
Date: Tue, 13 Aug 2019 16:27:57 -0700
Subject: [PATCH] HDFS-14665. HttpFS: LISTSTATUS response is missing
 HDFS-specific fields (#1267)

Contributed by Siyao Meng.

(cherry picked from commit 6ae8bc3a4a07c6b4e7060362b749be8c7afe0560)
---
 .../hadoop/fs/http/client/HttpFSFileSystem.java    |  3 +++
 .../apache/hadoop/fs/http/server/FSOperations.java | 11 +++++++++++
 .../hadoop/fs/http/client/BaseTestHttpFSWith.java  | 14 ++++++++++++++
 3 files changed, 28 insertions(+)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
index 1c1b93b3e22..1efafe74f0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/client/HttpFSFileSystem.java
@@ -177,7 +177,10 @@ public class HttpFSFileSystem extends FileSystem
   public static final String ACCESS_TIME_JSON = "accessTime";
   public static final String MODIFICATION_TIME_JSON = "modificationTime";
   public static final String BLOCK_SIZE_JSON = "blockSize";
+  public static final String CHILDREN_NUM_JSON = "childrenNum";
+  public static final String FILE_ID_JSON = "fileId";
   public static final String REPLICATION_JSON = "replication";
+  public static final String STORAGEPOLICY_JSON = "storagePolicy";
   public static final String XATTRS_JSON = "XAttrs";
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
index 7f0b5d26427..3f792560b21 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/fs/http/server/FSOperations.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -118,6 +119,16 @@ public class FSOperations {
         fileStatus.getModificationTime());
     json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
     json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+    if (fileStatus instanceof HdfsFileStatus) {
+      // Add HDFS-specific fields to response
+      HdfsFileStatus hdfsFileStatus = (HdfsFileStatus) fileStatus;
+      json.put(HttpFSFileSystem.CHILDREN_NUM_JSON,
+          hdfsFileStatus.getChildrenNum());
+      json.put(HttpFSFileSystem.FILE_ID_JSON,
+          hdfsFileStatus.getFileId());
+      json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
+          hdfsFileStatus.getStoragePolicy());
+    }
     if (fileStatus.getPermission().getAclBit()) {
       json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
index 6d1f673980e..d8e1379f7b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/fs/http/client/BaseTestHttpFSWith.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -350,6 +351,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {

   private void testListStatus() throws Exception {
     FileSystem fs = FileSystem.get(getProxiedFSConf());
+    boolean isDFS = fs instanceof DistributedFileSystem;
     Path path = new Path(getProxiedFSTestDir(), "foo.txt");
     OutputStream os = fs.create(path);
     os.write(1);
@@ -371,6 +373,18 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     assertEquals(status2.getOwner(), status1.getOwner());
     assertEquals(status2.getGroup(), status1.getGroup());
     assertEquals(status2.getLen(), status1.getLen());
+    if (isDFS && status2 instanceof HdfsFileStatus) {
+      assertTrue(status1 instanceof HdfsFileStatus);
+      HdfsFileStatus hdfsFileStatus1 = (HdfsFileStatus) status1;
+      HdfsFileStatus hdfsFileStatus2 = (HdfsFileStatus) status2;
+      // Check HDFS-specific fields
+      assertEquals(hdfsFileStatus2.getChildrenNum(),
+          hdfsFileStatus1.getChildrenNum());
+      assertEquals(hdfsFileStatus2.getFileId(),
+          hdfsFileStatus1.getFileId());
+      assertEquals(hdfsFileStatus2.getStoragePolicy(),
+          hdfsFileStatus1.getStoragePolicy());
+    }
     FileStatus[] stati = fs.listStatus(path.getParent());
     assertEquals(1, stati.length);
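
Note (illustrative, not part of the patch): a minimal client-side sketch of how
the three new LISTSTATUS fields could be checked against a running HttpFS
server. The host, port, path, and user.name value are assumptions (HttpFS
listens on 14000 by default), and the class name ListStatusFieldsCheck is
hypothetical; the json-simple parser used here is the one hadoop-hdfs-httpfs
already depends on.

import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

import org.json.simple.JSONArray;
import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

public class ListStatusFieldsCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical local HttpFS endpoint; adjust host, port, path and user.
    URL url = new URL("http://localhost:14000/webhdfs/v1/tmp"
        + "?op=LISTSTATUS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // LISTSTATUS returns {"FileStatuses":{"FileStatus":[...]}} per the
    // WebHDFS REST API, which HttpFS mirrors.
    JSONObject json = (JSONObject) new JSONParser().parse(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8));
    JSONArray statuses = (JSONArray)
        ((JSONObject) json.get("FileStatuses")).get("FileStatus");
    for (Object o : statuses) {
      JSONObject status = (JSONObject) o;
      // The three HDFS-specific fields this patch adds to the response;
      // before the fix they were absent from HttpFS LISTSTATUS output.
      System.out.println("childrenNum   = " + status.get("childrenNum"));
      System.out.println("fileId        = " + status.get("fileId"));
      System.out.println("storagePolicy = " + status.get("storagePolicy"));
    }
  }
}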