HDFS-14665. HttpFS: LISTSTATUS response is missing HDFS-specific fields (#1267) Contributed by Siyao Meng.

Siyao Meng 2019-08-13 16:27:57 -07:00 committed by Wei-Chiu Chuang
parent 3cff73aff4
commit 6ae8bc3a4a
3 changed files with 28 additions and 0 deletions


@@ -177,7 +177,10 @@ public class HttpFSFileSystem extends FileSystem
   public static final String ACCESS_TIME_JSON = "accessTime";
   public static final String MODIFICATION_TIME_JSON = "modificationTime";
   public static final String BLOCK_SIZE_JSON = "blockSize";
+  public static final String CHILDREN_NUM_JSON = "childrenNum";
+  public static final String FILE_ID_JSON = "fileId";
   public static final String REPLICATION_JSON = "replication";
+  public static final String STORAGEPOLICY_JSON = "storagePolicy";
   public static final String XATTRS_JSON = "XAttrs";
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
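
With these keys in place, a FileStatus entry in an HttpFS LISTSTATUS response carries childrenNum, fileId and storagePolicy alongside the existing fields. The fragment below is an illustrative sketch only; the values and the surrounding WebHDFS-style keys are assumed, not taken from this commit:

{
  "pathSuffix": "foo.txt",
  "type": "FILE",
  "length": 1,
  "replication": 3,
  "blockSize": 134217728,
  "modificationTime": 1565738877000,
  "accessTime": 1565738877000,
  "childrenNum": 0,
  "fileId": 16389,
  "storagePolicy": 0
}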


@@ -38,6 +38,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -118,6 +119,16 @@ public class FSOperations {
         fileStatus.getModificationTime());
     json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
     json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+    if (fileStatus instanceof HdfsFileStatus) {
+      // Add HDFS-specific fields to response
+      HdfsFileStatus hdfsFileStatus = (HdfsFileStatus) fileStatus;
+      json.put(HttpFSFileSystem.CHILDREN_NUM_JSON,
+          hdfsFileStatus.getChildrenNum());
+      json.put(HttpFSFileSystem.FILE_ID_JSON,
+          hdfsFileStatus.getFileId());
+      json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
+          hdfsFileStatus.getStoragePolicy());
+    }
     if (fileStatus.getPermission().getAclBit()) {
       json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
     }
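
With the server now emitting these fields, a client going through HttpFS can read them back from the parsed statuses. A minimal sketch, assuming a WebHDFS-compatible HttpFS endpoint at webhdfs://httpfs-host:14000 and a /tmp path (both hypothetical), and relying on the same instanceof check that the test below uses:

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

public class HttpFSListStatusExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical HttpFS endpoint; adjust host, port and path as needed.
    FileSystem fs =
        FileSystem.get(URI.create("webhdfs://httpfs-host:14000"), conf);
    for (FileStatus status : fs.listStatus(new Path("/tmp"))) {
      if (status instanceof HdfsFileStatus) {
        // The HDFS-specific fields added by this change.
        HdfsFileStatus hdfsStatus = (HdfsFileStatus) status;
        System.out.println(status.getPath()
            + " childrenNum=" + hdfsStatus.getChildrenNum()
            + " fileId=" + hdfsStatus.getFileId()
            + " storagePolicy=" + hdfsStatus.getStoragePolicy());
      }
    }
  }
}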


@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
@@ -360,6 +361,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
   private void testListStatus() throws Exception {
     FileSystem fs = FileSystem.get(getProxiedFSConf());
+    boolean isDFS = fs instanceof DistributedFileSystem;
     Path path = new Path(getProxiedFSTestDir(), "foo.txt");
     OutputStream os = fs.create(path);
     os.write(1);
@@ -381,6 +383,18 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     assertEquals(status2.getOwner(), status1.getOwner());
     assertEquals(status2.getGroup(), status1.getGroup());
     assertEquals(status2.getLen(), status1.getLen());
+    if (isDFS && status2 instanceof HdfsFileStatus) {
+      assertTrue(status1 instanceof HdfsFileStatus);
+      HdfsFileStatus hdfsFileStatus1 = (HdfsFileStatus) status1;
+      HdfsFileStatus hdfsFileStatus2 = (HdfsFileStatus) status2;
+      // Check HDFS-specific fields
+      assertEquals(hdfsFileStatus2.getChildrenNum(),
+          hdfsFileStatus1.getChildrenNum());
+      assertEquals(hdfsFileStatus2.getFileId(),
+          hdfsFileStatus1.getFileId());
+      assertEquals(hdfsFileStatus2.getStoragePolicy(),
+          hdfsFileStatus1.getStoragePolicy());
+    }
     FileStatus[] stati = fs.listStatus(path.getParent());
     assertEquals(1, stati.length);
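
As a quick way to inspect the raw response, the HttpFS REST endpoint can also be queried directly. A minimal probe, assuming an HttpFS server at httpfs-host:14000 with simple authentication and a /tmp path (all hypothetical), that prints the LISTSTATUS JSON so the new childrenNum, fileId and storagePolicy fields can be checked by eye:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;
import java.nio.charset.StandardCharsets;

public class ListStatusProbe {
  public static void main(String[] args) throws Exception {
    // Hypothetical endpoint, path and user; adjust for the target cluster.
    URL url = new URL(
        "http://httpfs-host:14000/webhdfs/v1/tmp?op=LISTSTATUS&user.name=hdfs");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), StandardCharsets.UTF_8))) {
      String line;
      // Each FileStatus entry should now include childrenNum, fileId
      // and storagePolicy.
      while ((line = in.readLine()) != null) {
        System.out.println(line);
      }
    } finally {
      conn.disconnect();
    }
  }
}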