HDFS-14665. HttpFS: LISTSTATUS response is missing HDFS-specific fields (#1291) Contributed by Siyao Meng.
parent 7a1f508f58
commit e0a5fee7f5

HttpFSFileSystem.java

@@ -174,7 +174,10 @@ public class HttpFSFileSystem extends FileSystem
   public static final String ACCESS_TIME_JSON = "accessTime";
   public static final String MODIFICATION_TIME_JSON = "modificationTime";
   public static final String BLOCK_SIZE_JSON = "blockSize";
+  public static final String CHILDREN_NUM_JSON = "childrenNum";
+  public static final String FILE_ID_JSON = "fileId";
   public static final String REPLICATION_JSON = "replication";
+  public static final String STORAGEPOLICY_JSON = "storagePolicy";
   public static final String XATTRS_JSON = "XAttrs";
   public static final String XATTR_NAME_JSON = "name";
   public static final String XATTR_VALUE_JSON = "value";
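
For context, these constants name the keys of the FileStatus JSON object that HttpFS serves. With the three new keys, a single LISTSTATUS entry follows the WebHDFS shape, roughly like the sample below (illustrative values only, not actual output from this patch):

    {
      "pathSuffix": "foo.txt",
      "type": "FILE",
      "length": 1,
      "permission": "644",
      "owner": "hdfs",
      "group": "supergroup",
      "accessTime": 1565000000000,
      "modificationTime": 1565000000000,
      "blockSize": 134217728,
      "replication": 3,
      "childrenNum": 0,
      "fileId": 16389,
      "storagePolicy": 0
    }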

FSOperations.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.lib.service.FileSystemAccess;
 import org.apache.hadoop.util.StringUtils;

@@ -114,6 +115,16 @@ public class FSOperations {
         fileStatus.getModificationTime());
     json.put(HttpFSFileSystem.BLOCK_SIZE_JSON, fileStatus.getBlockSize());
     json.put(HttpFSFileSystem.REPLICATION_JSON, fileStatus.getReplication());
+    if (fileStatus instanceof HdfsFileStatus) {
+      // Add HDFS-specific fields to response
+      HdfsFileStatus hdfsFileStatus = (HdfsFileStatus) fileStatus;
+      json.put(HttpFSFileSystem.CHILDREN_NUM_JSON,
+          hdfsFileStatus.getChildrenNum());
+      json.put(HttpFSFileSystem.FILE_ID_JSON,
+          hdfsFileStatus.getFileId());
+      json.put(HttpFSFileSystem.STORAGEPOLICY_JSON,
+          hdfsFileStatus.getStoragePolicy());
+    }
     if (fileStatus.getPermission().getAclBit()) {
       json.put(HttpFSFileSystem.ACL_BIT_JSON, true);
     }
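
On the wire these values arrive as JSON numbers, which json-simple (the parser HttpFS uses) typically materializes as Long. A hypothetical client-side helper for pulling the new fields back out of one parsed entry could look like this sketch; it is illustrative only and not part of the patch:

    import org.apache.hadoop.fs.http.client.HttpFSFileSystem;
    import org.json.simple.JSONObject;

    // Hypothetical helper, not part of this patch: read the HDFS-specific
    // fields out of one parsed FileStatus JSON entry.
    final class HdfsFieldReader {
      static long childrenNum(JSONObject fileStatus) {
        return (Long) fileStatus.get(HttpFSFileSystem.CHILDREN_NUM_JSON);
      }
      static long fileId(JSONObject fileStatus) {
        return (Long) fileStatus.get(HttpFSFileSystem.FILE_ID_JSON);
      }
      static byte storagePolicy(JSONObject fileStatus) {
        // Storage policy ids are single-byte ids in HDFS.
        return ((Long) fileStatus.get(
            HttpFSFileSystem.STORAGEPOLICY_JSON)).byteValue();
      }
    }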

BaseTestHttpFSWith.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.HFSTestCase;
 import org.apache.hadoop.test.HadoopUsersConfTestHelper;

@@ -345,6 +346,7 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
 
   private void testListStatus() throws Exception {
     FileSystem fs = FileSystem.get(getProxiedFSConf());
+    boolean isDFS = fs instanceof DistributedFileSystem;
     Path path = new Path(getProxiedFSTestDir(), "foo.txt");
     OutputStream os = fs.create(path);
     os.write(1);

@@ -366,6 +368,18 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
     assertEquals(status2.getOwner(), status1.getOwner());
     assertEquals(status2.getGroup(), status1.getGroup());
     assertEquals(status2.getLen(), status1.getLen());
+    if (isDFS && status2 instanceof HdfsFileStatus) {
+      assertTrue(status1 instanceof HdfsFileStatus);
+      HdfsFileStatus hdfsFileStatus1 = (HdfsFileStatus) status1;
+      HdfsFileStatus hdfsFileStatus2 = (HdfsFileStatus) status2;
+      // Check HDFS-specific fields
+      assertEquals(hdfsFileStatus2.getChildrenNum(),
+          hdfsFileStatus1.getChildrenNum());
+      assertEquals(hdfsFileStatus2.getFileId(),
+          hdfsFileStatus1.getFileId());
+      assertEquals(hdfsFileStatus2.getStoragePolicy(),
+          hdfsFileStatus1.getStoragePolicy());
+    }
 
     FileStatus[] stati = fs.listStatus(path.getParent());
     assertEquals(1, stati.length);
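
Assuming a running HttpFS server, a quick manual probe of the new fields could look like the sketch below. The endpoint URI and path are placeholders, and the instanceof guard mirrors the test above, since only HDFS-backed statuses carry these fields:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    // Hypothetical probe, not part of this patch: list a directory through
    // an HttpFS endpoint (it speaks the WebHDFS REST API, default port
    // 14000) and print the HDFS-specific fields where available.
    public class ListStatusProbe {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://httpfs-host:14000"), new Configuration());
        for (FileStatus st : fs.listStatus(new Path("/tmp"))) {
          if (st instanceof HdfsFileStatus) {
            HdfsFileStatus h = (HdfsFileStatus) st;
            System.out.println(st.getPath() + " fileId=" + h.getFileId()
                + " childrenNum=" + h.getChildrenNum()
                + " storagePolicy=" + h.getStoragePolicy());
          }
        }
      }
    }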