HDFS-2404. webhdfs liststatus json response is not correct. Contributed by Suresh Srinivas.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1180757 13f79535-47bb-0310-9956-ffa450edef68
commit 676f488eff
parent a09a2b48cd
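
The gist of the fix, as read from the diff below: the LISTSTATUS response previously
keyed its array by HdfsFileStatus[].class.getSimpleName() ("HdfsFileStatus[]") and
serialized each element with toJsonString(status), which wraps the map under the
class name a second time. After this change the array key is "HdfsFileStatus" and
the elements are bare status maps. Roughly, with illustrative field values:

    before:  {"HdfsFileStatus[]":[{"HdfsFileStatus":{"localName":"a",...}},...]}
    after:   {"HdfsFileStatus":[{"localName":"a","isDir":false,...},...]}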
@@ -86,11 +86,11 @@ Trunk (unreleased changes)
     there are two SC_START_IN_CTOR findbugs warnings. (szetszwo)
 
     HDFS-2330. In NNStorage and FSImagePreTransactionalStorageInspector,
     IOExceptions of stream closures can mask root exceptions. (Uma Maheswara
     Rao G via szetszwo)
 
     HDFS-46. Change default namespace quota of root directory from
     Integer.MAX_VALUE to Long.MAX_VALUE. (Uma Maheswara Rao G via szetszwo)
 
     HDFS-2366. Initialize WebHdfsFileSystem.ugi in object construction.
     (szetszwo)
@@ -104,10 +104,12 @@ Trunk (unreleased changes)
     not include multiple methods of the same name. (cutting)
 
     HDFS-2403. NamenodeWebHdfsMethods.generateDelegationToken(..) does not use
     the renewer parameter. (szetszwo)
 
     HDFS-2409. _HOST in dfs.web.authentication.kerberos.principal. (jitendra)
 
+    HDFS-2404. webhdfs liststatus json response is not correct. (suresh)
+
 Release 0.23.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -424,7 +424,7 @@ public class NamenodeWebHdfsMethods {
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
-      final String js = JsonUtil.toJsonString(status);
+      final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case LISTSTATUS:
@@ -480,22 +480,22 @@ public class NamenodeWebHdfsMethods {
       @Override
       public void write(final OutputStream outstream) throws IOException {
         final PrintStream out = new PrintStream(outstream);
-        out.println("{\"" + HdfsFileStatus[].class.getSimpleName() + "\":[");
+        out.println("{\"" + HdfsFileStatus.class.getSimpleName() + "\":[");
 
         final HdfsFileStatus[] partial = first.getPartialListing();
         if (partial.length > 0) {
-          out.print(JsonUtil.toJsonString(partial[0]));
+          out.print(JsonUtil.toJsonString(partial[0], false));
         }
         for(int i = 1; i < partial.length; i++) {
           out.println(',');
-          out.print(JsonUtil.toJsonString(partial[i]));
+          out.print(JsonUtil.toJsonString(partial[i], false));
         }
 
         for(DirectoryListing curr = first; curr.hasMore(); ) {
           curr = getDirectoryListing(np, p, curr.getLastName());
           for(HdfsFileStatus s : curr.getPartialListing()) {
             out.println(',');
-            out.print(JsonUtil.toJsonString(s));
+            out.print(JsonUtil.toJsonString(s, false));
           }
         }
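
The comma handling above streams the listing as a JSON array without buffering it:
the first element is printed with no separator and every later element is preceded
by a comma, so no trailing comma is ever emitted. A minimal, self-contained sketch
of the same pattern (class and method names invented for illustration; this is not
the HDFS code):

    import java.io.PrintStream;

    public class JsonArrayStream {
      /** Print jsonItems as {"key":[item0,item1,...]} without buffering. */
      static void writeArray(PrintStream out, String key, String[] jsonItems) {
        out.println("{\"" + key + "\":[");
        if (jsonItems.length > 0) {
          out.print(jsonItems[0]);        // first element: no leading comma
        }
        for (int i = 1; i < jsonItems.length; i++) {
          out.println(',');               // comma precedes each later element
          out.print(jsonItems[i]);
        }
        out.println("]}");
      }

      public static void main(String[] args) {
        writeArray(System.out, "HdfsFileStatus",
            new String[]{"{\"localName\":\"a\"}", "{\"localName\":\"b\"}"});
      }
    }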
@@ -133,37 +133,39 @@ public class JsonUtil {
   }
 
   /** Convert a HdfsFileStatus object to a Json string. */
-  public static String toJsonString(final HdfsFileStatus status) {
+  public static String toJsonString(final HdfsFileStatus status,
+      boolean includeType) {
     if (status == null) {
       return null;
-    } else {
-      final Map<String, Object> m = new TreeMap<String, Object>();
-      m.put("localName", status.getLocalName());
-      m.put("isDir", status.isDir());
-      m.put("isSymlink", status.isSymlink());
-      if (status.isSymlink()) {
-        m.put("symlink", status.getSymlink());
-      }
-
-      m.put("len", status.getLen());
-      m.put("owner", status.getOwner());
-      m.put("group", status.getGroup());
-      m.put("permission", toString(status.getPermission()));
-      m.put("accessTime", status.getAccessTime());
-      m.put("modificationTime", status.getModificationTime());
-      m.put("blockSize", status.getBlockSize());
-      m.put("replication", status.getReplication());
-      return toJsonString(HdfsFileStatus.class, m);
     }
+    final Map<String, Object> m = new TreeMap<String, Object>();
+    m.put("localName", status.getLocalName());
+    m.put("isDir", status.isDir());
+    m.put("isSymlink", status.isSymlink());
+    if (status.isSymlink()) {
+      m.put("symlink", status.getSymlink());
+    }
+
+    m.put("len", status.getLen());
+    m.put("owner", status.getOwner());
+    m.put("group", status.getGroup());
+    m.put("permission", toString(status.getPermission()));
+    m.put("accessTime", status.getAccessTime());
+    m.put("modificationTime", status.getModificationTime());
+    m.put("blockSize", status.getBlockSize());
+    m.put("replication", status.getReplication());
+    return includeType ? toJsonString(HdfsFileStatus.class, m) :
+        JSON.toString(m);
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  public static HdfsFileStatus toFileStatus(final Map<?, ?> json) {
+  public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
     if (json == null) {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName());
+    final Map<?, ?> m = includesType ?
+        (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName()) : json;
     final String localName = (String) m.get("localName");
     final boolean isDir = (Boolean) m.get("isDir");
     final boolean isSymlink = (Boolean) m.get("isSymlink");
@@ -287,7 +289,7 @@ public class JsonUtil {
       return array;
     }
   }
 
   /** Convert a LocatedBlock to a Json map. */
   private static Map<String, Object> toJsonMap(final LocatedBlock locatedblock
       ) throws IOException {
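
The new includeType/includesType flags control whether the status map travels
wrapped under its class's simple name. A rough sketch of the serializing side,
assuming toJsonString(Class, Map) nests the map under type.getSimpleName() as the
surrounding code suggests (the demo class and helper below are invented):

    import java.util.Map;
    import java.util.TreeMap;
    import org.mortbay.util.ajax.JSON;

    public class TypeWrapDemo {
      /** Wrap m as {"SimpleName":{...}} when includeType is set, else emit m bare. */
      static String toJson(Map<String, Object> m, Class<?> type, boolean includeType) {
        if (!includeType) {
          return JSON.toString(m);
        }
        final Map<String, Object> wrapper = new TreeMap<String, Object>();
        wrapper.put(type.getSimpleName(), m);
        return JSON.toString(wrapper);
      }
    }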
@@ -253,7 +253,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
   private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
     final Map<String, Object> json = run(op, f);
-    final HdfsFileStatus status = JsonUtil.toFileStatus(json);
+    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
     if (status == null) {
       throw new FileNotFoundException("File does not exist: " + f);
     }
@@ -405,14 +405,14 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     final Map<?, ?> json = run(op, f);
     final Object[] array = (Object[])json.get(
-        HdfsFileStatus[].class.getSimpleName());
+        HdfsFileStatus.class.getSimpleName());
 
     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
     for(int i = 0; i < array.length; i++) {
       @SuppressWarnings("unchecked")
       final Map<String, Object> m = (Map<String, Object>)array[i];
-      statuses[i] = makeQualified(JsonUtil.toFileStatus(m), f);
+      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
     }
     return statuses;
   }
@@ -472,4 +472,4 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     final Map<String, Object> m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
 }
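
On the read side, Jetty's org.mortbay.util.ajax.JSON (the utility this code and
the test below use) parses JSON objects into Map and JSON arrays into Object[],
which is why listStatus casts json.get(...) to Object[]. A small sketch of parsing
the corrected response shape (the response string here is invented):

    import java.util.Map;
    import org.mortbay.util.ajax.JSON;

    public class ListStatusParseDemo {
      public static void main(String[] args) {
        // Corrected LISTSTATUS shape: array keyed by the element class name.
        final String response =
            "{\"HdfsFileStatus\":[{\"localName\":\"a\"},{\"localName\":\"b\"}]}";
        final Map<?, ?> json = (Map<?, ?>) JSON.parse(response);
        final Object[] array = (Object[]) json.get("HdfsFileStatus");
        for (Object element : array) {
          final Map<?, ?> m = (Map<?, ?>) element;
          System.out.println(m.get("localName"));  // prints a, then b
        }
      }
    }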
@@ -46,9 +46,9 @@ public class TestJsonUtil {
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);
-    final String json = JsonUtil.toJsonString(status);
+    final String json = JsonUtil.toJsonString(status, true);
     System.out.println("json = " + json.replace(",", ",\n "));
-    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json));
+    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2 = " + s2);
     System.out.println("fs2 = " + fs2);