diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 20fa890695c..536e186a274 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1095,6 +1095,8 @@ Release 0.23.0 - Unreleased
 
     HDFS-2409. _HOST in dfs.web.authentication.kerberos.principal. (jitendra)
 
+    HDFS-2404. webhdfs liststatus json response is not correct. (suresh)
+
   BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index e5f3c02f20f..a624236a87e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -424,7 +424,7 @@ public class NamenodeWebHdfsMethods {
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
-      final String js = JsonUtil.toJsonString(status);
+      final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case LISTSTATUS:
@@ -480,22 +480,22 @@ public class NamenodeWebHdfsMethods {
       @Override
      public void write(final OutputStream outstream) throws IOException {
        final PrintStream out = new PrintStream(outstream);
-        out.println("{\"" + HdfsFileStatus[].class.getSimpleName() + "\":[");
+        out.println("{\"" + HdfsFileStatus.class.getSimpleName() + "\":[");
 
        final HdfsFileStatus[] partial = first.getPartialListing();
        if (partial.length > 0) {
-          out.print(JsonUtil.toJsonString(partial[0]));
+          out.print(JsonUtil.toJsonString(partial[0], false));
        }
        for(int i = 1; i < partial.length; i++) {
          out.println(',');
-          out.print(JsonUtil.toJsonString(partial[i]));
+          out.print(JsonUtil.toJsonString(partial[i], false));
        }
 
        for(DirectoryListing curr = first; curr.hasMore(); ) {
          curr = getDirectoryListing(np, p, curr.getLastName());
          for(HdfsFileStatus s : curr.getPartialListing()) {
            out.println(',');
-            out.print(JsonUtil.toJsonString(s));
+            out.print(JsonUtil.toJsonString(s, false));
          }
        }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index adf639c32bd..0efc3d2e8ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -133,37 +133,39 @@ public class JsonUtil {
   }
 
   /** Convert a HdfsFileStatus object to a Json string. */
-  public static String toJsonString(final HdfsFileStatus status) {
+  public static String toJsonString(final HdfsFileStatus status,
+      boolean includeType) {
     if (status == null) {
       return null;
-    } else {
-      final Map m = new TreeMap();
-      m.put("localName", status.getLocalName());
-      m.put("isDir", status.isDir());
-      m.put("isSymlink", status.isSymlink());
-      if (status.isSymlink()) {
-        m.put("symlink", status.getSymlink());
-      }
-
-      m.put("len", status.getLen());
-      m.put("owner", status.getOwner());
-      m.put("group", status.getGroup());
-      m.put("permission", toString(status.getPermission()));
-      m.put("accessTime", status.getAccessTime());
-      m.put("modificationTime", status.getModificationTime());
-      m.put("blockSize", status.getBlockSize());
-      m.put("replication", status.getReplication());
-      return toJsonString(HdfsFileStatus.class, m);
     }
+    final Map m = new TreeMap();
+    m.put("localName", status.getLocalName());
+    m.put("isDir", status.isDir());
+    m.put("isSymlink", status.isSymlink());
+    if (status.isSymlink()) {
+      m.put("symlink", status.getSymlink());
+    }
+
+    m.put("len", status.getLen());
+    m.put("owner", status.getOwner());
+    m.put("group", status.getGroup());
+    m.put("permission", toString(status.getPermission()));
+    m.put("accessTime", status.getAccessTime());
+    m.put("modificationTime", status.getModificationTime());
+    m.put("blockSize", status.getBlockSize());
+    m.put("replication", status.getReplication());
+    return includeType ? toJsonString(HdfsFileStatus.class, m) :
+        JSON.toString(m);
   }
 
   /** Convert a Json map to a HdfsFileStatus object. */
-  public static HdfsFileStatus toFileStatus(final Map json) {
+  public static HdfsFileStatus toFileStatus(final Map json, boolean includesType) {
     if (json == null) {
       return null;
     }
 
-    final Map m = (Map)json.get(HdfsFileStatus.class.getSimpleName());
+    final Map m = includesType ?
+        (Map)json.get(HdfsFileStatus.class.getSimpleName()) : json;
     final String localName = (String) m.get("localName");
     final boolean isDir = (Boolean) m.get("isDir");
     final boolean isSymlink = (Boolean) m.get("isSymlink");
@@ -287,7 +289,7 @@ public class JsonUtil {
       return array;
     }
   }
-  
+
   /** Convert a LocatedBlock to a Json map. */
   private static Map toJsonMap(final LocatedBlock locatedblock
       ) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 27d6fe166ea..1cd20f1468f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -253,7 +253,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
   private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
     final Map json = run(op, f);
-    final HdfsFileStatus status = JsonUtil.toFileStatus(json);
+    final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
     if (status == null) {
       throw new FileNotFoundException("File does not exist: " + f);
     }
@@ -405,14 +405,14 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     final Map json = run(op, f);
     final Object[] array = (Object[])json.get(
-        HdfsFileStatus[].class.getSimpleName());
+        HdfsFileStatus.class.getSimpleName());
 
     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
     for(int i = 0; i < array.length; i++) {
       @SuppressWarnings("unchecked")
       final Map m = (Map)array[i];
-      statuses[i] = makeQualified(JsonUtil.toFileStatus(m), f);
+      statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
     }
     return statuses;
   }
@@ -472,4 +472,4 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     final Map m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index a4b687d5e71..7f6aa36a6ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -46,9 +46,9 @@ public class TestJsonUtil {
     final FileStatus fstatus = toFileStatus(status, parent);
     System.out.println("status = " + status);
     System.out.println("fstatus = " + fstatus);
-    final String json = JsonUtil.toJsonString(status);
+    final String json = JsonUtil.toJsonString(status, true);
     System.out.println("json = " + json.replace(",", ",\n  "));
-    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map)JSON.parse(json));
+    final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map)JSON.parse(json), true);
     final FileStatus fs2 = toFileStatus(s2, parent);
     System.out.println("s2 = " + s2);
     System.out.println("fs2 = " + fs2);
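
Note (not part of the patch): after this change, a LISTSTATUS response nests bare, untyped status maps under a single "HdfsFileStatus" key, while GETFILESTATUS keeps the typed wrapper produced by toJsonString(status, true). The sketch below shows how a client could parse the corrected listing shape with the same Jetty JSON utility the patch uses (org.mortbay.util.ajax.JSON, whose parse() returns Maps for objects and Object[] for arrays, exactly as WebHdfsFileSystem.listStatus() assumes). The class name and sample payload are illustrative only.

// Illustrative only -- not part of the patch. Demonstrates the corrected
// LISTSTATUS wire format: one top-level "HdfsFileStatus" key holding an
// array of bare file-status maps, each written by the NameNode via
// JsonUtil.toJsonString(status, false).
import java.util.Map;
import org.mortbay.util.ajax.JSON;

public class ListStatusJsonShape {  // hypothetical demo class
  public static void main(String[] args) {
    // Sample payload in the post-patch shape (field values made up).
    final String response = "{\"HdfsFileStatus\":["
        + "{\"isDir\":false,\"len\":1024,\"localName\":\"a.txt\"},"
        + "{\"isDir\":true,\"len\":0,\"localName\":\"subdir\"}]}";

    // Same parsing steps as WebHdfsFileSystem.listStatus(): the top-level
    // key is now HdfsFileStatus.class.getSimpleName(), not the array type.
    final Map json = (Map) JSON.parse(response);
    final Object[] array = (Object[]) json.get("HdfsFileStatus");
    for (Object entry : array) {
      final Map m = (Map) entry;  // each entry is a bare status map
      System.out.println(m.get("localName") + ": isDir=" + m.get("isDir"));
    }
  }
}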