diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index df85179ccc3..d250b9a2f05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -430,6 +430,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3109. Remove hsqldf exclusions from pom.xml. (Ravi Prakash via suresh)
 
+    HDFS-3210. JsonUtil#toJsonMap for a DatanodeInfo should use
+    "ipAddr" instead of "name". (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
index 22e0851f05d..963e535b99a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
@@ -122,12 +122,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   /** Constructor */
-  public DatanodeInfo(final String name, final String hostName,
+  public DatanodeInfo(final String ipAddr, final String hostName,
       final String storageID, final int xferPort, final int infoPort,
       final int ipcPort, final long capacity, final long dfsUsed,
       final long remaining, final long blockPoolUsed, final long lastUpdate,
       final int xceiverCount, final String networkLocation, final AdminStates adminState) {
-    super(name, hostName, storageID, xferPort, infoPort, ipcPort);
+    super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 45c4445e84b..9218078a482 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -305,7 +305,7 @@ public class JsonUtil {
     }
 
     return new DatanodeInfo(
-        (String)m.get("name"),
+        (String)m.get("ipAddr"),
         (String)m.get("hostName"),
         (String)m.get("storageID"),
         (int)(long)(Long)m.get("xferPort"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index a216713ac55..04ffd10b35d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Arrays;
 import java.util.Map;
 
 import javax.servlet.http.HttpServletResponse;
@@ -133,8 +134,20 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
         new Path(f), 0L, 1L);
     assertEquals(expected.length, computed.length);
-    for(int i = 0; i < computed.length; i++) {
+    for (int i = 0; i < computed.length; i++) {
       assertEquals(expected[i].toString(), computed[i].toString());
+      // Check names
+      String[] names1 = expected[i].getNames();
+      String[] names2 = computed[i].getNames();
+      Arrays.sort(names1);
+      Arrays.sort(names2);
+      Assert.assertArrayEquals("Names differ", names1, names2);
+      // Check topology
+      String[] topos1 = expected[i].getTopologyPaths();
+      String[] topos2 = computed[i].getTopologyPaths();
+      Arrays.sort(topos1);
+      Arrays.sort(topos2);
+      Assert.assertArrayEquals("Topology differs", topos1, topos2);
     }
   }
 
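
Reviewer note (not part of the patch): the JsonUtil hunk fixes a key mismatch on the WebHDFS JSON round trip. After DatanodeID was refactored to carry an ipAddr field separately from the old combined "name" (host:port) string, the JSON map is keyed by "ipAddr", but the deserializer was still reading "name". Below is a minimal, self-contained sketch of that failure mode; the class name and map contents are hypothetical, not the real JsonUtil API.

    import java.util.HashMap;
    import java.util.Map;

    // Illustrative only: shows why reading the wrong key silently loses
    // the datanode address rather than failing loudly.
    public class JsonKeyMismatchSketch {
      public static void main(String[] args) {
        Map<String, Object> m = new HashMap<String, Object>();
        m.put("ipAddr", "10.0.0.1");          // key the serializer writes
        m.put("hostName", "dn1.example.com");

        System.out.println((String) m.get("name"));    // old lookup: null
        System.out.println((String) m.get("ipAddr"));  // fixed lookup: 10.0.0.1
      }
    }

Because Map#get on a missing key returns null rather than throwing, the pre-patch code built DatanodeInfo objects with a null address, which is exactly the kind of regression the strengthened test below guards against.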
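A second note on the test change: the new assertions sort both arrays before comparing because Assert.assertArrayEquals is order-sensitive, while block replica locations are not guaranteed to come back in any particular order. A small standalone sketch of that pattern, with made-up values:

    import java.util.Arrays;
    import org.junit.Assert;

    // Order-insensitive array comparison, as used in the new assertions:
    // sorting both sides first tolerates replicas listed in different orders.
    public class SortedCompareSketch {
      public static void main(String[] args) {
        String[] a = {"10.0.0.2:50010", "10.0.0.1:50010"};
        String[] b = {"10.0.0.1:50010", "10.0.0.2:50010"};
        Arrays.sort(a);
        Arrays.sort(b);
        Assert.assertArrayEquals("Names differ", a, b);  // passes after sorting
      }
    }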