HDFS-3210. svn merge -c 1310135 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1310137 13f79535-47bb-0310-9956-ffa450edef68
parent 401d4b3584
commit 879c0ec2b0

@@ -322,6 +322,9 @@ Release 2.0.0 - UNRELEASED
     HDFS-3109. Remove hsqldb exclusions from pom.xml. (Ravi Prakash
     via suresh)
 
+    HDFS-3210. JsonUtil#toJsonMap for a DatanodeInfo should use
+    "ipAddr" instead of "name". (eli)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
@@ -122,12 +122,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   /** Constructor */
-  public DatanodeInfo(final String name, final String hostName,
+  public DatanodeInfo(final String ipAddr, final String hostName,
       final String storageID, final int xferPort, final int infoPort, final int ipcPort,
       final long capacity, final long dfsUsed, final long remaining,
       final long blockPoolUsed, final long lastUpdate, final int xceiverCount,
       final String networkLocation, final AdminStates adminState) {
-    super(name, hostName, storageID, xferPort, infoPort, ipcPort);
+    super(ipAddr, hostName, storageID, xferPort, infoPort, ipcPort);
     this.capacity = capacity;
     this.dfsUsed = dfsUsed;
     this.remaining = remaining;
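For illustration, a sketch of calling the renamed constructor. The signature is taken from the hunk above; every argument value, the hostnames, and the rack path are made-up examples, not values from this commit:

import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;

// The first argument is now an IP address rather than the old combined
// "name" (host:port) string; it is forwarded to the DatanodeID superclass.
DatanodeInfo info = new DatanodeInfo(
    "10.0.0.1",                 // ipAddr (formerly "name"); made-up value
    "dn1.example.com",          // hostName (made-up)
    "DS-1234",                  // storageID (made-up)
    50010, 50075, 50020,        // xferPort, infoPort, ipcPort
    1024L, 512L, 512L,          // capacity, dfsUsed, remaining
    256L,                       // blockPoolUsed
    System.currentTimeMillis(), // lastUpdate
    1,                          // xceiverCount
    "/default-rack",            // networkLocation (made-up)
    AdminStates.NORMAL);        // adminState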
@@ -305,7 +305,7 @@ public class JsonUtil {
     }
 
     return new DatanodeInfo(
-        (String)m.get("name"),
+        (String)m.get("ipAddr"),
        (String)m.get("hostName"),
        (String)m.get("storageID"),
        (int)(long)(Long)m.get("xferPort"),
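The hunk above is the deserializing side; per the summary line, JsonUtil#toJsonMap now writes the datanode's address under "ipAddr", so the reader must fetch the same key. A self-contained sketch of the mismatch being fixed, with a plain Map standing in for the parsed JSON:

import java.util.HashMap;
import java.util.Map;

Map<String, Object> m = new HashMap<String, Object>();
m.put("ipAddr", "10.0.0.1");              // key the serializer writes

String broken = (String) m.get("name");   // null: reader used the old key
String fixed  = (String) m.get("ipAddr"); // "10.0.0.1": keys now match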
@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.HttpURLConnection;
 import java.net.URL;
+import java.util.Arrays;
 import java.util.Map;
 
 import javax.servlet.http.HttpServletResponse;
@@ -133,8 +134,20 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
     final BlockLocation[] expected = cluster.getFileSystem().getFileBlockLocations(
         new Path(f), 0L, 1L);
     assertEquals(expected.length, computed.length);
-    for(int i = 0; i < computed.length; i++) {
+    for (int i = 0; i < computed.length; i++) {
       assertEquals(expected[i].toString(), computed[i].toString());
+      // Check names
+      String names1[] = expected[i].getNames();
+      String names2[] = computed[i].getNames();
+      Arrays.sort(names1);
+      Arrays.sort(names2);
+      Assert.assertArrayEquals("Names differ", names1, names2);
+      // Check topology
+      String topos1[] = expected[i].getTopologyPaths();
+      String topos2[] = computed[i].getTopologyPaths();
+      Arrays.sort(topos1);
+      Arrays.sort(topos2);
+      Assert.assertArrayEquals("Topology differs", topos1, topos2);
     }
   }
 
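The added assertions are order-insensitive: sorting both arrays before Assert.assertArrayEquals makes the test robust to datanodes being reported in a different order. The same pattern in isolation (JUnit 4; the array contents are made up):

import java.util.Arrays;
import org.junit.Assert;

String[] expectedNames = {"10.0.0.2:50010", "10.0.0.1:50010"};
String[] computedNames = {"10.0.0.1:50010", "10.0.0.2:50010"};

// Sorting first means only set membership matters, not ordering.
Arrays.sort(expectedNames);
Arrays.sort(computedNames);
Assert.assertArrayEquals("Names differ", expectedNames, computedNames);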