From 5922f4f064db22df396397780820621cfc21b68a Mon Sep 17 00:00:00 2001
From: Robert Joseph Evans
Date: Fri, 11 May 2012 20:27:58 +0000
Subject: [PATCH] svn merge -c 1337363 FIXES: MAPREDUCE-4233. NPE can happen in RMNMNodeInfo. (bobby)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1337364 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt          |  2 +
 .../hadoop/mapreduce/v2/TestRMNMInfo.java     | 43 ++++++++++++++++++-
 .../yarn/server/resourcemanager/RMNMInfo.java | 10 +++--
 3 files changed, 49 insertions(+), 6 deletions(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 171aed484c9..91dd5ba14e3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -372,6 +372,8 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4237. TestNodeStatusUpdater can fail if localhost has a domain
     associated with it (bobby)
 
+    MAPREDUCE-4233. NPE can happen in RMNMNodeInfo. (bobby)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
index d3b47520715..4ee485644d9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestRMNMInfo.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.mapreduce.v2;
 import java.io.File;
 import java.io.IOException;
 import java.util.Iterator;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ConcurrentMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -28,8 +30,11 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.RMNMInfo;
+import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.codehaus.jackson.JsonNode;
 import org.codehaus.jackson.map.ObjectMapper;
@@ -37,6 +42,7 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import static org.mockito.Mockito.*;
 
 public class TestRMNMInfo {
   private static final Log LOG = LogFactory.getLog(TestRMNMInfo.class);
@@ -116,14 +122,47 @@ public class TestRMNMInfo {
                   n.get("HealthStatus").getValueAsText().contains("Healthy"));
           Assert.assertNotNull(n.get("LastHealthUpdate"));
           Assert.assertNotNull(n.get("HealthReport"));
-          Assert.assertNotNull(n.get("NumContainersMB"));
+          Assert.assertNotNull(n.get("NumContainers"));
           Assert.assertEquals(
                   n.get("NodeId") + ": Unexpected number of used containers",
-                  0, n.get("NumContainersMB").getValueAsInt());
+                  0, n.get("NumContainers").getValueAsInt());
           Assert.assertEquals(
                   n.get("NodeId") + ": Unexpected amount of used memory",
                   0, n.get("UsedMemoryMB").getValueAsInt());
           Assert.assertNotNull(n.get("AvailableMemoryMB"));
       }
   }
+
+  @Test
+  public void testRMNMInfoMissmatch() throws Exception {
+    RMContext rmc = mock(RMContext.class);
+    ResourceScheduler rms = mock(ResourceScheduler.class);
+    ConcurrentMap<NodeId, RMNode> map = new ConcurrentHashMap<NodeId, RMNode>();
+    RMNode node = MockNodes.newNodeInfo(1, MockNodes.newResource(4 * 1024));
+    map.put(node.getNodeID(), node);
+    when(rmc.getRMNodes()).thenReturn(map);
+
+    RMNMInfo rmInfo = new RMNMInfo(rmc,rms);
+    String liveNMs = rmInfo.getLiveNodeManagers();
+    ObjectMapper mapper = new ObjectMapper();
+    JsonNode jn = mapper.readTree(liveNMs);
+    Assert.assertEquals("Unexpected number of live nodes:",
+                                               1, jn.size());
+    Iterator<JsonNode> it = jn.iterator();
+    while (it.hasNext()) {
+      JsonNode n = it.next();
+      Assert.assertNotNull(n.get("HostName"));
+      Assert.assertNotNull(n.get("Rack"));
+      Assert.assertTrue("Node " + n.get("NodeId") + " should be RUNNING",
+              n.get("State").getValueAsText().contains("RUNNING"));
+      Assert.assertNotNull(n.get("NodeHTTPAddress"));
+      Assert.assertTrue("Node " + n.get("NodeId") + " should be Healthy",
+              n.get("HealthStatus").getValueAsText().contains("Healthy"));
+      Assert.assertNotNull(n.get("LastHealthUpdate"));
+      Assert.assertNotNull(n.get("HealthReport"));
+      Assert.assertNull(n.get("NumContainers"));
+      Assert.assertNull(n.get("UsedMemoryMB"));
+      Assert.assertNull(n.get("AvailableMemoryMB"));
+    }
+  }
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
index 34d203578f3..0db42e40ec0 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
@@ -93,10 +93,12 @@ public class RMNMInfo implements RMNMInfoBeans {
                 ni.getNodeHealthStatus().getLastHealthReportTime());
         info.put("HealthReport",
                 ni.getNodeHealthStatus().getHealthReport());
-        info.put("NumContainersMB", report.getNumContainers());
-        info.put("UsedMemoryMB", report.getUsedResource().getMemory());
-        info.put("AvailableMemoryMB",
-                report.getAvailableResource().getMemory());
+        if(report != null) {
+          info.put("NumContainers", report.getNumContainers());
+          info.put("UsedMemoryMB", report.getUsedResource().getMemory());
+          info.put("AvailableMemoryMB",
+                  report.getAvailableResource().getMemory());
+        }
         nodesInfo.add(info);
     }
 