MAPREDUCE-5729. mapred job -list throws NPE (kasha)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1559811 13f79535-47bb-0310-9956-ffa450edef68
Karthik Kambatla 2014-01-20 19:29:41 +00:00
parent 602f71a8da
commit c5a241f1dd
3 changed files with 24 additions and 7 deletions
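
For context (not part of the diff below): `mapred job -list` ends up converting each YARN ApplicationReport into a MapReduce JobStatus via TypeConverter.fromYarn, and the RM can hand back a report that carries no ApplicationResourceUsageReport, which is what triggered the NPE. A minimal sketch of the before/after shape of the fix, using only the getters that appear in the patch; the class and method names of the sketch itself are illustrative, not part of Hadoop:

import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;

// Illustrative sketch only; not code from this commit.
final class UsageReportGuardSketch {
  // Pre-patch shape: chained getters throw NullPointerException when the
  // report carries no usage data.
  static int neededMemUnsafe(ApplicationReport report) {
    return report.getApplicationResourceUsageReport()
        .getNeededResources().getMemory();
  }

  // Post-patch shape: hoist the usage report into a local, guard it, and
  // fall back to a default when the RM returned no usage data.
  static int neededMemGuarded(ApplicationReport report) {
    ApplicationResourceUsageReport usage =
        report.getApplicationResourceUsageReport();
    return (usage == null) ? 0 : usage.getNeededResources().getMemory();
  }
}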


@@ -280,6 +280,8 @@ Release 2.4.0 - UNRELEASED
     MAPREDUCE-5724. JobHistoryServer does not start if HDFS is not running.
     (tucu)
 
+    MAPREDUCE-5729. mapred job -list throws NPE (kasha)
+
 Release 2.3.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


@@ -43,6 +43,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
+import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.QueueACL;
@@ -445,11 +446,18 @@ public class TypeConverter {
     jobStatus.setStartTime(application.getStartTime());
     jobStatus.setFinishTime(application.getFinishTime());
     jobStatus.setFailureInfo(application.getDiagnostics());
-    jobStatus.setNeededMem(application.getApplicationResourceUsageReport().getNeededResources().getMemory());
-    jobStatus.setNumReservedSlots(application.getApplicationResourceUsageReport().getNumReservedContainers());
-    jobStatus.setNumUsedSlots(application.getApplicationResourceUsageReport().getNumUsedContainers());
-    jobStatus.setReservedMem(application.getApplicationResourceUsageReport().getReservedResources().getMemory());
-    jobStatus.setUsedMem(application.getApplicationResourceUsageReport().getUsedResources().getMemory());
+    ApplicationResourceUsageReport resourceUsageReport =
+        application.getApplicationResourceUsageReport();
+    if (resourceUsageReport != null) {
+      jobStatus.setNeededMem(
+          resourceUsageReport.getNeededResources().getMemory());
+      jobStatus.setNumReservedSlots(
+          resourceUsageReport.getNumReservedContainers());
+      jobStatus.setNumUsedSlots(resourceUsageReport.getNumUsedContainers());
+      jobStatus.setReservedMem(
+          resourceUsageReport.getReservedResources().getMemory());
+      jobStatus.setUsedMem(resourceUsageReport.getUsedResources().getMemory());
+    }
     return jobStatus;
   }
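
With the guard in place, fromYarn no longer throws when the RM returns an ApplicationReport without usage data; the memory and slot fields of the resulting JobStatus are simply left at their defaults instead of being populated. The new unit test below exercises exactly this path: the mocked report never stubs getApplicationResourceUsageReport(), so it returns null during the first conversion.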


@@ -23,8 +23,6 @@ import static org.mockito.Mockito.when;
 import java.util.ArrayList;
 import java.util.List;
 
-import junit.framework.Assert;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.mapreduce.JobStatus.State;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -40,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.QueueState;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.util.Records;
+import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -112,6 +111,14 @@ public class TestTypeConverter {
     when(mockReport.getUser()).thenReturn("dummy-user");
     when(mockReport.getQueue()).thenReturn("dummy-queue");
     String jobFile = "dummy-path/job.xml";
+
+    try {
+      JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
+    } catch (NullPointerException npe) {
+      Assert.fail("Type conversion from YARN fails for jobs without " +
+          "ApplicationUsageReport");
+    }
+
     ApplicationResourceUsageReport appUsageRpt = Records
         .newRecord(ApplicationResourceUsageReport.class);
     Resource r = Records.newRecord(Resource.class);