svn merge -c 1483974 FIXES: MAPREDUCE-4927. Historyserver 500 error due to NPE when accessing specific counters page for failed job. Contributed by Ashwin Shankar
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1483976 13f79535-47bb-0310-9956-ffa450edef68
parent d180262cf9
commit 759e518509
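For context, the 500 comes from an unguarded call chain: for a failed job a task can report null counters, so chaining .getGroup(...) directly onto getCounters() throws a NullPointerException that the history server surfaces as an HTTP 500. Below is a minimal, self-contained sketch of that failure mode and of the null guard the patch applies; SimpleGroup, SimpleCounters and SimpleTask are illustrative stand-ins, not the Hadoop classes.

import java.util.HashMap;
import java.util.Map;

class NullCounterSketch {

  // Stand-in for a counter group: just a name -> value map.
  static class SimpleGroup {
    final Map<String, Long> counters = new HashMap<String, Long>();
    Long findCounter(String name) { return counters.get(name); }
  }

  // Stand-in for per-task counters; getGroup returns null for unknown groups.
  static class SimpleCounters {
    final Map<String, SimpleGroup> groups = new HashMap<String, SimpleGroup>();
    SimpleGroup getGroup(String name) { return groups.get(name); }
  }

  // Stand-in for a task; a failed task may report no counters at all (null).
  static class SimpleTask {
    private final SimpleCounters counters;
    SimpleTask(SimpleCounters counters) { this.counters = counters; }
    SimpleCounters getCounters() { return counters; }
  }

  public static void main(String[] args) {
    SimpleTask failedTask = new SimpleTask(null);  // no counters reported

    // Before the patch: the chained call dereferences null and throws.
    try {
      failedTask.getCounters().getGroup("FileSystemCounter");
    } catch (NullPointerException expected) {
      System.out.println("unguarded chain throws NPE");
    }

    // After the patch: split the chain and guard the possibly-null counters.
    SimpleCounters counters = failedTask.getCounters();
    SimpleGroup group = (counters != null)
        ? counters.getGroup("FileSystemCounter") : null;
    long value = 0;  // default used when the task reports no counters
    if (group != null) {
      Long c = group.findCounter("BYTES_READ");
      if (c != null) {
        value = c.longValue();
      }
    }
    System.out.println("guarded value: " + value);
  }
}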
@@ -261,6 +261,9 @@ Release 2.0.5-beta - UNRELEASED
     MAPREDUCE-5244. Two functions changed their visibility in JobStatus.
     (zjshen via tucu)
 
+    MAPREDUCE-4927. Historyserver 500 error due to NPE when accessing specific
+    counters page for failed job. (Ashwin Shankar via jlowe)
+
 Release 2.0.4-alpha - 2013-04-25
 
   INCOMPATIBLE CHANGES
@@ -832,6 +835,8 @@ Release 0.23.8 - UNRELEASED
     MAPREDUCE-5147. Maven build should create
     hadoop-mapreduce-client-app-VERSION.jar directly (Robert Parker via tgraves)
 
+    MAPREDUCE-4927. Historyserver 500 error due to NPE when accessing specific
+    counters page for failed job. (Ashwin Shankar via jlowe)
 
 Release 0.23.7 - UNRELEASED
 
@@ -143,8 +143,9 @@ public class SingleCounterBlock extends HtmlBlock {
       Map<TaskId, Task> tasks = job.getTasks();
       for(Map.Entry<TaskId, Task> entry : tasks.entrySet()) {
         long value = 0;
-        CounterGroup group = entry.getValue().getCounters()
-          .getGroup($(COUNTER_GROUP));
+        Counters counters = entry.getValue().getCounters();
+        CounterGroup group = (counters != null) ? counters
+          .getGroup($(COUNTER_GROUP)) : null;
         if(group != null) {
           Counter c = group.findCounter($(COUNTER_NAME));
           if(c != null) {
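Note on the design choice: the guard deliberately leaves group as null when a task reports no counters. The pre-existing if(group != null) check and the long value = 0 default already handle that case, so the per-task row can fall back to 0 instead of the whole page render aborting with an NPE.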
@@ -182,6 +182,11 @@ public class TestAMWebApp {
 
   @Test public void testSingleCounterView() {
     AppContext appContext = new TestAppContext();
+    Job job = appContext.getAllJobs().values().iterator().next();
+    // add a failed task to the job without any counters
+    Task failedTask = MockJobs.newTask(job.getID(), 2, 1, true);
+    Map<TaskId,Task> tasks = job.getTasks();
+    tasks.put(failedTask.getID(), failedTask);
     Map<String, String> params = getJobParams(appContext);
     params.put(AMParams.COUNTER_GROUP,
       "org.apache.hadoop.mapreduce.FileSystemCounter");
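With this change, testSingleCounterView renders the single-counter page against an app context that also contains a failed task with no counters; before the patch, that render path would have hit the unguarded getCounters().getGroup(...) chain fixed above.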