MAPREDUCE-5059. Change average merge time on Job overview page to be the time delta between the end of the shuffle and the start of the reduce. Contributed by Omkar Vinit Joshi.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1467120 13f79535-47bb-0310-9956-ffa450edef68
commit 5fd460e688
parent 390deffbaf
@@ -188,6 +188,10 @@ Release 2.0.5-beta - UNRELEASED
     MAPREDUCE-4981. Add WordMean, WordMedian, WordStandardDeviation
     to ExamplesDriver. (Plamen Jeliazkov via shv)
 
+    MAPREDUCE-5059. Change average merge time on Job overview page to be the
+    time delta between the end of the shuffle and the start of the reduce.
+    (Omkar Vinit Joshi via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -287,7 +287,7 @@ public class JobInfo {
       avgShuffleTime += (attempt.getShuffleFinishTime() - attempt
           .getLaunchTime());
       avgMergeTime += attempt.getSortFinishTime()
-          - attempt.getLaunchTime();
+          - attempt.getShuffleFinishTime();
       avgReduceTime += (attempt.getFinishTime() - attempt
           .getShuffleFinishTime());
     }
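The change affects only the merge component: shuffle time still runs from attempt launch to the end of the shuffle, and reduce time still runs from the end of the shuffle to attempt finish, but merge time now starts when the shuffle ends rather than at launch, so it no longer re-counts the entire shuffle phase. A minimal sketch of the per-attempt arithmetic implied by the hunk above (local variable names are illustrative; JobInfo accumulates these sums and presumably divides by the number of reduce attempts):

    // Per-attempt timestamps, in chronological order:
    //   launch <= shuffleFinish <= sortFinish <= finish
    long shuffleTime = attempt.getShuffleFinishTime() - attempt.getLaunchTime();
    // Old: sortFinish - launch (re-counted the whole shuffle phase).
    // New: sortFinish - shuffleFinish (merge phase only).
    long mergeTime = attempt.getSortFinishTime() - attempt.getShuffleFinishTime();
    long reduceTime = attempt.getFinishTime() - attempt.getShuffleFinishTime();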
@@ -94,11 +94,11 @@ public class TestJobHistoryEntities {
     assertEquals(1, completedJob.getAMInfos().size());
     assertEquals(10, completedJob.getCompletedMaps());
     assertEquals(1, completedJob.getCompletedReduces());
-    assertEquals(11, completedJob.getTasks().size());
+    assertEquals(12, completedJob.getTasks().size());
     //Verify tasks loaded at this point.
     assertEquals(true, completedJob.tasksLoaded.get());
     assertEquals(10, completedJob.getTasks(TaskType.MAP).size());
-    assertEquals(1, completedJob.getTasks(TaskType.REDUCE).size());
+    assertEquals(2, completedJob.getTasks(TaskType.REDUCE).size());
     assertEquals("user", completedJob.getUserName());
     assertEquals(JobState.SUCCEEDED, completedJob.getState());
     JobReport jobReport = completedJob.getReport();
@@ -119,7 +119,7 @@ public class TestJobHistoryEntities {
     Map<TaskId, Task> mapTasks = completedJob.getTasks(TaskType.MAP);
     Map<TaskId, Task> reduceTasks = completedJob.getTasks(TaskType.REDUCE);
     assertEquals(10, mapTasks.size());
-    assertEquals(1, reduceTasks.size());
+    assertEquals(2, reduceTasks.size());
 
     Task mt1 = mapTasks.get(mt1Id);
     assertEquals(1, mt1.getAttempts().size());
@@ -196,12 +196,12 @@ public class TestJobHistoryEntities {
     assertEquals("default",completedJob.getQueueName());
     // progress
     assertEquals(1.0, completedJob.getProgress(),0.001);
-    // 11 rows in answer
-    assertEquals(11,completedJob.getTaskAttemptCompletionEvents(0,1000).length);
+    // 12 rows in answer
+    assertEquals(12,completedJob.getTaskAttemptCompletionEvents(0,1000).length);
     // select first 10 rows
     assertEquals(10,completedJob.getTaskAttemptCompletionEvents(0,10).length);
     // select 5-10 rows include 5th
-    assertEquals(6,completedJob.getTaskAttemptCompletionEvents(5,10).length);
+    assertEquals(7,completedJob.getTaskAttemptCompletionEvents(5,10).length);
 
     // without errors
     assertEquals(1,completedJob.getDiagnostics().size());
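The raised expectations follow from the history file now containing 12 task attempts (10 maps plus 2 reduces). They are consistent with a simple window over the completion-event list: start at the given event id (inclusive) and return at most the requested number of events. A hypothetical sketch of that rule (method and parameter names are assumptions, not taken from the Hadoop source):

    // window(12, 0, 1000) == 12; window(12, 0, 10) == 10; window(12, 5, 10) == 7
    static int window(int total, int fromEventId, int maxEvents) {
      return Math.max(0, Math.min(total - fromEventId, maxEvents));
    }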
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;
+
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobACLsManager;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.hs.HistoryFileManager.HistoryFileInfo;
+import org.apache.hadoop.mapreduce.v2.hs.CompletedJob;
+import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEntities;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
+import org.junit.Test;
+
+public class TestJobInfo {
+
+  @Test(timeout = 10000)
+  public void testAverageMergeTime() throws IOException {
+    String historyFileName =
+        "job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist";
+    String confFileName =
+        "job_1329348432655_0001_conf.xml";
+    Configuration conf = new Configuration();
+    JobACLsManager jobAclsMgr = new JobACLsManager(conf);
+    Path fulleHistoryPath =
+        new Path(TestJobHistoryEntities.class.getClassLoader()
+            .getResource(historyFileName)
+            .getFile());
+    Path fullConfPath =
+        new Path(TestJobHistoryEntities.class.getClassLoader()
+            .getResource(confFileName)
+            .getFile());
+
+    HistoryFileInfo info = mock(HistoryFileInfo.class);
+    when(info.getConfFile()).thenReturn(fullConfPath);
+
+    JobId jobId = MRBuilderUtils.newJobId(1329348432655l, 1, 1);
+    CompletedJob completedJob =
+        new CompletedJob(conf, jobId, fulleHistoryPath, true, "user",
+            info, jobAclsMgr);
+    JobInfo jobInfo = new JobInfo(completedJob);
+    // There are 2 tasks with merge time of 45 and 55 respectively. So average
+    // merge time should be 50.
+    Assert.assertEquals(50L, jobInfo.getAvgMergeTime().longValue());
+  }
+}
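The new test pins the corrected metric to known data: per the comment in the test, the bundled .jhist file holds two reduce attempts with merge times of 45 and 55, so the expected average is their mean (assuming integer division over the attempt count, as sketched below):

    // (45 + 55) / 2 == 50, matching the assertion above
    long avgMergeTime = (45L + 55L) / 2;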