MAPREDUCE-4604. In mapred-default, mapreduce.map.maxattempts & mapreduce.reduce.maxattempts defaults are set to 4 as well as mapreduce.job.maxtaskfailures.per.tracker. (Ravi Prakash via jeagles)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1379617 13f79535-47bb-0310-9956-ffa450edef68
Jonathan Turner Eagles 2012-08-31 21:25:25 +00:00
parent 3819c964fa
commit 94129df354
5 changed files with 27 additions and 4 deletions

CHANGES.txt

@@ -861,6 +861,10 @@ Release 0.23.3 - UNRELEASED
     MAPREDUCE-4611. MR AM dies badly when Node is decommissioned (Robert
     Evans via tgraves)
 
+    MAPREDUCE-4604. In mapred-default, mapreduce.map.maxattempts &
+    mapreduce.reduce.maxattempts defaults are set to 4 as well as
+    mapreduce.job.maxtaskfailures.per.tracker. (Ravi Prakash via jeagles)
+
 Release 0.23.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

JobConf.java

@@ -1357,7 +1357,7 @@ public class JobConf extends Configuration {
    * @return the maximum no. of failures of a given job per tasktracker.
    */
   public int getMaxTaskFailuresPerTracker() {
-    return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 4);
+    return getInt(JobContext.MAX_TASK_FAILURES_PER_TRACKER, 3);
   }
 
   /**
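For context on the change above: getMaxTaskFailuresPerTracker() reads mapreduce.job.maxtaskfailures.per.tracker and falls back to the hard-coded default, now 3, when the key is unset. A minimal sketch of how the lowered default sits relative to the two maxattempts defaults, using JobConf's public getters; the wrapper class and the printed-value comments are illustrative, not part of this commit:

import org.apache.hadoop.mapred.JobConf;

public class MaxFailuresDefaults {
  public static void main(String[] args) {
    // true => load the default resources (mapred-default.xml), the same
    // thing the new unit test in this commit does
    JobConf conf = new JobConf(true);

    // After this commit the per-tracker cap is 3, strictly below both
    // maxattempts defaults (4), so a task that keeps failing on one
    // tracker still has attempts left to run on a different node.
    System.out.println(conf.getMaxTaskFailuresPerTracker()); // 3
    System.out.println(conf.getMaxMapAttempts());            // 4
    System.out.println(conf.getMaxReduceAttempts());         // 4
  }
}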

mapred-default.xml

@@ -797,9 +797,12 @@
 
 <property>
   <name>mapreduce.job.maxtaskfailures.per.tracker</name>
-  <value>4</value>
+  <value>3</value>
   <description>The number of task-failures on a tasktracker of a given job
-               after which new tasks of that job aren't assigned to it.
+               after which new tasks of that job aren't assigned to it. It
+               MUST be less than mapreduce.map.maxattempts and
+               mapreduce.reduce.maxattempts otherwise the failed task will
+               never be tried on a different node.
   </description>
 </property>
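The MUST clause added above is the substance of this fix: with the old default of 4, equal to mapreduce.map.maxattempts and mapreduce.reduce.maxattempts, a single task could burn all of its attempts on the same bad node and fail the job before that node was ever excluded. A hedged sketch of keeping the three settings consistent when raising retry limits; the setters are existing JobConf API, while the values 6/6/5 are purely illustrative:

import org.apache.hadoop.mapred.JobConf;

public class TuneFailureLimits {
  public static void main(String[] args) {
    JobConf conf = new JobConf(true);

    // Illustrative values for a flaky cluster: allow more retries...
    conf.setMaxMapAttempts(6);
    conf.setMaxReduceAttempts(6);

    // ...and keep the per-tracker cap strictly below both, so a node
    // that repeatedly fails this job's tasks gets skipped while the
    // tasks still have attempts left for other nodes.
    conf.setMaxTaskFailuresPerTracker(5);

    // Mirrors the invariant asserted by the unit test in this commit.
    if (conf.getMaxTaskFailuresPerTracker() >= conf.getMaxMapAttempts()
        || conf.getMaxTaskFailuresPerTracker() >= conf.getMaxReduceAttempts()) {
      throw new IllegalStateException(
          "maxtaskfailures.per.tracker must be < maxattempts");
    }
  }
}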

TestJobConf.java

@@ -21,6 +21,7 @@ import org.junit.Assert;
 import org.junit.Test;
 
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.JobContext;
+import org.apache.hadoop.mapreduce.MRJobConfig;
 
 public class TestJobConf {
@@ -185,4 +186,19 @@ public class TestJobConf {
   }
 
+  /**
+   * Ensure that by default JobContext.MAX_TASK_FAILURES_PER_TRACKER is less
+   * than JobContext.MAP_MAX_ATTEMPTS and JobContext.REDUCE_MAX_ATTEMPTS so
+   * that failed tasks will be retried on other nodes
+   */
+  @Test
+  public void testMaxTaskFailuresPerTracker() {
+    JobConf jobConf = new JobConf(true);
+    Assert.assertTrue("By default JobContext.MAX_TASK_FAILURES_PER_TRACKER was "
+        + "not less than JobContext.MAP_MAX_ATTEMPTS and REDUCE_MAX_ATTEMPTS",
+        jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxMapAttempts() &&
+        jobConf.getMaxTaskFailuresPerTracker() < jobConf.getMaxReduceAttempts()
+        );
+  }
+
 }

mapred-default.xml

@@ -701,7 +701,7 @@
 
 <property>
   <name>mapreduce.job.maxtaskfailures.per.tracker</name>
-  <value>4</value>
+  <value>3</value>
   <description>The number of task-failures on a tasktracker of a given job
                after which new tasks of that job aren't assigned to it.
   </description>