Merge MAPREDUCE-3567 from trunk
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1222499 13f79535-47bb-0310-9956-ffa450edef68
parent 9cd2e3be26
commit 67e975d653
@@ -89,6 +89,9 @@ Release 0.23.1 - Unreleased

   OPTIMIZATIONS

+    MAPREDUCE-3567. Extraneous JobConf objects in AM heap. (Vinod Kumar
+    Vavilapalli via sseth)
+
   BUG FIXES

     MAPREDUCE-2950. [Rumen] Fixed TestUserResolve. (Ravi Gummadi via amarrk)
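Review note: the heap issue named in the CHANGES entry comes from JobConf's copy constructor. new JobConf(Configuration) clones every property of the wrapped configuration, so building one per task, attempt, or context object leaves thousands of near-identical property tables in the ApplicationMaster heap. A minimal sketch of the before/after allocation pattern (the class name and the task count below are illustrative assumptions, not AM code):

    // Illustrative sketch only; names and sizes are assumptions.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;

    public class JobConfHeapSketch {
      public static void main(String[] args) {
        Configuration base = new Configuration();

        // Before: a defensive JobConf copy per task-scoped object. Each
        // JobConf(Configuration) call clones all properties of 'base'.
        JobConf[] perTaskCopies = new JobConf[10000];
        for (int i = 0; i < perTaskCopies.length; i++) {
          perTaskCopies[i] = new JobConf(base); // heap grows with tasks * properties
        }

        // After: convert once and hand the same instance to every consumer.
        JobConf shared = new JobConf(base);
        System.out.println("shared conf has " + shared.size() + " properties");
      }
    }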
@@ -29,9 +29,9 @@ import java.util.concurrent.LinkedBlockingQueue;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FSError;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.mapreduce.JobContext;
@@ -41,11 +41,11 @@ import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
-import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
 import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
@@ -20,7 +20,6 @@ package org.apache.hadoop.mapred;
 
 import java.util.Collection;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -35,13 +34,14 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.event.EventHandler;
 
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public class MapTaskAttemptImpl extends TaskAttemptImpl {
 
   private final TaskSplitMetaInfo splitInfo;
 
   public MapTaskAttemptImpl(TaskId taskId, int attempt,
       EventHandler eventHandler, Path jobFile,
-      int partition, TaskSplitMetaInfo splitInfo, Configuration conf,
+      int partition, TaskSplitMetaInfo splitInfo, JobConf conf,
       TaskAttemptListener taskAttemptListener,
       OutputCommitter committer, Token<JobTokenIdentifier> jobToken,
       Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
@@ -20,7 +20,6 @@ package org.apache.hadoop.mapred;
 
 import java.util.Collection;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -34,14 +33,14 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.event.EventHandler;
 
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public class ReduceTaskAttemptImpl extends TaskAttemptImpl {
 
   private final int numMapTasks;
 
   public ReduceTaskAttemptImpl(TaskId id, int attempt,
       EventHandler eventHandler, Path jobFile, int partition,
-      int numMapTasks, Configuration conf,
+      int numMapTasks, JobConf conf,
       TaskAttemptListener taskAttemptListener, OutputCommitter committer,
       Token<JobTokenIdentifier> jobToken,
       Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
@@ -54,7 +54,6 @@ import org.apache.hadoop.mapreduce.jobhistory.JobSubmittedEvent;
 import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;
 import org.apache.hadoop.mapreduce.lib.chain.ChainMapper;
 import org.apache.hadoop.mapreduce.lib.chain.ChainReducer;
-import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
 import org.apache.hadoop.mapreduce.security.TokenCache;
 import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
@@ -110,6 +109,7 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
 /** Implementation of Job interface. Maintains the state machines of Job.
  * The read and write calls use ReadWriteLock for concurrency.
  */
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   EventHandler<JobEvent> {
 
@@ -154,7 +154,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
   // Can then replace task-level uber counters (MR-2424) with job-level ones
   // sent from LocalContainerLauncher, and eventually including a count of
   // of uber-AM attempts (probably sent from MRAppMaster).
-  public Configuration conf;
+  public JobConf conf;
 
   //fields initialized in init
   private FileSystem fs;
@@ -371,7 +371,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
     this.applicationAttemptId = applicationAttemptId;
     this.jobId = jobId;
     this.jobName = conf.get(JobContext.JOB_NAME, "<missing job name>");
-    this.conf = conf;
+    this.conf = new JobConf(conf);
     this.metrics = metrics;
     this.clock = clock;
     this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
@@ -979,7 +979,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
           job.oldJobId);
     } else {
       job.jobContext = new org.apache.hadoop.mapred.JobContextImpl(
-          new JobConf(job.conf), job.oldJobId);
+          job.conf, job.oldJobId);
     }
 
     long inputLength = 0;
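Review note: taken together, the three JobImpl hunks above are the core of the fix: convert the incoming Configuration to a JobConf once in the constructor, record that in the field type, and let call sites such as the JobContextImpl construction reuse the cached instance instead of copying again. A simplified sketch of the pattern (class and member names trimmed for illustration; this is not the real JobImpl):

    // Convert-once sketch; 'ConvertOnce' and its members are illustrative.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.JobConf;

    class ConvertOnce {
      final JobConf conf; // was Configuration; the converted form is cached

      ConvertOnce(Configuration incoming) {
        this.conf = new JobConf(incoming); // the only copy ever made
      }

      JobConf contextConf() {
        return conf; // reuse; previously each call did new JobConf(this.conf)
      }
    }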
@@ -21,8 +21,8 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl;
 import java.util.Collection;
 import java.util.Set;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.MapTaskAttemptImpl;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -31,20 +31,20 @@ import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.event.EventHandler;
 
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public class MapTaskImpl extends TaskImpl {
 
   private final TaskSplitMetaInfo taskSplitMetaInfo;
 
   public MapTaskImpl(JobId jobId, int partition, EventHandler eventHandler,
-      Path remoteJobConfFile, Configuration conf,
+      Path remoteJobConfFile, JobConf conf,
       TaskSplitMetaInfo taskSplitMetaInfo,
       TaskAttemptListener taskAttemptListener, OutputCommitter committer,
       Token<JobTokenIdentifier> jobToken,
@@ -21,8 +21,8 @@ package org.apache.hadoop.mapreduce.v2.app.job.impl;
 import java.util.Collection;
 import java.util.Set;
 
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.ReduceTaskAttemptImpl;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.OutputCommitter;
@@ -30,19 +30,20 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
 import org.apache.hadoop.yarn.event.EventHandler;
 
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public class ReduceTaskImpl extends TaskImpl {
 
   private final int numMapTasks;
 
   public ReduceTaskImpl(JobId jobId, int partition,
-      EventHandler eventHandler, Path jobFile, Configuration conf,
+      EventHandler eventHandler, Path jobFile, JobConf conf,
       int numMapTasks, TaskAttemptListener taskAttemptListener,
       OutputCommitter committer, Token<JobTokenIdentifier> jobToken,
       Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
@@ -125,6 +125,7 @@ import org.apache.hadoop.yarn.util.RackResolver;
 /**
  * Implementation of TaskAttempt interface.
  */
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public abstract class TaskAttemptImpl implements
     org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt,
       EventHandler<TaskAttemptEvent> {
@@ -135,10 +136,9 @@ public abstract class TaskAttemptImpl implements
   private static final int REDUCE_MEMORY_MB_DEFAULT = 1024;
   private final static RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
 
-  protected final Configuration conf;
+  protected final JobConf conf;
   protected final Path jobFile;
   protected final int partition;
-  @SuppressWarnings("rawtypes")
   protected final EventHandler eventHandler;
   private final TaskAttemptId attemptId;
   private final Clock clock;
@@ -445,9 +445,9 @@ public abstract class TaskAttemptImpl implements
       .getProperty("line.separator");
 
   public TaskAttemptImpl(TaskId taskId, int i,
-      @SuppressWarnings("rawtypes") EventHandler eventHandler,
+      EventHandler eventHandler,
       TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
-      Configuration conf, String[] dataLocalHosts, OutputCommitter committer,
+      JobConf conf, String[] dataLocalHosts, OutputCommitter committer,
       Token<JobTokenIdentifier> jobToken,
       Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
     oldJobId = TypeConverter.fromYarn(taskId.getJobId());
@@ -1199,7 +1199,7 @@ public abstract class TaskAttemptImpl implements
         TaskAttemptEvent event) {
       @SuppressWarnings("deprecation")
       TaskAttemptContext taskContext =
-        new TaskAttemptContextImpl(new JobConf(taskAttempt.conf),
+        new TaskAttemptContextImpl(taskAttempt.conf,
           TypeConverter.fromYarn(taskAttempt.attemptId));
       taskAttempt.eventHandler.handle(new TaskCleanupEvent(
           taskAttempt.attemptId,
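Review note: the TaskAttemptImpl hunks carry the same theme into a hot path. The task-cleanup transition above previously allocated a throwaway new JobConf(taskAttempt.conf) per cleanup event; with the field already typed as JobConf, the shared instance passes straight through to TaskAttemptContextImpl with no copy.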
@@ -31,8 +31,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.TypeConverter;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -50,8 +50,6 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
-import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
 import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
@@ -66,6 +64,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
 import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.yarn.Clock;
@@ -81,11 +81,12 @@ import org.apache.hadoop.yarn.state.StateMachineFactory;
 /**
  * Implementation of Task interface.
  */
+@SuppressWarnings({ "rawtypes", "unchecked", "deprecation" })
 public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
 
   private static final Log LOG = LogFactory.getLog(TaskImpl.class);
 
-  protected final Configuration conf;
+  protected final JobConf conf;
   protected final Path jobFile;
   protected final OutputCommitter committer;
   protected final int partition;
@@ -225,7 +226,7 @@ public abstract class TaskImpl implements Task, EventHandler<TaskEvent> {
   }
 
   public TaskImpl(JobId jobId, TaskType taskType, int partition,
-      EventHandler eventHandler, Path remoteJobConfFile, Configuration conf,
+      EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
       TaskAttemptListener taskAttemptListener, OutputCommitter committer,
       Token<JobTokenIdentifier> jobToken,
       Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
@@ -119,6 +119,10 @@ public class MRApp extends MRAppMaster {
     this(maps, reduces, autoComplete, testName, cleanOnStart, 1);
   }
 
+  @Override
+  protected void downloadTokensAndSetupUGI(Configuration conf) {
+  }
+
   private static ApplicationAttemptId getApplicationAttemptId(
       ApplicationId applicationId, int startCount) {
     ApplicationAttemptId applicationAttemptId =
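Review note: MRApp is the in-process test harness, so the new no-op downloadTokensAndSetupUGI override simply skips the AM's token download and UGI setup, which unit tests have no secure cluster to satisfy.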
@@ -30,7 +30,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
 import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
 import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
@@ -169,7 +168,7 @@ public class MRAppBenchmark {
   }
 
   public void benchmark1() throws Exception {
-    int maps = 900;
+    int maps = 100000;
     int reduces = 100;
     System.out.println("Running benchmark with maps:"+maps +
         " reduces:"+reduces);
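Review note: with the per-task copies gone, the benchmark is scaled up from 900 to 100,000 map tasks, presumably so MRAppBenchmark exercises AM heap behavior at the job sizes that originally exposed the extraneous JobConf objects.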
@@ -17,7 +17,7 @@
  */
 package org.apache.hadoop.mapreduce.v2.app.job.impl;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -29,7 +29,6 @@ import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.Task;
@@ -60,11 +59,12 @@ import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
 
+@SuppressWarnings({ "rawtypes", "deprecation" })
 public class TestTaskImpl {
 
   private static final Log LOG = LogFactory.getLog(TestTaskImpl.class);
 
-  private Configuration conf;
+  private JobConf conf;
   private TaskAttemptListener taskAttemptListener;
   private OutputCommitter committer;
   private Token<JobTokenIdentifier> jobToken;
@@ -91,9 +91,8 @@ public class TestTaskImpl {
 
     private int taskAttemptCounter = 0;
 
-    @SuppressWarnings("rawtypes")
     public MockTaskImpl(JobId jobId, int partition,
-        EventHandler eventHandler, Path remoteJobConfFile, Configuration conf,
+        EventHandler eventHandler, Path remoteJobConfFile, JobConf conf,
         TaskAttemptListener taskAttemptListener, OutputCommitter committer,
         Token<JobTokenIdentifier> jobToken,
         Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
@@ -132,10 +131,9 @@ public class TestTaskImpl {
     private TaskAttemptState state = TaskAttemptState.NEW;
     private TaskAttemptId attemptId;
 
-    @SuppressWarnings("rawtypes")
     public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
         TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
-        Configuration conf, OutputCommitter committer,
+        JobConf conf, OutputCommitter committer,
         Token<JobTokenIdentifier> jobToken,
         Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
       super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
@@ -175,7 +173,6 @@ public class TestTaskImpl {
   private class MockTask extends Task {
 
     @Override
-    @SuppressWarnings("deprecation")
     public void run(JobConf job, TaskUmbilicalProtocol umbilical)
         throws IOException, ClassNotFoundException, InterruptedException {
       return;
@@ -195,7 +192,7 @@ public class TestTaskImpl {
 
     ++startCount;
 
-    conf = new Configuration();
+    conf = new JobConf();
     taskAttemptListener = mock(TaskAttemptListener.class);
     committer = mock(OutputCommitter.class);
     jobToken = (Token<JobTokenIdentifier>) mock(Token.class);
@@ -75,7 +75,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
       try {
         event = eventQueue.take();
       } catch(InterruptedException ie) {
-        LOG.info("AsyncDispatcher thread interrupted", ie);
+        LOG.warn("AsyncDispatcher thread interrupted", ie);
         return;
       }
       if (event != null) {
@@ -114,8 +114,10 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
   @SuppressWarnings("unchecked")
   protected void dispatch(Event event) {
     //all events go thru this loop
-    LOG.debug("Dispatching the event " + event.getClass().getName() + "."
-        + event.toString());
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("Dispatching the event " + event.getClass().getName() + "."
+          + event.toString());
+    }
 
     Class<? extends Enum> type = event.getType().getDeclaringClass();
 
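Review note: the new guard matters because dispatch() sits on the dispatcher's hot path; without it, the log argument (class name plus event.toString()) is concatenated for every event even when debug logging is off. The standard commons-logging idiom, sketched with illustrative names:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    class DebugGuardSketch {
      private static final Log LOG = LogFactory.getLog(DebugGuardSketch.class);

      void onEvent(Object event) {
        // The guard skips the eager string building when debug is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Dispatching " + event.getClass().getName() + "." + event);
        }
      }
    }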
@@ -131,12 +133,11 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
     }
   }
 
+  @SuppressWarnings("unchecked")
   @Override
-  @SuppressWarnings("rawtypes")
   public void register(Class<? extends Enum> eventType,
       EventHandler handler) {
     /* check to see if we have a listener registered */
-    @SuppressWarnings("unchecked")
     EventHandler<Event> registeredHandler = (EventHandler<Event>)
     eventDispatchers.get(eventType);
     LOG.info("Registering " + eventType + " for " + handler.getClass());
@@ -170,7 +171,7 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
     }
     int remCapacity = eventQueue.remainingCapacity();
     if (remCapacity < 1000) {
-      LOG.info("Very low remaining capacity in the event-queue: "
+      LOG.warn("Very low remaining capacity in the event-queue: "
           + remCapacity);
     }
     try {
@@ -186,7 +187,6 @@ public class AsyncDispatcher extends AbstractService implements Dispatcher {
    * are interested in the event.
    * @param <T> the type of event these multiple handlers are interested in.
    */
-  @SuppressWarnings("rawtypes")
   static class MultiListenerHandler implements EventHandler<Event> {
     List<EventHandler<Event>> listofHandlers;
 