MAPREDUCE-2886. Fix Javadoc warnings in MapReduce. (mahadev) - Merge r1163050 from trunk.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1163051 13f79535-47bb-0310-9956-ffa450edef68
parent 0165337cc0
commit e123403fda
@@ -1168,6 +1168,8 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-2737. Update the progress of jobs on client side. (Siddharth Seth
     and Mahadev Konar via mahadev)
 
+    MAPREDUCE-2886. Fix Javadoc warnings in MapReduce. (mahadev)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES
@@ -44,7 +44,7 @@ public interface TaskRuntimeEstimator {
    * already elapsed. If the projected total execution time for this task
    * ever exceeds its reasonable execution time, we may speculate it.
    *
-   * @param id the {@link TaskID} of the task we are asking about
+   * @param id the {@link TaskId} of the task we are asking about
    * @return the task's maximum reasonable runtime, or MAX_VALUE if
    *         we don't have enough information to rule out any runtime,
    *         however long.
@@ -57,7 +57,7 @@ public interface TaskRuntimeEstimator {
    * Estimate a task attempt's total runtime. Includes the time already
    * elapsed.
    *
-   * @param id the {@link TaskAttemptID} of the attempt we are asking about
+   * @param id the {@link TaskAttemptId} of the attempt we are asking about
    * @return our best estimate of the attempt's runtime, or {@code -1} if
    *         we don't have enough information yet to produce an estimate.
    *
@@ -69,7 +69,7 @@ public interface TaskRuntimeEstimator {
    * Estimates how long a new attempt on this task will take if we start
    * one now
    *
-   * @param id the {@link TaskID} of the task we are asking about
+   * @param id the {@link TaskId} of the task we are asking about
    * @return our best estimate of a new attempt's runtime, or {@code -1} if
    *         we don't have enough information yet to produce an estimate.
    *
@@ -79,9 +79,9 @@ public interface TaskRuntimeEstimator {
   /**
    *
    * Computes the width of the error band of our estimate of the task
-   * runtime as returned by {@link estimatedRuntime}
+   * runtime as returned by {@link #estimatedRuntime(TaskAttemptId)}
    *
-   * @param id the {@link TaskAttemptID} of the attempt we are asking about
+   * @param id the {@link TaskAttemptId} of the attempt we are asking about
    * @return our best estimate of the attempt's runtime, or {@code -1} if
    *         we don't have enough information yet to produce an estimate.
    *
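Most of the fixes in this file follow the same Javadoc rule: a reference to a method must use the #member form (with an argument list to pin down overloads), while a bare name inside {@link} is resolved as a type and produces a "reference not found" warning. A minimal illustration (the type and method names here are only for demonstration):

    /**
     * {@link SomeType}                      links to a type
     * {@link #someMethod(String)}           links to a member of the same type
     * {@link SomeType#someMethod(String)}   links to a member of another type
     */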
@@ -127,7 +127,7 @@ public class JobHistoryUtils {
   /**
    * Checks whether the provided path string is a valid job history file.
    * @param pathString the path to be checked.
-   * @return
+   * @return true if the path is a valid job history filename else return false
    */
   public static boolean isValidJobHistoryFileName(String pathString) {
     return pathString.endsWith(JOB_HISTORY_FILE_EXTENSION);
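The new @return text matches what the method does: a plain suffix test against JOB_HISTORY_FILE_EXTENSION. A hedged usage sketch (the ".jhist" literal is an assumption about the constant's value on this branch):

    // Sketch only: assumes JOB_HISTORY_FILE_EXTENSION is ".jhist"
    String name = "job_1314633856546_0001.jhist";             // illustrative filename
    boolean valid = JobHistoryUtils.isValidJobHistoryFileName(name);
    // equivalent to: name.endsWith(".jhist")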
@@ -148,7 +148,7 @@ public class JobHistoryUtils {
 
   /**
    * Gets a PathFilter which would match configuration files.
-   * @return
+   * @return the path filter {@link PathFilter} for matching conf files.
    */
   public static PathFilter getConfFileFilter() {
     return CONF_FILTER;
@@ -156,7 +156,7 @@ public class JobHistoryUtils {
 
   /**
    * Gets a PathFilter which would match job history file names.
-   * @return
+   * @return the path filter {@link PathFilter} matching job history files.
    */
   public static PathFilter getHistoryFileFilter() {
     return JOB_HISTORY_FILE_FILTER;
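Both getters hand back a PathFilter, which plugs straight into FileSystem listings. A hedged usage sketch, given a Configuration conf in scope (the done-directory path is illustrative):

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // List only job history files under a done directory (path is illustrative)
    FileSystem fs = FileSystem.get(conf);
    FileStatus[] histories = fs.listStatus(
        new Path("/mapred/history/done"),
        JobHistoryUtils.getHistoryFileFilter());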
@@ -194,8 +194,8 @@ public class JobHistoryUtils {
 
   /**
    * Gets the configured directory prefix for Done history files.
-   * @param conf
-   * @return
+   * @param conf the configuration object
+   * @return the done history directory
    */
   public static String getConfiguredHistoryServerDoneDirPrefix(
       Configuration conf) {
@@ -209,8 +209,8 @@ public class JobHistoryUtils {
 
   /**
    * Gets the user directory for intermediate done history files.
-   * @param conf
-   * @return
+   * @param conf the configuration object
+   * @return the intermediate done directory for jobhistory files.
    */
   public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException {
     return getConfiguredHistoryIntermediateDoneDirPrefix(conf) + File.separator
@@ -262,7 +262,7 @@ public class JobHistoryUtils {
    * @param logDir the log directory prefix.
    * @param jobId the jobId.
    * @param attempt attempt number for this job.
-   * @return
+   * @return the conf file path for jobs in progress.
    */
   public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) {
     Path jobFilePath = null;
@@ -277,7 +277,7 @@ public class JobHistoryUtils {
    * Gets the serial number part of the path based on the jobId and serialNumber format.
    * @param id
    * @param serialNumberFormat
-   * @return
+   * @return the serial number part of the path based on the jobId and serial number format.
    */
   public static String serialNumberDirectoryComponent(JobId id, String serialNumberFormat) {
     return String.format(serialNumberFormat,
@@ -287,7 +287,7 @@ public class JobHistoryUtils {
 
   /**Extracts the timstamp component from the path.
    * @param path
-   * @return
+   * @return the timestamp component from the path
    */
   public static String getTimestampPartFromPath(String path) {
     Matcher matcher = TIMESTAMP_DIR_PATTERN.matcher(path);
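getTimestampPartFromPath extracts a dated component with the precompiled TIMESTAMP_DIR_PATTERN. That pattern is private, so the regex below is an illustrative stand-in that assumes a yyyy/mm/dd directory layout:

    import java.util.regex.Matcher;
    import java.util.regex.Pattern;

    // Assumed layout: .../done/2011/08/30/000000/...
    Pattern timestampDir = Pattern.compile("\\d{4}/\\d{2}/\\d{2}");
    Matcher m = timestampDir.matcher("/history/done/2011/08/30/000000/job.jhist");
    String timestampPart = m.find() ? m.group() : null;   // "2011/08/30"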
@@ -305,7 +305,7 @@ public class JobHistoryUtils {
    * @param id
    * @param timestampComponent
    * @param serialNumberFormat
-   * @return
+   * @return the history sub directory based on the jobid, timestamp and serial number format
    */
   public static String historyLogSubdirectory(JobId id, String timestampComponent, String serialNumberFormat) {
     // String result = LOG_VERSION_STRING;
@@ -324,7 +324,7 @@ public class JobHistoryUtils {
    * Gets the timestamp component based on millisecond time.
    * @param millisecondTime
    * @param debugMode
-   * @return
+   * @return the timestamp component based on millisecond time
    */
   public static String timestampDirectoryComponent(long millisecondTime, boolean debugMode) {
     Calendar timestamp = Calendar.getInstance();
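timestampDirectoryComponent is the producing side of the same layout: it turns a millisecond clock value into the dated directory component. A hedged sketch of the Calendar arithmetic involved (the exact format string is an assumption):

    import java.util.Calendar;

    long millisecondTime = System.currentTimeMillis();
    Calendar timestamp = Calendar.getInstance();
    timestamp.setTimeInMillis(millisecondTime);
    // Calendar.MONTH is zero-based, hence the +1
    String component = String.format("%04d/%02d/%02d",
        timestamp.get(Calendar.YEAR),
        timestamp.get(Calendar.MONTH) + 1,
        timestamp.get(Calendar.DAY_OF_MONTH));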
@@ -350,7 +350,7 @@ public class JobHistoryUtils {
   /**
    * Computes a serial number used as part of directory naming for the given jobId.
    * @param id the jobId.
-   * @return
+   * @return the serial number used as part of directory naming for the given jobid
    */
   public static int jobSerialNumber(JobId id) {
     return id.getId();
@@ -372,7 +372,7 @@ public class Counters
    * @param id the id of the counter within the group (0 to N-1)
    * @param name the internal name of the counter
    * @return the counter for that name
-   * @deprecated use {@link findCounter(String, String)} instead
+   * @deprecated use {@link #findCounter(String, String)} instead
    */
   @Deprecated
   public Counter findCounter(String group, int id, String name) {
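The deprecation tag now points callers at the two-argument overload with a well-formed member link. A hedged usage sketch of that overload (the group and counter names are legacy-API examples, not guaranteed on every branch):

    // Sketch only: group/counter names are illustrative legacy-API values
    Counters counters = runningJob.getCounters();
    Counters.Counter bytesRead =
        counters.findCounter("FileSystemCounters", "HDFS_BYTES_READ");
    long value = bytesRead.getValue();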
@@ -49,7 +49,7 @@ import org.apache.hadoop.util.ToolRunner;
 
 /**
  * <code>JobClient</code> is the primary interface for the user-job to interact
- * with the {@link JobTracker}.
+ * with the cluster.
  *
  * <code>JobClient</code> provides facilities to submit jobs, track their
  * progress, access component-tasks' reports/logs, get the Map-Reduce cluster
@@ -72,7 +72,7 @@ import org.apache.hadoop.util.ToolRunner;
 * on the distributed file-system.
 * </li>
 * <li>
-* Submitting the job to the <code>JobTracker</code> and optionally monitoring
+* Submitting the job to the cluster and optionally monitoring
 * it's status.
 * </li>
 * </ol></p>
@@ -152,7 +152,7 @@ public class JobClient extends CLI {
   /**
    * We store a JobProfile and a timestamp for when we last
    * acquired the job profile. If the job is null, then we cannot
-   * perform any of the tasks. The job might be null if the JobTracker
+   * perform any of the tasks. The job might be null if the cluster
    * has completely forgotten about the job. (eg, 24 hours after the
    * job completes.)
    */
@@ -348,7 +348,7 @@ public class JobClient extends CLI {
   }
 
   /**
-   * Fetch task completion events from jobtracker for this job.
+   * Fetch task completion events from cluster for this job.
    */
   public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
       int startFrom) throws IOException {
@@ -429,7 +429,7 @@ public class JobClient extends CLI {
 
   /**
    * Build a job client with the given {@link JobConf}, and connect to the
-   * default {@link JobTracker}.
+   * default cluster
    *
    * @param conf the job configuration.
    * @throws IOException
@@ -440,7 +440,7 @@ public class JobClient extends CLI {
 
   /**
    * Build a job client with the given {@link Configuration},
-   * and connect to the default {@link JobTracker}.
+   * and connect to the default cluster
    *
    * @param conf the configuration.
    * @throws IOException
@@ -450,7 +450,7 @@ public class JobClient extends CLI {
   }
 
   /**
-   * Connect to the default {@link JobTracker}.
+   * Connect to the default cluster
    * @param conf the job configuration.
    * @throws IOException
    */
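The JobTracker references removed above become "cluster" wording; the submit-and-monitor flow JobClient documents is unchanged. A minimal sketch of that flow in the old mapred API (MyJob and its inner Mapper/Reducer classes are placeholders):

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapred.FileInputFormat;
    import org.apache.hadoop.mapred.FileOutputFormat;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;

    JobConf job = new JobConf(MyJob.class);       // MyJob is a placeholder driver class
    job.setJobName("myjob");
    FileInputFormat.setInputPaths(job, new Path("in"));
    FileOutputFormat.setOutputPath(job, new Path("out"));
    job.setMapperClass(MyJob.MyMapper.class);     // placeholder mapper
    job.setReducerClass(MyJob.MyReducer.class);   // placeholder reducer

    // Submit the job, then poll for progress until it completes
    JobClient.runJob(job);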
@@ -476,7 +476,6 @@ public class JobConf extends Configuration {
 
   /**
    * Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
-   * @see org.apache.hadoop.mapreduce.util.MRAsyncDiskService#cleanupAllVolumes()
    */
   @Deprecated
   public void deleteLocalFiles() throws IOException {
@@ -20,7 +20,7 @@ package org.apache.hadoop.mapred;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
-/** The states of a {@link TaskInProgress} as seen by the JobTracker.
+/** The states of a Tasks.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -43,8 +43,6 @@ import org.apache.hadoop.io.Text;
 *
 * @see TaskID
 * @see TaskAttemptID
-* @see org.apache.hadoop.mapred.JobTracker#getNewJobId()
-* @see org.apache.hadoop.mapred.JobTracker#getStartTime()
 */
 @InterfaceAudience.Public
 @InterfaceStability.Stable
@@ -22,8 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 /**
  * Place holder for cluster level configuration keys.
  *
- * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The
- * keys should have "mapreduce.cluster." as the prefix.
+ * The keys should have "mapreduce.cluster." as the prefix.
  *
  */
 @InterfaceAudience.Private
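For reference, this is how a cluster-level key carrying the required prefix is read through the normal Configuration API; treat the exact key name as an assumption about this branch:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    // Example of a cluster-level key with the "mapreduce.cluster." prefix
    String localDirs = conf.get("mapreduce.cluster.local.dir");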
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -114,7 +115,7 @@ public interface ContainerManager {
    *
    * @param request request to get <code>ContainerStatus</code> of a container
    *                with the specified <code>ContainerId</code>
-   * @return
+   * @return the <code>ContainerStatus</code> of the container
    * @throws YarnRemoteException
    */
   @Public
@@ -113,7 +113,6 @@ public interface AMResponse {
 
   /**
    * Get available headroom for resources in the cluster for the application.
-   * @param limit available headroom for resources in the cluster for the application
    */
   @Public
   @Stable
@@ -50,7 +50,7 @@ public class ConverterUtils {
    *
    * @param url
    *          url to convert
-   * @return
+   * @return path from {@link URL}
    * @throws URISyntaxException
    */
   public static Path getPathFromYarnURL(URL url) throws URISyntaxException {
@@ -63,8 +63,8 @@ public class ConverterUtils {
 
   /**
    * change from CharSequence to string for map key and value
-   * @param env
-   * @return
+   * @param env map for converting
+   * @return string,string map
    */
   public static Map<String, String> convertToString(
       Map<CharSequence, CharSequence> env) {
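convertToString is a plain copy that calls toString() on each key and value. A self-contained sketch of the same conversion (names are illustrative; the real implementation may differ in detail):

    import java.util.HashMap;
    import java.util.Map;

    public static Map<String, String> toStringMap(Map<CharSequence, CharSequence> env) {
      Map<String, String> out = new HashMap<String, String>();
      for (Map.Entry<CharSequence, CharSequence> entry : env.entrySet()) {
        out.put(entry.getKey().toString(), entry.getValue().toString());
      }
      return out;
    }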
@@ -221,8 +221,7 @@ public class ProcfsBasedProcessTree {
   }
 
   /** Verify that the given process id is same as its process group id.
-   * @param pidStr Process id of the to-be-verified-process
-   * @param procfsDir Procfs root dir
+   * @return true if the process id matches else return false.
    */
   public boolean checkPidPgrpidForMatch() {
     return checkPidPgrpidForMatch(pid, PROCFS);
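checkPidPgrpidForMatch asks whether a process is its own process-group leader, which the tree walker uses to anchor a process tree. A hedged, Linux-specific sketch of the underlying /proc check (it assumes the comm field contains no spaces, which the real parser handles more carefully):

    import java.io.BufferedReader;
    import java.io.FileReader;
    import java.io.IOException;

    static boolean isProcessGroupLeader(String pid) throws IOException {
      BufferedReader in = new BufferedReader(
          new FileReader("/proc/" + pid + "/stat"));
      try {
        // /proc/<pid>/stat layout: pid (comm) state ppid pgrp ...
        String[] fields = in.readLine().split("\\s+");
        return pid.equals(fields[4]);   // pgrp equals pid for a group leader
      } finally {
        in.close();
      }
    }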
@@ -62,7 +62,7 @@ public class RackResolver {
    * right resolver implementation.
    * @param conf
    * @param hostName
-   * @return
+   * @return node {@link Node} after resolving the hostname
    */
   public static Node resolve(Configuration conf, String hostName) {
     init(conf);
@@ -74,7 +74,7 @@ public class RackResolver {
    * network topology. This method doesn't initialize the class.
    * Call {@link #init(Configuration)} explicitly.
    * @param hostName
-   * @return
+   * @return node {@link Node} after resolving the hostname
    */
   public static Node resolve(String hostName) {
     if (!initCalled) {
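Both resolve overloads map a hostname to a Node in the cluster's network topology. A hedged usage sketch (the hostname and rack string are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.Node;
    import org.apache.hadoop.yarn.util.RackResolver;

    Configuration conf = new Configuration();
    Node node = RackResolver.resolve(conf, "host1.example.com");  // illustrative host
    String rack = node.getNetworkLocation();                      // e.g. "/default-rack"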
@@ -77,11 +77,18 @@ public abstract class ContainerExecutor implements Configurable {
       List<Path> localDirs)
     throws IOException, InterruptedException;
 
 
   /**
    * Launch the container on the node. This is a blocking call and returns only
    * when the container exits.
-   *
-   * @param launchCtxt
+   * @param container the container to be launched
+   * @param nmPrivateContainerScriptPath the path for launch script
+   * @param nmPrivateTokensPath the path for tokens for the container
+   * @param user the user of the container
+   * @param appId the appId of the container
+   * @param containerWorkDir the work dir for the container
+   * @return the return status of the launch
+   * @throws IOException
    */
   public abstract int launchContainer(Container container,
       Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
@@ -61,10 +61,13 @@ public class DeletionService extends AbstractService {
     this.debugDelay = 0;
   }
 
+  /**
+   *
   /**
    * Delete the path(s) as this user.
    * @param user The user to delete as, or the JVM user if null
-   * @param p Paths to delete
+   * @param subDir the sub directory name
+   * @param baseDirs the base directories which contains the subDir's
    */
   public void delete(String user, Path subDir, Path... baseDirs) {
     // TODO if parent owned by NM, rename within parent inline
@@ -460,7 +460,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
 
   /**
    * return the scheduler.
-   * @return
+   * @return the scheduler for the Resource Manager.
    */
   @Private
   public ResourceScheduler getResourceScheduler() {
@@ -469,7 +469,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
 
   /**
    * return the resource tracking component.
-   * @return
+   * @return the resource tracking component.
    */
   @Private
   public ResourceTrackerService getResourceTrackerService() {
@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
 
 /**
  * This interface is the one implemented by the schedulers. It mainly extends
- * {@link ResourceListener} and {@link YarnScheduler}.
+ * {@link YarnScheduler}.
  *
  */
 @LimitedPrivate("yarn")
@@ -360,7 +360,7 @@ public class SchedulerApp {
    * given <code>priority</code>?
    * @param node node to be checked
    * @param priority priority of reserved container
-   * @return
+   * @return true if reserved, false if not
    */
   public synchronized boolean isReserved(SchedulerNode node, Priority priority) {
     Map<NodeId, RMContainer> reservedContainers =
@@ -90,7 +90,7 @@ public class SchedulerNode {
    * given application.
    *
    * @param applicationId application
-   * @param containers allocated containers
+   * @param rmContainer allocated container
    */
   public synchronized void allocateContainer(ApplicationId applicationId,
       RMContainer rmContainer) {
@@ -155,7 +155,7 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
    * Assign containers to applications in the queue or it's children (if any).
    * @param clusterResource the resource of the cluster.
    * @param node node on which resources are available
-   * @return
+   * @return the resource that is being assigned.
    */
   public Resource assignContainers(Resource clusterResource, SchedulerNode node);
 