diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 8948247f4e6..7f07db3ac5d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1168,6 +1168,8 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2737. Update the progress of jobs on client side. (Siddharth Seth
and Mahadev Konar via mahadev)
+ MAPREDUCE-2886. Fix Javadoc warnings in MapReduce. (mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
index 93e5ae3d953..ce4825ff225 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/TaskRuntimeEstimator.java
@@ -44,7 +44,7 @@ public interface TaskRuntimeEstimator {
* already elapsed. If the projected total execution time for this task
* ever exceeds its reasonable execution time, we may speculate it.
*
- * @param id the {@link TaskID} of the task we are asking about
+ * @param id the {@link TaskId} of the task we are asking about
* @return the task's maximum reasonable runtime, or MAX_VALUE if
* we don't have enough information to rule out any runtime,
* however long.
@@ -57,7 +57,7 @@ public interface TaskRuntimeEstimator {
* Estimate a task attempt's total runtime. Includes the time already
* elapsed.
*
- * @param id the {@link TaskAttemptID} of the attempt we are asking about
+ * @param id the {@link TaskAttemptId} of the attempt we are asking about
* @return our best estimate of the attempt's runtime, or {@code -1} if
* we don't have enough information yet to produce an estimate.
*
@@ -69,7 +69,7 @@ public interface TaskRuntimeEstimator {
* Estimates how long a new attempt on this task will take if we start
* one now
*
- * @param id the {@link TaskID} of the task we are asking about
+ * @param id the {@link TaskId} of the task we are asking about
* @return our best estimate of a new attempt's runtime, or {@code -1} if
* we don't have enough information yet to produce an estimate.
*
@@ -79,9 +79,9 @@ public interface TaskRuntimeEstimator {
/**
*
* Computes the width of the error band of our estimate of the task
- * runtime as returned by {@link estimatedRuntime}
+ * runtime as returned by {@link #estimatedRuntime(TaskAttemptId)}
*
- * @param id the {@link TaskAttemptID} of the attempt we are asking about
+ * @param id the {@link TaskAttemptId} of the attempt we are asking about
* @return our best estimate of the attempt's runtime, or {@code -1} if
* we don't have enough information yet to produce an estimate.
*
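The corrected tags above describe the estimator contract: thresholdRuntime() falls back to Long.MAX_VALUE and the estimate methods to -1 when there is not enough information. A minimal sketch of how a speculator might consume that contract follows; the RuntimeEstimate interface and shouldSpeculate() helper are illustrative stand-ins (plain long ids instead of TaskId/TaskAttemptId), not the real speculator code.

```java
// Illustrative stand-in for the documented contract, using plain long ids
// instead of TaskId/TaskAttemptId; not the real speculator code.
interface RuntimeEstimate {
  long thresholdRuntime(long taskId);            // Long.MAX_VALUE when unknown
  long estimatedRuntime(long attemptId);         // -1 when unknown
  long estimatedNewAttemptRuntime(long taskId);  // -1 when unknown
  long runtimeEstimateVariance(long attemptId);  // -1 when unknown
}

class SpeculationSketch {
  /** Speculate only when the running attempt is projected to overrun its threshold. */
  static boolean shouldSpeculate(RuntimeEstimate est, long taskId, long attemptId) {
    long threshold = est.thresholdRuntime(taskId);
    long projected = est.estimatedRuntime(attemptId);
    if (projected < 0 || threshold == Long.MAX_VALUE) {
      return false; // not enough information to rule the attempt out
    }
    return projected > threshold;
  }
}
```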
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
index ae87f58d7ba..ee3e60e77a4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
@@ -127,7 +127,7 @@ public class JobHistoryUtils {
/**
* Checks whether the provided path string is a valid job history file.
* @param pathString the path to be checked.
- * @return
+ * @return true if the path is a valid job history filename, false otherwise
*/
public static boolean isValidJobHistoryFileName(String pathString) {
return pathString.endsWith(JOB_HISTORY_FILE_EXTENSION);
@@ -148,7 +148,7 @@ public class JobHistoryUtils {
/**
* Gets a PathFilter which would match configuration files.
- * @return
+ * @return the path filter {@link PathFilter} for matching conf files.
*/
public static PathFilter getConfFileFilter() {
return CONF_FILTER;
@@ -156,7 +156,7 @@ public class JobHistoryUtils {
/**
* Gets a PathFilter which would match job history file names.
- * @return
+ * @return the path filter {@link PathFilter} matching job history files.
*/
public static PathFilter getHistoryFileFilter() {
return JOB_HISTORY_FILE_FILTER;
@@ -194,8 +194,8 @@ public class JobHistoryUtils {
/**
* Gets the configured directory prefix for Done history files.
- * @param conf
- * @return
+ * @param conf the configuration object
+ * @return the done history directory
*/
public static String getConfiguredHistoryServerDoneDirPrefix(
Configuration conf) {
@@ -209,8 +209,8 @@ public class JobHistoryUtils {
/**
* Gets the user directory for intermediate done history files.
- * @param conf
- * @return
+ * @param conf the configuration object
+ * @return the intermediate done directory for jobhistory files.
*/
public static String getHistoryIntermediateDoneDirForUser(Configuration conf) throws IOException {
return getConfiguredHistoryIntermediateDoneDirPrefix(conf) + File.separator
@@ -262,7 +262,7 @@ public class JobHistoryUtils {
* @param logDir the log directory prefix.
* @param jobId the jobId.
* @param attempt attempt number for this job.
- * @return
+ * @return the conf file path for jobs in progress.
*/
public static Path getStagingConfFile(Path logDir, JobId jobId, int attempt) {
Path jobFilePath = null;
@@ -277,7 +277,7 @@ public class JobHistoryUtils {
* Gets the serial number part of the path based on the jobId and serialNumber format.
* @param id
* @param serialNumberFormat
- * @return
+ * @return the serial number part of the path based on the jobId and serial number format.
*/
public static String serialNumberDirectoryComponent(JobId id, String serialNumberFormat) {
return String.format(serialNumberFormat,
@@ -287,7 +287,7 @@ public class JobHistoryUtils {
/**Extracts the timstamp component from the path.
* @param path
- * @return
+ * @return the timestamp component from the path
*/
public static String getTimestampPartFromPath(String path) {
Matcher matcher = TIMESTAMP_DIR_PATTERN.matcher(path);
@@ -305,7 +305,7 @@ public class JobHistoryUtils {
* @param id
* @param timestampComponent
* @param serialNumberFormat
- * @return
+ * @return the history sub directory based on the jobid, timestamp and serial number format
*/
public static String historyLogSubdirectory(JobId id, String timestampComponent, String serialNumberFormat) {
// String result = LOG_VERSION_STRING;
@@ -324,7 +324,7 @@ public class JobHistoryUtils {
* Gets the timestamp component based on millisecond time.
* @param millisecondTime
* @param debugMode
- * @return
+ * @return the timestamp component based on millisecond time
*/
public static String timestampDirectoryComponent(long millisecondTime, boolean debugMode) {
Calendar timestamp = Calendar.getInstance();
@@ -350,7 +350,7 @@ public class JobHistoryUtils {
/**
* Computes a serial number used as part of directory naming for the given jobId.
* @param id the jobId.
- * @return
+ * @return the serial number used as part of directory naming for the given jobid
*/
public static int jobSerialNumber(JobId id) {
return id.getId();
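The filled-in @return tags above cover simple naming helpers: a suffix check for history file names and a String.format-based serial-number directory component. A self-contained sketch of that behaviour follows, assuming a ".jhist" extension and a "%09d" format purely for illustration.

```java
// Self-contained sketch of the helpers documented above; the ".jhist"
// extension and the "%09d" serial-number format are assumptions made for
// illustration, not values read from JobHistoryUtils.
public class JobHistoryNamingSketch {
  static final String HISTORY_EXTENSION = ".jhist";

  static boolean isValidJobHistoryFileName(String pathString) {
    // Mirrors the endsWith() check shown in the hunk above.
    return pathString.endsWith(HISTORY_EXTENSION);
  }

  static String serialNumberDirectoryComponent(int jobSerialNumber,
                                               String serialNumberFormat) {
    // Zero-padded directory component derived from the job's serial number.
    return String.format(serialNumberFormat, jobSerialNumber);
  }

  public static void main(String[] args) {
    System.out.println(isValidJobHistoryFileName("job_1316000000000_0001.jhist")); // true
    System.out.println(serialNumberDirectoryComponent(1, "%09d"));                 // 000000001
  }
}
```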
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
index 30183934e68..9d8a2a71da8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
@@ -372,7 +372,7 @@ public class Counters
* @param id the id of the counter within the group (0 to N-1)
* @param name the internal name of the counter
* @return the counter for that name
- * @deprecated use {@link findCounter(String, String)} instead
+ * @deprecated use {@link #findCounter(String, String)} instead
*/
@Deprecated
public Counter findCounter(String group, int id, String name) {
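The deprecation note now points at the two-argument lookup. A hedged usage sketch; the group and counter names are made-up examples.

```java
// Hedged usage sketch for the non-deprecated two-argument lookup; the group
// and counter names are made-up examples.
import org.apache.hadoop.mapred.Counters;

public class CounterLookupSketch {
  static long readRecordsSeen(Counters counters) {
    // Preferred: findCounter(group, name) rather than the deprecated
    // findCounter(group, id, name) shown above.
    Counters.Counter c = counters.findCounter("MyApp", "RECORDS_SEEN");
    return c.getCounter();
  }
}
```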
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index 4a968778738..194b80caf10 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -49,7 +49,7 @@ import org.apache.hadoop.util.ToolRunner;
/**
* JobClient is the primary interface for the user-job to interact
- * with the {@link JobTracker}.
+ * with the cluster.
*
* JobClient provides facilities to submit jobs, track their
* progress, access component-tasks' reports/logs, get the Map-Reduce cluster
@@ -72,7 +72,7 @@ import org.apache.hadoop.util.ToolRunner;
* on the distributed file-system.
*
*
- * Submitting the job to the JobTracker and optionally monitoring
+ * Submitting the job to the cluster and optionally monitoring
* it's status.
*
*
@@ -152,7 +152,7 @@ public class JobClient extends CLI {
/**
* We store a JobProfile and a timestamp for when we last
* acquired the job profile. If the job is null, then we cannot
- * perform any of the tasks. The job might be null if the JobTracker
+ * perform any of the tasks. The job might be null if the cluster
* has completely forgotten about the job. (eg, 24 hours after the
* job completes.)
*/
@@ -348,7 +348,7 @@ public class JobClient extends CLI {
}
/**
- * Fetch task completion events from jobtracker for this job.
+ * Fetch task completion events from cluster for this job.
*/
public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
int startFrom) throws IOException {
@@ -429,7 +429,7 @@ public class JobClient extends CLI {
/**
* Build a job client with the given {@link JobConf}, and connect to the
- * default {@link JobTracker}.
+ * default cluster.
*
* @param conf the job configuration.
* @throws IOException
@@ -440,7 +440,7 @@ public class JobClient extends CLI {
/**
* Build a job client with the given {@link Configuration},
- * and connect to the default {@link JobTracker}.
+ * and connect to the default cluster.
*
* @param conf the configuration.
* @throws IOException
@@ -450,7 +450,7 @@ public class JobClient extends CLI {
}
/**
- * Connect to the default {@link JobTracker}.
+ * Connect to the default cluster.
* @param conf the job configuration.
* @throws IOException
*/
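With the Javadoc now phrased in terms of "the cluster" rather than a JobTracker, a minimal submission sketch using the classic org.apache.hadoop.mapred API; the job name and argument paths are placeholders, and the default identity mapper/reducer keeps it short.

```java
// Minimal submission sketch against "the cluster" using the classic
// org.apache.hadoop.mapred API; the job name and argument paths are
// placeholders, and the default identity mapper/reducer keeps it short.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.RunningJob;

public class SubmitSketch {
  public static void main(String[] args) throws Exception {
    JobConf conf = new JobConf(SubmitSketch.class);
    conf.setJobName("submit-sketch");
    FileInputFormat.setInputPaths(conf, new Path(args[0]));
    FileOutputFormat.setOutputPath(conf, new Path(args[1]));
    // Submits to whatever cluster the configuration points at and blocks
    // until the job completes.
    RunningJob job = JobClient.runJob(conf);
    System.out.println("succeeded: " + job.isSuccessful());
  }
}
```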
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index 5adf28968f7..49d12d764d5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -476,7 +476,6 @@ public class JobConf extends Configuration {
/**
* Use MRAsyncDiskService.moveAndDeleteAllVolumes instead.
- * @see org.apache.hadoop.mapreduce.util.MRAsyncDiskService#cleanupAllVolumes()
*/
@Deprecated
public void deleteLocalFiles() throws IOException {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
index 775fef27d9a..da13934a6a1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TIPStatus.java
@@ -20,7 +20,7 @@ package org.apache.hadoop.mapred;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-/** The states of a {@link TaskInProgress} as seen by the JobTracker.
+/** The states of a Task.
*/
@InterfaceAudience.Private
@InterfaceStability.Unstable
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
index fc3cc6bb5aa..09dc1cb6e1e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobID.java
@@ -43,8 +43,6 @@ import org.apache.hadoop.io.Text;
*
* @see TaskID
* @see TaskAttemptID
- * @see org.apache.hadoop.mapred.JobTracker#getNewJobId()
- * @see org.apache.hadoop.mapred.JobTracker#getStartTime()
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index 02251823f61..e52d52b9f1b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -22,8 +22,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
/**
* Place holder for cluster level configuration keys.
*
- * These keys are used by both {@link JobTracker} and {@link TaskTracker}. The
- * keys should have "mapreduce.cluster." as the prefix.
+ * The keys should have "mapreduce.cluster." as the prefix.
*
*/
@InterfaceAudience.Private
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
index 46071929aee..8faebd93a31 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ContainerManager.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
@@ -114,7 +115,7 @@ public interface ContainerManager {
*
* @param request request to get ContainerStatus of a container
* with the specified ContainerId
- * @return
+ * @return the ContainerStatus of the container
* @throws YarnRemoteException
*/
@Public
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java
index 33d3b929689..e51a0698acf 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java
@@ -113,7 +113,6 @@ public interface AMResponse {
/**
* Get available headroom for resources in the cluster for the application.
- * @param limit available headroom for resources in the cluster for the application
*/
@Public
@Stable
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
index 92dc7affc5f..0d02cb96b48 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java
@@ -50,7 +50,7 @@ public class ConverterUtils {
*
* @param url
* url to convert
- * @return
+ * @return path from {@link URL}
* @throws URISyntaxException
*/
public static Path getPathFromYarnURL(URL url) throws URISyntaxException {
@@ -63,8 +63,8 @@ public class ConverterUtils {
/**
* change from CharSequence to string for map key and value
- * @param env
- * @return
+ * @param env map for converting
+ * @return string,string map
*/
public static Map<String, String> convertToString(
Map<CharSequence, CharSequence> env) {
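The conversion documented above is a straightforward copy from a CharSequence-keyed map into a String,String map; a plain-Java sketch, independent of ConverterUtils itself:

```java
// Plain-Java sketch of the conversion documented above: copy a
// CharSequence-keyed map into a String,String map (independent of
// ConverterUtils itself).
import java.util.HashMap;
import java.util.Map;

public class EnvConversionSketch {
  static Map<String, String> convertToString(Map<CharSequence, CharSequence> env) {
    Map<String, String> out = new HashMap<String, String>();
    for (Map.Entry<CharSequence, CharSequence> entry : env.entrySet()) {
      out.put(entry.getKey().toString(), entry.getValue().toString());
    }
    return out;
  }
}
```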
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index a934f0c4c47..db5f532987f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -221,8 +221,7 @@ public class ProcfsBasedProcessTree {
}
/** Verify that the given process id is same as its process group id.
- * @param pidStr Process id of the to-be-verified-process
- * @param procfsDir Procfs root dir
+ * @return true if the process id matches its process group id, false otherwise.
*/
public boolean checkPidPgrpidForMatch() {
return checkPidPgrpidForMatch(pid, PROCFS);
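The new @return text describes a pid/process-group comparison. A simplified, Linux-only sketch of that check, reading field 5 (pgrp) of /proc/<pid>/stat; this is an illustration under those assumptions, not the ProcfsBasedProcessTree implementation.

```java
// Simplified, Linux-only sketch of the documented check: a process "matches"
// when its pid equals its process group id (field 5 of /proc/<pid>/stat).
// Illustration only, not the ProcfsBasedProcessTree implementation.
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;

public class PgrpCheckSketch {
  static boolean checkPidPgrpidForMatch(String pid, String procfsDir) throws IOException {
    BufferedReader reader =
        new BufferedReader(new FileReader(procfsDir + "/" + pid + "/stat"));
    try {
      String line = reader.readLine();
      // The comm field is wrapped in parentheses and may contain spaces, so
      // parse the fields that follow the closing ')'.
      String rest = line.substring(line.lastIndexOf(')') + 2);
      String[] fields = rest.split(" ");
      String pgrpId = fields[2]; // state, ppid, pgrp
      return pid.equals(pgrpId);
    } finally {
      reader.close();
    }
  }
}
```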
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
index fe6471d203d..4b70afe74e7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/RackResolver.java
@@ -62,7 +62,7 @@ public class RackResolver {
* right resolver implementation.
* @param conf
* @param hostName
- * @return
+ * @return node {@link Node} after resolving the hostname
*/
public static Node resolve(Configuration conf, String hostName) {
init(conf);
@@ -74,7 +74,7 @@ public class RackResolver {
* network topology. This method doesn't initialize the class.
* Call {@link #init(Configuration)} explicitly.
* @param hostName
- * @return
+ * @return node {@link Node} after resolving the hostname
*/
public static Node resolve(String hostName) {
if (!initCalled) {
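The @return tags above now name the resolved {@link Node}. A hedged usage sketch; the host name is a placeholder and the printed rack depends on the configured topology mapping.

```java
// Hedged usage sketch for the resolve() overloads documented above; the host
// name is a placeholder and the printed rack depends on the configured
// topology mapping.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.yarn.util.RackResolver;

public class RackLookupSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Node node = RackResolver.resolve(conf, "datanode-17.example.com");
    System.out.println(node.getNetworkLocation()); // e.g. /default-rack
  }
}
```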
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
index 5e2de2b226f..25b26f47987 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java
@@ -77,11 +77,18 @@ public abstract class ContainerExecutor implements Configurable {
List localDirs)
throws IOException, InterruptedException;
+
/**
* Launch the container on the node. This is a blocking call and returns only
* when the container exits.
- *
- * @param launchCtxt
+ * @param container the container to be launched
+ * @param nmPrivateContainerScriptPath the path for launch script
+ * @param nmPrivateTokensPath the path for tokens for the container
+ * @param user the user of the container
+ * @param appId the appId of the container
+ * @param containerWorkDir the work dir for the container
+ * @return the return status of the launch
+ * @throws IOException
*/
public abstract int launchContainer(Container container,
Path nmPrivateContainerScriptPath, Path nmPrivateTokensPath,
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
index f62a5cb989b..60206e0d1bd 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DeletionService.java
@@ -60,11 +60,14 @@ public class DeletionService extends AbstractService {
this.exec = exec;
this.debugDelay = 0;
}
-
+
+ /**
+ *
/**
* Delete the path(s) as this user.
* @param user The user to delete as, or the JVM user if null
- * @param p Paths to delete
+ * @param subDir the sub directory name
+ * @param baseDirs the base directories which contain the subDir
*/
public void delete(String user, Path subDir, Path... baseDirs) {
// TODO if parent owned by NM, rename within parent inline
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index 553b98b52a6..5cf098df3b7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -460,7 +460,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
/**
* return the scheduler.
- * @return
+ * @return the scheduler for the Resource Manager.
*/
@Private
public ResourceScheduler getResourceScheduler() {
@@ -469,7 +469,7 @@ public class ResourceManager extends CompositeService implements Recoverable {
/**
* return the resource tracking component.
- * @return
+ * @return the resource tracking component.
*/
@Private
public ResourceTrackerService getResourceTrackerService() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
index babad10128b..0d8e563f284 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ResourceScheduler.java
@@ -29,7 +29,7 @@ import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager;
/**
* This interface is the one implemented by the schedulers. It mainly extends
- * {@link ResourceListener} and {@link YarnScheduler}.
+ * {@link YarnScheduler}.
*
*/
@LimitedPrivate("yarn")
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
index e544e4a66e3..d16ae4b9944 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java
@@ -360,7 +360,7 @@ public class SchedulerApp {
* given priority?
* @param node node to be checked
* @param priority priority of reserved container
- * @return
+ * @return true if reserved, false if not
*/
public synchronized boolean isReserved(SchedulerNode node, Priority priority) {
Map reservedContainers =
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 92a98c1aef4..a6664780357 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -90,7 +90,7 @@ public class SchedulerNode {
* given application.
*
* @param applicationId application
- * @param containers allocated containers
+ * @param rmContainer allocated container
*/
public synchronized void allocateContainer(ApplicationId applicationId,
RMContainer rmContainer) {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java
index 4bd486e9c60..9c9473da7bb 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/Queue.java
@@ -155,7 +155,7 @@ extends org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue {
* Assign containers to applications in the queue or it's children (if any).
* @param clusterResource the resource of the cluster.
* @param node node on which resources are available
- * @return
+ * @return the resource that is being assigned.
*/
public Resource assignContainers(Resource clusterResource, SchedulerNode node);