YARN-1453. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in doc comments. Contributed by Akira AJISAKA, Andrew Purtell, and Allen Wittenauer.

This commit is contained in:
Tsuyoshi Ozawa 2015-03-16 23:19:05 +09:00
parent 3ff1ba2a7b
commit 3da9a97cfb
64 changed files with 517 additions and 585 deletions

View File

@@ -769,6 +769,9 @@ Release 2.7.0 - UNRELEASED
YARN-3171. Sort by Application id, AppAttempt and ContainerID doesn't work YARN-3171. Sort by Application id, AppAttempt and ContainerID doesn't work
in ATS / RM web ui. (Naganarasimha G R via xgong) in ATS / RM web ui. (Naganarasimha G R via xgong)
YARN-1453. [JDK8] Fix Javadoc errors caused by incorrect or illegal tags in
doc comments. (Akira AJISAKA, Andrew Purtell, and Allen Wittenauer via ozawa)
Release 2.6.0 - 2014-11-18 Release 2.6.0 - 2014-11-18
INCOMPATIBLE CHANGES INCOMPATIBLE CHANGES

View File

@@ -65,28 +65,19 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
public interface ApplicationBaseProtocol { public interface ApplicationBaseProtocol {
/** /**
* <p>
* The interface used by clients to get a report of an Application from the * The interface used by clients to get a report of an Application from the
* <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>. * <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>.
* </p>
*
* <p> * <p>
* The client, via {@link GetApplicationReportRequest} provides the * The client, via {@link GetApplicationReportRequest} provides the
* {@link ApplicationId} of the application. * {@link ApplicationId} of the application.
* </p>
*
* <p> * <p>
* In secure mode,the <code>ResourceManager</code> or * In secure mode,the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> verifies access to the application, * <code>ApplicationHistoryServer</code> verifies access to the application,
* queue etc. before accepting the request. * queue etc. before accepting the request.
* </p>
*
* <p> * <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code> * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetApplicationReportResponse} which includes the * responds with a {@link GetApplicationReportResponse} which includes the
* {@link ApplicationReport} for the application. * {@link ApplicationReport} for the application.
* </p>
*
* <p> * <p>
* If the user does not have <code>VIEW_APP</code> access then the following * If the user does not have <code>VIEW_APP</code> access then the following
* fields in the report will be set to stubbed values: * fields in the report will be set to stubbed values:
@@ -99,7 +90,6 @@ public interface ApplicationBaseProtocol {
* <li>original tracking URL - set to "N/A"</li> * <li>original tracking URL - set to "N/A"</li>
* <li>resource usage report - all values are -1</li> * <li>resource usage report - all values are -1</li>
* </ul> * </ul>
* </p>
* *
* @param request * @param request
* request for an application report * request for an application report
@@ -148,29 +138,20 @@ public interface ApplicationBaseProtocol {
IOException; IOException;
/** /**
* <p>
* The interface used by clients to get a report of an Application Attempt * The interface used by clients to get a report of an Application Attempt
* from the <code>ResourceManager</code> or * from the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> * <code>ApplicationHistoryServer</code>
* </p>
*
* <p> * <p>
* The client, via {@link GetApplicationAttemptReportRequest} provides the * The client, via {@link GetApplicationAttemptReportRequest} provides the
* {@link ApplicationAttemptId} of the application attempt. * {@link ApplicationAttemptId} of the application attempt.
* </p>
*
* <p> * <p>
* In secure mode,the <code>ResourceManager</code> or * In secure mode,the <code>ResourceManager</code> or
* <code>ApplicationHistoryServer</code> verifies access to the method before * <code>ApplicationHistoryServer</code> verifies access to the method before
* accepting the request. * accepting the request.
* </p>
*
* <p> * <p>
* The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code> * The <code>ResourceManager</code> or <code>ApplicationHistoryServer</code>
* responds with a {@link GetApplicationAttemptReportResponse} which includes * responds with a {@link GetApplicationAttemptReportResponse} which includes
* the {@link ApplicationAttemptReport} for the application attempt. * the {@link ApplicationAttemptReport} for the application attempt.
* </p>
*
* <p> * <p>
* If the user does not have <code>VIEW_APP</code> access then the following * If the user does not have <code>VIEW_APP</code> access then the following
* fields in the report will be set to stubbed values: * fields in the report will be set to stubbed values:
@@ -181,7 +162,6 @@ public interface ApplicationBaseProtocol {
* <li>diagnostics - set to "N/A"</li> * <li>diagnostics - set to "N/A"</li>
* <li>tracking URL</li> * <li>tracking URL</li>
* </ul> * </ul>
* </p>
* *
* @param request * @param request
* request for an application attempt report * request for an application attempt report

View File

@@ -135,9 +135,6 @@ public interface ApplicationClientProtocol extends ApplicationBaseProtocol {
* @return (empty) response on accepting the submission * @return (empty) response on accepting the submission
* @throws YarnException * @throws YarnException
* @throws IOException * @throws IOException
* @throws InvalidResourceRequestException
* The exception is thrown when a {@link ResourceRequest} is out of
* the range of the configured lower and upper resource boundaries.
* @see #getNewApplication(GetNewApplicationRequest) * @see #getNewApplication(GetNewApplicationRequest)
*/ */
@Public @Public

View File

@@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.util.Records;
* A list of unused {@link Container} which are being returned. * A list of unused {@link Container} which are being returned.
* </li> * </li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationMasterProtocol#allocate(AllocateRequest) * @see ApplicationMasterProtocol#allocate(AllocateRequest)
*/ */

View File

@@ -39,15 +39,16 @@ import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>The response sent by the <code>ResourceManager</code> the * The response sent by the <code>ResourceManager</code> the
* <code>ApplicationMaster</code> during resource negotiation.</p> * <code>ApplicationMaster</code> during resource negotiation.
* * <p>
* <p>The response, includes: * The response, includes:
* <ul> * <ul>
* <li>Response ID to track duplicate responses.</li> * <li>Response ID to track duplicate responses.</li>
* <li> * <li>
* An AMCommand sent by ResourceManager to let the <code>ApplicationMaster</code> * An AMCommand sent by ResourceManager to let the
* take some actions (resync, shutdown etc.). * {@code ApplicationMaster} take some actions (resync, shutdown etc.).
* </li>
* <li>A list of newly allocated {@link Container}.</li> * <li>A list of newly allocated {@link Container}.</li>
* <li>A list of completed {@link Container}s' statuses.</li> * <li>A list of completed {@link Container}s' statuses.</li>
* <li> * <li>
@@ -59,7 +60,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>A description of resources requested back by the cluster</li> * <li>A description of resources requested back by the cluster</li>
* <li>AMRMToken, if AMRMToken has been rolled over</li> * <li>AMRMToken, if AMRMToken has been rolled over</li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationMasterProtocol#allocate(AllocateRequest) * @see ApplicationMasterProtocol#allocate(AllocateRequest)
*/ */
@@ -220,16 +220,16 @@ public abstract class AllocateResponse {
public abstract void setNumClusterNodes(int numNodes); public abstract void setNumClusterNodes(int numNodes);
/** /**
* <p>Get the description of containers owned by the AM, but requested back by * Get the description of containers owned by the AM, but requested back by
* the cluster. Note that the RM may have an inconsistent view of the * the cluster. Note that the RM may have an inconsistent view of the
* resources owned by the AM. These messages are advisory, and the AM may * resources owned by the AM. These messages are advisory, and the AM may
* elect to ignore them.<p> * elect to ignore them.
* * <p>
* <p>The message is a snapshot of the resources the RM wants back from the AM. * The message is a snapshot of the resources the RM wants back from the AM.
* While demand persists, the RM will repeat its request; applications should * While demand persists, the RM will repeat its request; applications should
* not interpret each message as a request for <em>additional<em> * not interpret each message as a request for <em>additional</em>
* resources on top of previous messages. Resources requested consistently * resources on top of previous messages. Resources requested consistently
* over some duration may be forcibly killed by the RM.<p> * over some duration may be forcibly killed by the RM.
* *
* @return A specification of the resources to reclaim from this AM. * @return A specification of the resources to reclaim from this AM.
*/ */
@@ -242,15 +242,17 @@ public abstract class AllocateResponse {
public abstract void setPreemptionMessage(PreemptionMessage request); public abstract void setPreemptionMessage(PreemptionMessage request);
/** /**
* <p>Get the list of NMTokens required for communicating with NM. New NMTokens * Get the list of NMTokens required for communicating with NM. New NMTokens
* issued only if<p> * issued only if
* <p>1) AM is receiving first container on underlying NodeManager.<br> * <p>
* 1) AM is receiving first container on underlying NodeManager.<br>
* OR<br> * OR<br>
* 2) NMToken master key rolled over in ResourceManager and AM is getting new * 2) NMToken master key rolled over in ResourceManager and AM is getting new
* container on the same underlying NodeManager.<p> * container on the same underlying NodeManager.
* <p>AM will receive one NMToken per NM irrespective of the number of containers * <p>
* AM will receive one NMToken per NM irrespective of the number of containers
* issued on same NM. AM is expected to store these tokens until issued a * issued on same NM. AM is expected to store these tokens until issued a
* new token for the same NM.<p> * new token for the same NM.
*/ */
@Public @Public
@Stable @Stable

View File

@@ -25,19 +25,18 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>The finalization request sent by the <code>ApplicationMaster</code> to * The finalization request sent by the {@code ApplicationMaster} to
* inform the <code>ResourceManager</code> about its completion.</p> * inform the {@code ResourceManager} about its completion.
* * <p>
* <p>The final request includes details such: * The final request includes details such:
* <ul> * <ul>
* <li>Final state of the <code>ApplicationMaster</code></li> * <li>Final state of the {@code ApplicationMaster}</li>
* <li> * <li>
* Diagnostic information in case of failure of the * Diagnostic information in case of failure of the
* <code>ApplicationMaster</code> * {@code ApplicationMaster}
* </li> * </li>
* <li>Tracking URL</li> * <li>Tracking URL</li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest) * @see ApplicationMasterProtocol#finishApplicationMaster(FinishApplicationMasterRequest)
*/ */

View File

@@ -26,18 +26,15 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>
* The response sent by the <code>ResourceManager</code> to a * The response sent by the <code>ResourceManager</code> to a
* <code>ApplicationMaster</code> on it's completion. * <code>ApplicationMaster</code> on it's completion.
* </p>
*
* <p> * <p>
* The response, includes: * The response, includes:
* <ul> * <ul>
* <li>A flag which indicates that the application has successfully unregistered * <li>A flag which indicates that the application has successfully unregistered
* with the RM and the application can safely stop.</li> * with the RM and the application can safely stop.</li>
* </ul> * </ul>
* </p> * <p>
* Note: The flag indicates whether the application has successfully * Note: The flag indicates whether the application has successfully
* unregistered and is safe to stop. The application may stop after the flag is * unregistered and is safe to stop. The application may stop after the flag is
* true. If the application stops before the flag is true then the RM may retry * true. If the application stops before the flag is true then the RM may retry

View File

@@ -34,7 +34,6 @@ import org.apache.hadoop.yarn.util.Records;
* <p>The request from clients to get a report of Applications * <p>The request from clients to get a report of Applications
* in the cluster from the <code>ResourceManager</code>.</p> * in the cluster from the <code>ResourceManager</code>.</p>
* *
*
* @see ApplicationClientProtocol#getApplications(GetApplicationsRequest) * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest)
*/ */
@Public @Public

View File

@@ -27,8 +27,8 @@ import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>The response sent by the <code>ResourceManager</code> to a client * The response sent by the <code>ResourceManager</code> to a client
* requesting cluster metrics.<p> * requesting cluster metrics.
* *
* @see YarnClusterMetrics * @see YarnClusterMetrics
* @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest) * @see ApplicationClientProtocol#getClusterMetrics(GetClusterMetricsRequest)

View File

@@ -28,11 +28,9 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>
* The request sent by the <code>ApplicationMaster</code> to the * The request sent by the <code>ApplicationMaster</code> to the
* <code>NodeManager</code> to get {@link ContainerStatus} of requested * <code>NodeManager</code> to get {@link ContainerStatus} of requested
* containers. * containers.
* </p>
* *
* @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest) * @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)
*/ */

View File

@@ -32,11 +32,9 @@ import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>
* The response sent by the <code>NodeManager</code> to the * The response sent by the <code>NodeManager</code> to the
* <code>ApplicationMaster</code> when asked to obtain the * <code>ApplicationMaster</code> when asked to obtain the
* <code>ContainerStatus</code> of requested containers. * <code>ContainerStatus</code> of requested containers.
* </p>
* *
* @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest) * @see ContainerManagementProtocol#getContainerStatuses(GetContainerStatusesRequest)
*/ */

View File

@@ -63,7 +63,7 @@ public abstract class GetQueueInfoRequest {
public abstract void setQueueName(String queueName); public abstract void setQueueName(String queueName);
/** /**
* Is information about <em>active applications<e/m> required? * Is information about <em>active applications</em> required?
* @return <code>true</code> if applications' information is to be included, * @return <code>true</code> if applications' information is to be included,
* else <code>false</code> * else <code>false</code>
*/ */

View File

@@ -27,12 +27,11 @@ import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>The response sent by the <code>ResourceManager</code> to a client * The response sent by the {@code ResourceManager} to a client
* requesting information about queues in the system.</p> * requesting information about queues in the system.
* * <p>
* <p>The response includes a {@link QueueInfo} which has details such as * The response includes a {@link QueueInfo} which has details such as
* queue name, used/total capacities, running applications, child queues etc * queue name, used/total capacities, running applications, child queues etc.
* .</p>
* *
* @see QueueInfo * @see QueueInfo
* @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest) * @see ApplicationClientProtocol#getQueueInfo(GetQueueInfoRequest)

View File

@@ -26,21 +26,20 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>
* The response sent by the <code>ResourceManager</code> to the client aborting * The response sent by the <code>ResourceManager</code> to the client aborting
* a submitted application. * a submitted application.
* </p>
* <p> * <p>
* The response, includes: * The response, includes:
* <ul> * <ul>
* <li>A flag which indicates that the process of killing the application is * <li>
* completed or not.</li> * A flag which indicates that the process of killing the application is
* completed or not.
* </li>
* </ul> * </ul>
* Note: user is recommended to wait until this flag becomes true, otherwise if * Note: user is recommended to wait until this flag becomes true, otherwise if
* the <code>ResourceManager</code> crashes before the process of killing the * the <code>ResourceManager</code> crashes before the process of killing the
* application is completed, the <code>ResourceManager</code> may retry this * application is completed, the <code>ResourceManager</code> may retry this
* application on recovery. * application on recovery.
* </p>
* *
* @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest) * @see ApplicationClientProtocol#forceKillApplication(KillApplicationRequest)
*/ */

View File

@@ -24,16 +24,15 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>The request sent by the <code>ApplicationMaster</code> to * The request sent by the {@code ApplicationMaster} to {@code ResourceManager}
* <code>ResourceManager</code> on registration.</p> * on registration.
* * <p>
* <p>The registration includes details such as: * The registration includes details such as:
* <ul> * <ul>
* <li>Hostname on which the AM is running.</li> * <li>Hostname on which the AM is running.</li>
* <li>RPC Port</li> * <li>RPC Port</li>
* <li>Tracking URL</li> * <li>Tracking URL</li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest) * @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)
*/ */
@@ -83,20 +82,20 @@ public abstract class RegisterApplicationMasterRequest {
public abstract void setHost(String host); public abstract void setHost(String host);
/** /**
* Get the <em>RPC port</em> on which the <code>ApplicationMaster</code> * Get the <em>RPC port</em> on which the {@code ApplicationMaster} is
* is responding. * responding.
* @return the <em>RPC port<em> on which the <code>ApplicationMaster</code> is * @return the <em>RPC port</em> on which the {@code ApplicationMaster}
* responding * is responding
*/ */
@Public @Public
@Stable @Stable
public abstract int getRpcPort(); public abstract int getRpcPort();
/** /**
* Set the <em>RPC port<em> on which the <code>ApplicationMaster</code> is * Set the <em>RPC port</em> on which the {@code ApplicationMaster} is
* responding. * responding.
* @param port <em>RPC port<em> on which the <code>ApplicationMaster</code> is * @param port <em>RPC port</em> on which the {@code ApplicationMaster}
* responding * is responding
*/ */
@Public @Public
@Stable @Stable

View File

@@ -36,16 +36,15 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>The response sent by the <code>ResourceManager</code> to a new * The response sent by the {@code ResourceManager} to a new
* <code>ApplicationMaster</code> on registration.</p> * {@code ApplicationMaster} on registration.
* * <p>
* <p>The response contains critical details such as: * The response contains critical details such as:
* <ul> * <ul>
* <li>Maximum capability for allocated resources in the cluster.</li> * <li>Maximum capability for allocated resources in the cluster.</li>
* <li><code>ApplicationACL</code>s for the application.</li> * <li>{@code ApplicationACL}s for the application.</li>
* <li>ClientToAMToken master key.</li> * <li>ClientToAMToken master key.</li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest) * @see ApplicationMasterProtocol#registerApplicationMaster(RegisterApplicationMasterRequest)
*/ */

View File

@@ -74,10 +74,11 @@ public abstract class StartContainerRequest {
public abstract void setContainerLaunchContext(ContainerLaunchContext context); public abstract void setContainerLaunchContext(ContainerLaunchContext context);
/** /**
* <p>Get the container token to be used for authorization during starting * Get the container token to be used for authorization during starting
* container.</p> * container.
* <p>Note: {@link NMToken} will be used for authenticating communication with </code> * <p>
* NodeManager</code>.</p> * Note: {@link NMToken} will be used for authenticating communication with
* {@code NodeManager}.
* @return the container token to be used for authorization during starting * @return the container token to be used for authorization during starting
* container. * container.
* @see NMToken * @see NMToken

View File

@@ -24,10 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p> * {@code ApplicationAttemptReport} is a report of an application attempt.
* <code>ApplicationAttemptReport</code> is a report of an application attempt.
* </p>
*
* <p> * <p>
* It includes details such as: * It includes details such as:
* <ul> * <ul>
@@ -40,8 +37,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>{@link YarnApplicationAttemptState} of the application attempt.</li> * <li>{@link YarnApplicationAttemptState} of the application attempt.</li>
* <li>{@link ContainerId} of the master Container.</li> * <li>{@link ContainerId} of the master Container.</li>
* </ul> * </ul>
* </p>
*
*/ */
@Public @Public
@Unstable @Unstable

View File

@@ -28,9 +28,9 @@ import org.apache.hadoop.yarn.util.Records;
import java.util.Set; import java.util.Set;
/** /**
* <p><code>ApplicationReport</code> is a report of an application.</p> * {@code ApplicationReport} is a report of an application.
* * <p>
* <p>It includes details such as: * It includes details such as:
* <ul> * <ul>
* <li>{@link ApplicationId} of the application.</li> * <li>{@link ApplicationId} of the application.</li>
* <li>Applications user.</li> * <li>Applications user.</li>
@@ -44,7 +44,6 @@ import java.util.Set;
* <li>Start time of the application.</li> * <li>Start time of the application.</li>
* <li>Client {@link Token} of the application (if security is enabled).</li> * <li>Client {@link Token} of the application (if security is enabled).</li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest) * @see ApplicationClientProtocol#getApplicationReport(org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest)
*/ */
@@ -341,20 +340,20 @@ public abstract class ApplicationReport {
/** /**
* Get the AMRM token of the application. * Get the AMRM token of the application.
* <p/> * <p>
* The AMRM token is required for AM to RM scheduling operations. For * The AMRM token is required for AM to RM scheduling operations. For
* managed Application Masters Yarn takes care of injecting it. For unmanaged * managed Application Masters Yarn takes care of injecting it. For unmanaged
* Applications Masters, the token must be obtained via this method and set * Applications Masters, the token must be obtained via this method and set
* in the {@link org.apache.hadoop.security.UserGroupInformation} of the * in the {@link org.apache.hadoop.security.UserGroupInformation} of the
* current user. * current user.
* <p/> * <p>
* The AMRM token will be returned only if all the following conditions are * The AMRM token will be returned only if all the following conditions are
* met: * met:
* <li> * <ul>
* <ul>the requester is the owner of the ApplicationMaster</ul> * <li>the requester is the owner of the ApplicationMaster</li>
* <ul>the application master is an unmanaged ApplicationMaster</ul> * <li>the application master is an unmanaged ApplicationMaster</li>
* <ul>the application master is in ACCEPTED state</ul> * <li>the application master is in ACCEPTED state</li>
* </li> * </ul>
* Else this method returns NULL. * Else this method returns NULL.
* *
* @return the AM to RM token if available. * @return the AM to RM token if available.

View File

@@ -33,11 +33,11 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>ApplicationSubmissionContext</code> represents all of the * {@code ApplicationSubmissionContext} represents all of the
* information needed by the <code>ResourceManager</code> to launch * information needed by the {@code ResourceManager} to launch
* the <code>ApplicationMaster</code> for an application.</p> * the {@code ApplicationMaster} for an application.
* * <p>
* <p>It includes details such as: * It includes details such as:
* <ul> * <ul>
* <li>{@link ApplicationId} of the application.</li> * <li>{@link ApplicationId} of the application.</li>
* <li>Application user.</li> * <li>Application user.</li>
@@ -47,18 +47,20 @@ import org.apache.hadoop.yarn.util.Records;
* {@link ContainerLaunchContext} of the container in which the * {@link ContainerLaunchContext} of the container in which the
* <code>ApplicationMaster</code> is executed. * <code>ApplicationMaster</code> is executed.
* </li> * </li>
* <li>maxAppAttempts. The maximum number of application attempts. * <li>
* maxAppAttempts. The maximum number of application attempts.
* It should be no larger than the global number of max attempts in the * It should be no larger than the global number of max attempts in the
* Yarn configuration.</li> * Yarn configuration.
* <li>attemptFailuresValidityInterval. The default value is -1. * </li>
* when attemptFailuresValidityInterval in milliseconds is set to > 0, * <li>
* the failure number will no take failures which happen out of the * attemptFailuresValidityInterval. The default value is -1.
* validityInterval into failure count. If failure count reaches to * when attemptFailuresValidityInterval in milliseconds is set to
* maxAppAttempts, the application will be failed. * {@literal >} 0, the failure number will no take failures which happen
* out of the validityInterval into failure count. If failure count
* reaches to maxAppAttempts, the application will be failed.
* </li> * </li>
* <li>Optional, application-specific {@link LogAggregationContext}</li> * <li>Optional, application-specific {@link LogAggregationContext}</li>
* </ul> * </ul>
* </p>
* *
* @see ContainerLaunchContext * @see ContainerLaunchContext
* @see ApplicationClientProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest) * @see ApplicationClientProtocol#submitApplication(org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest)

View File

@@ -27,15 +27,14 @@ import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>Container</code> represents an allocated resource in the cluster. * {@code Container} represents an allocated resource in the cluster.
* </p> * <p>
* * The {@code ResourceManager} is the sole authority to allocate any
* <p>The <code>ResourceManager</code> is the sole authority to allocate any * {@code Container} to applications. The allocated {@code Container}
* <code>Container</code> to applications. The allocated <code>Container</code>
* is always on a single node and has a unique {@link ContainerId}. It has * is always on a single node and has a unique {@link ContainerId}. It has
* a specific amount of {@link Resource} allocated.</p> * a specific amount of {@link Resource} allocated.
* * <p>
* <p>It includes details such as: * It includes details such as:
* <ul> * <ul>
* <li>{@link ContainerId} for the container, which is globally unique.</li> * <li>{@link ContainerId} for the container, which is globally unique.</li>
* <li> * <li>
@@ -49,12 +48,10 @@ import org.apache.hadoop.yarn.util.Records;
* authenticity of the allocation. * authenticity of the allocation.
* </li> * </li>
* </ul> * </ul>
* </p>
* *
* <p>Typically, an <code>ApplicationMaster</code> receives the * Typically, an {@code ApplicationMaster} receives the {@code Container}
* <code>Container</code> from the <code>ResourceManager</code> during * from the {@code ResourceManager} during resource-negotiation and then
* resource-negotiation and then talks to the <code>NodeManager</code> to * talks to the {@code NodeManager} to start/stop containers.
* start/stop containers.</p>
* *
* @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
* @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)

View File

@@ -30,10 +30,10 @@ import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>ContainerLaunchContext</code> represents all of the information * {@code ContainerLaunchContext} represents all of the information
* needed by the <code>NodeManager</code> to launch a container.</p> * needed by the {@code NodeManager} to launch a container.
* * <p>
* <p>It includes details such as: * It includes details such as:
* <ul> * <ul>
* <li>{@link ContainerId} of the container.</li> * <li>{@link ContainerId} of the container.</li>
* <li>{@link Resource} allocated to the container.</li> * <li>{@link Resource} allocated to the container.</li>
@@ -47,7 +47,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>Environment variables for the launched process.</li> * <li>Environment variables for the launched process.</li>
* <li>Command to launch the container.</li> * <li>Command to launch the container.</li>
* </ul> * </ul>
* </p>
* *
* @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest) * @see ContainerManagementProtocol#startContainers(org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest)
*/ */

View File

@@ -24,10 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p> * {@code ContainerReport} is a report of an container.
* <code>ContainerReport</code> is a report of an container.
* </p>
*
* <p> * <p>
* It includes details such as: * It includes details such as:
* <ul> * <ul>
@ -43,8 +40,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>Log URL.</li> * <li>Log URL.</li>
* <li>nodeHttpAddress</li> * <li>nodeHttpAddress</li>
* </ul> * </ul>
* </p>
*
*/ */
@Public @Public

View File

@ -25,17 +25,16 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>ContainerStatus</code> represents the current status of a * {@code ContainerStatus} represents the current status of a
* <code>Container</code>.</p> * {@code Container}.
* * <p>
* <p>It provides details such as: * It provides details such as:
* <ul> * <ul>
* <li><code>ContainerId</code> of the container.</li> * <li>{@code ContainerId} of the container.</li>
* <li><code>ContainerState</code> of the container.</li> * <li>{@code ContainerState} of the container.</li>
* <li><em>Exit status</em> of a completed container.</li> * <li><em>Exit status</em> of a completed container.</li>
* <li><em>Diagnostic</em> message for a failed container.</li> * <li><em>Diagnostic</em> message for a failed container.</li>
* </ul> * </ul>
* </p>
*/ */
@Public @Public
@Stable @Stable

View File

@ -23,10 +23,10 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
/** /**
* <p><code>LocalResourceType</code> specifies the <em>type</em> * {@code LocalResourceType} specifies the <em>type</em>
* of a resource localized by the <code>NodeManager</code>.</p> * of a resource localized by the {@code NodeManager}.
* * <p>
* <p>The <em>type</em> can be one of: * The <em>type</em> can be one of:
* <ul> * <ul>
* <li> * <li>
* {@link #FILE} - Regular file i.e. uninterpreted bytes. * {@link #FILE} - Regular file i.e. uninterpreted bytes.
@ -37,8 +37,8 @@ import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
* </li> * </li>
* <li> * <li>
* {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}. * {@link #PATTERN} - A hybrid between {@link #ARCHIVE} and {@link #FILE}.
* </li>
* </ul> * </ul>
* </p>
* *
* @see LocalResource * @see LocalResource
* @see ContainerLaunchContext * @see ContainerLaunchContext

View File

@ -23,10 +23,10 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ContainerManagementProtocol; import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
/** /**
* <p><code>LocalResourceVisibility</code> specifies the <em>visibility</em> * {@code LocalResourceVisibility} specifies the <em>visibility</em>
* of a resource localized by the <code>NodeManager</code>.</p> * of a resource localized by the {@code NodeManager}.
* * <p>
* <p>The <em>visibility</em> can be one of: * The <em>visibility</em> can be one of:
* <ul> * <ul>
* <li>{@link #PUBLIC} - Shared by all users on the node.</li> * <li>{@link #PUBLIC} - Shared by all users on the node.</li>
* <li> * <li>
@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.api.ContainerManagementProtocol;
* <em>same application</em> on the node. * <em>same application</em> on the node.
* </li> * </li>
* </ul> * </ul>
* </p>
* *
* @see LocalResource * @see LocalResource
* @see ContainerLaunchContext * @see ContainerLaunchContext

View File

@ -24,30 +24,37 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>LogAggregationContext</code> represents all of the * {@code LogAggregationContext} represents all of the
* information needed by the <code>NodeManager</code> to handle * information needed by the {@code NodeManager} to handle
* the logs for an application.</p> * the logs for an application.
* * <p>
* <p>It includes details such as: * It includes details such as:
* <ul> * <ul>
* <li>includePattern. It uses Java Regex to filter the log files * <li>
* includePattern. It uses Java Regex to filter the log files
* which match the defined include pattern and those log files * which match the defined include pattern and those log files
* will be uploaded when the application finishes. </li> * will be uploaded when the application finishes.
* <li>excludePattern. It uses Java Regex to filter the log files * </li>
* <li>
* excludePattern. It uses Java Regex to filter the log files
* which match the defined exclude pattern and those log files * which match the defined exclude pattern and those log files
* will not be uploaded when application finishes. If the log file * will not be uploaded when application finishes. If the log file
* name matches both the include and the exclude pattern, this file * name matches both the include and the exclude pattern, this file
* will be excluded eventually</li> * will be excluded eventually.
* <li>rolledLogsIncludePattern. It uses Java Regex to filter the log files * </li>
* <li>
* rolledLogsIncludePattern. It uses Java Regex to filter the log files
* which match the defined include pattern and those log files * which match the defined include pattern and those log files
* will be aggregated in a rolling fashion.</li> * will be aggregated in a rolling fashion.
* <li>rolledLogsExcludePattern. It uses Java Regex to filter the log files * </li>
* <li>
* rolledLogsExcludePattern. It uses Java Regex to filter the log files
* which match the defined exclude pattern and those log files * which match the defined exclude pattern and those log files
* will not be aggregated in a rolling fashion. If the log file * will not be aggregated in a rolling fashion. If the log file
* name matches both the include and the exclude pattern, this file * name matches both the include and the exclude pattern, this file
* will be excluded eventually</li> * will be excluded eventually.
* </li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationSubmissionContext * @see ApplicationSubmissionContext
*/ */

View File

@ -28,10 +28,10 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>NodeReport</code> is a summary of runtime information of a * {@code NodeReport} is a summary of runtime information of a node
* node in the cluster.</p> * in the cluster.
* * <p>
* <p>It includes details such as: * It includes details such as:
* <ul> * <ul>
* <li>{@link NodeId} of the node.</li> * <li>{@link NodeId} of the node.</li>
* <li>HTTP Tracking URL of the node.</li> * <li>HTTP Tracking URL of the node.</li>
@ -40,7 +40,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>Total available {@link Resource} of the node.</li> * <li>Total available {@link Resource} of the node.</li>
* <li>Number of running containers on the node.</li> * <li>Number of running containers on the node.</li>
* </ul> * </ul>
* </p>
* *
* @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest) * @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)
*/ */

View File

@ -24,36 +24,36 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>A {@link PreemptionMessage} is part of the RM-AM protocol, and it is used by * A {@link PreemptionMessage} is part of the RM-AM protocol, and it is used by
* the RM to specify resources that the RM wants to reclaim from this * the RM to specify resources that the RM wants to reclaim from this
* <code>ApplicationMaster</code> (AM). The AM receives a {@link * {@code ApplicationMaster} (AM). The AM receives a {@link
* StrictPreemptionContract} message encoding which containers the platform may * StrictPreemptionContract} message encoding which containers the platform may
* forcibly kill, granting it an opportunity to checkpoint state or adjust its * forcibly kill, granting it an opportunity to checkpoint state or adjust its
* execution plan. The message may also include a {@link PreemptionContract} * execution plan. The message may also include a {@link PreemptionContract}
* granting the AM more latitude in selecting which resources to return to the * granting the AM more latitude in selecting which resources to return to the
* cluster.<p> * cluster.
* * <p>
* <p>The AM should decode both parts of the message. The {@link * The AM should decode both parts of the message. The {@link
* StrictPreemptionContract} specifies particular allocations that the RM * StrictPreemptionContract} specifies particular allocations that the RM
* requires back. The AM can checkpoint containers' state, adjust its execution * requires back. The AM can checkpoint containers' state, adjust its execution
* plan to move the computation, or take no action and hope that conditions that * plan to move the computation, or take no action and hope that conditions that
* caused the RM to ask for the container will change.<p> * caused the RM to ask for the container will change.
* * <p>
* <p>In contrast, the {@link PreemptionContract} also includes a description of * In contrast, the {@link PreemptionContract} also includes a description of
* resources with a set of containers. If the AM releases containers matching * resources with a set of containers. If the AM releases containers matching
* that profile, then the containers enumerated in {@link * that profile, then the containers enumerated in {@link
* PreemptionContract#getContainers()} may not be killed.<p> * PreemptionContract#getContainers()} may not be killed.
* * <p>
* <p>Each preemption message reflects the RM's current understanding of the * Each preemption message reflects the RM's current understanding of the
* cluster state, so a request to return <emph>N</emph> containers may not * cluster state, so a request to return <em>N</em> containers may not
* reflect containers the AM is releasing, recently exited containers the RM has * reflect containers the AM is releasing, recently exited containers the RM has
* yet to learn about, or new containers allocated before the message was * yet to learn about, or new containers allocated before the message was
* generated. Conversely, an RM may request a different profile of containers in * generated. Conversely, an RM may request a different profile of containers in
* subsequent requests.<p> * subsequent requests.
* * <p>
* <p>The policy enforced by the RM is part of the scheduler. Generally, only * The policy enforced by the RM is part of the scheduler. Generally, only
* containers that have been requested consistently should be killed, but the * containers that have been requested consistently should be killed, but the
* details are not specified.<p> * details are not specified.
*/ */
@Public @Public
@Evolving @Evolving

View File

@ -23,18 +23,15 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
/** /**
* <p> * {@code QueueACL} enumerates the various ACLs for queues.
* <code>QueueACL</code> enumerates the various ACLs for queues.
* </p>
*
* <p> * <p>
* The ACL is one of: * The ACL is one of:
* <ul> * <ul>
* <li>{@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the * <li>
* queue.</li> * {@link #SUBMIT_APPLICATIONS} - ACL to submit applications to the queue.
* </li>
* <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li> * <li>{@link #ADMINISTER_QUEUE} - ACL to administer the queue.</li>
* </ul> * </ul>
* </p>
* *
* @see QueueInfo * @see QueueInfo
* @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest) * @see ApplicationClientProtocol#getQueueUserAcls(org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest)

View File

@ -29,9 +29,9 @@ import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>QueueInfo is a report of the runtime information of the queue.</p> * QueueInfo is a report of the runtime information of the queue.
* * <p>
* <p>It includes information such as: * It includes information such as:
* <ul> * <ul>
* <li>Queue name.</li> * <li>Queue name.</li>
* <li>Capacity of the queue.</li> * <li>Capacity of the queue.</li>
@ -41,7 +41,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>Running applications.</li> * <li>Running applications.</li>
* <li>{@link QueueState} of the queue.</li> * <li>{@link QueueState} of the queue.</li>
* </ul> * </ul>
* </p>
* *
* @see QueueState * @see QueueState
* @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest) * @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)

View File

@ -23,14 +23,13 @@ import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
/** /**
* <p>State of a Queue.</p> * State of a Queue.
* * <p>
* <p>A queue is in one of: * A queue is in one of:
* <ul> * <ul>
* <li>{@link #RUNNING} - normal state.</li> * <li>{@link #RUNNING} - normal state.</li>
* <li>{@link #STOPPED} - not accepting new application submissions. * <li>{@link #STOPPED} - not accepting new application submissions.</li>
* </ul> * </ul>
* </p>
* *
* @see QueueInfo * @see QueueInfo
* @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest) * @see ApplicationClientProtocol#getQueueInfo(org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest)

View File

@ -25,23 +25,18 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p>
* {@link ReservationRequest} represents the request made by an application to * {@link ReservationRequest} represents the request made by an application to
* the {@code ResourceManager} to reserve {@link Resource}s. * the {@code ResourceManager} to reserve {@link Resource}s.
* </p>
*
* <p> * <p>
* It includes: * It includes:
* <ul> * <ul>
* <li>{@link Resource} required for each request.</li> * <li>{@link Resource} required for each request.</li>
* <li> * <li>
* Number of containers, of above specifications, which are required by the * Number of containers, of above specifications, which are required by the
* application.</li> * application.
* <li> * </li>
* Concurrency that indicates the gang size of the request.</li> * <li>Concurrency that indicates the gang size of the request.</li>
* </ul> * </ul>
* </p>
*
*/ */
@Public @Public
@Unstable @Unstable

View File

@ -33,14 +33,13 @@ public enum ReservationRequestInterpreter {
* Requires that exactly ONE among the {@link ReservationRequest} submitted as * Requires that exactly ONE among the {@link ReservationRequest} submitted as
* of a {@link ReservationDefinition} is satisfied to satisfy the overall * of a {@link ReservationDefinition} is satisfied to satisfy the overall
* {@link ReservationDefinition}. * {@link ReservationDefinition}.
* * <p>
* WHEN TO USE THIS: This is useful when the user have multiple equivalent * WHEN TO USE THIS: This is useful when the user have multiple equivalent
* ways to run an application, and wants to expose to the ReservationAgent * ways to run an application, and wants to expose to the ReservationAgent
* such flexibility. For example an application could use one <32GB,16core> * such flexibility. For example an application could use one
* container for 10min, or 16 <2GB,1core> containers for 15min, the * {@literal <32GB,16core>} container for 10min, or 16 {@literal <2GB,1core>}
* ReservationAgent will decide which one of the two it is best for the system * containers for 15min, the ReservationAgent will decide which one of the
* to place. * two it is best for the system to place.
*
*/ */
R_ANY, R_ANY,
@ -49,16 +48,16 @@ public enum ReservationRequestInterpreter {
* {@link ReservationDefinition} are satisfied for the overall * {@link ReservationDefinition} are satisfied for the overall
* {@link ReservationDefinition} to be satisfied. No constraints are imposed * {@link ReservationDefinition} to be satisfied. No constraints are imposed
* on the temporal ordering of the allocation used to satisfy the * on the temporal ordering of the allocation used to satisfy the
* ResourceRequeusts. * ResourceRequests.
* * <p>
* WHEN TO USE THIS: This is useful to capture a scenario in which the user * WHEN TO USE THIS: This is useful to capture a scenario in which the user
* cares for multiple ReservationDefinition to be all accepted, or none. For * cares for multiple ReservationDefinition to be all accepted, or none. For
* example, a user might want a reservation R1: with 10 x <8GB,4core> for * example, a user might want a reservation R1: with 10 x
* 10min, and a reservation R2: with 2 <1GB,1core> for 1h, and only if both * {@literal <8GB,4core>} for 10min, and a reservation R2:
* are satisfied the workflow run in this reservation succeeds. The key * with 2 {@literal <1GB,1core>} for 1h, and only if both are satisfied
* differentiator from ALL and ORDER, ORDER_NO_GAP, is that ALL imposes no * the workflow run in this reservation succeeds. The key differentiator
* restrictions on the relative allocations used to place R1 and R2 above. * from ALL and ORDER, ORDER_NO_GAP, is that ALL imposes no restrictions
* * on the relative allocations used to place R1 and R2 above.
*/ */
R_ALL, R_ALL,
@ -73,15 +72,16 @@ public enum ReservationRequestInterpreter {
* constraints are imposed on temporal gaps between subsequent allocations * constraints are imposed on temporal gaps between subsequent allocations
* (the last instant of the previous allocation can be an arbitrary long * (the last instant of the previous allocation can be an arbitrary long
* period of time before the first instant of the subsequent allocation). * period of time before the first instant of the subsequent allocation).
* * <p>
* WHEN TO USE THIS: Like ALL this requires all ReservationDefinitions to be * WHEN TO USE THIS: Like ALL this requires all ReservationDefinitions to be
* placed, but it also imposes a time ordering on the allocations used. This * placed, but it also imposes a time ordering on the allocations used. This
* is important if the ReservationDefinition(s) are used to describe a * is important if the ReservationDefinition(s) are used to describe a
* workflow with inherent inter-stage dependencies. For example, a first job * workflow with inherent inter-stage dependencies. For example, a first job
* runs in a ReservaitonDefinition R1 (10 x <1GB,1core> for 20min), and its * runs in a ReservaitonDefinition R1 (10 x {@literal <1GB,1core>}
* output is consumed by a second job described by a ReservationDefinition R2 * for 20min), and its output is consumed by a second job described by
* (5 x <1GB,1core>) for 50min). R2 allocation cannot overlap R1, as R2 models * a ReservationDefinition R2 (5 x {@literal <1GB,1core>}) for 50min).
* a job depending on the output of the job modeled by R1. * R2 allocation cannot overlap R1, as R2 models a job depending on
* the output of the job modeled by R1.
*/ */
R_ORDER, R_ORDER,

View File

@ -27,11 +27,11 @@ import org.apache.hadoop.yarn.api.ApplicationMasterProtocol;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>ResourceRequest</code> represents the request made by an * {@code ResourceRequest} represents the request made
* application to the <code>ResourceManager</code> to obtain various * by an application to the {@code ResourceManager}
* <code>Container</code> allocations.</p> * to obtain various {@code Container} allocations.
* * <p>
* <p>It includes: * It includes:
* <ul> * <ul>
* <li>{@link Priority} of the request.</li> * <li>{@link Priority} of the request.</li>
* <li> * <li>
@ -45,13 +45,12 @@ import org.apache.hadoop.yarn.util.Records;
* by the application. * by the application.
* </li> * </li>
* <li> * <li>
* A boolean <em>relaxLocality</em> flag, defaulting to <code>true</code>, * A boolean <em>relaxLocality</em> flag, defaulting to {@code true},
* which tells the <code>ResourceManager</code> if the application wants * which tells the {@code ResourceManager} if the application wants
* locality to be loose (i.e. allows fall-through to rack or <em>any</em>) * locality to be loose (i.e. allows fall-through to rack or <em>any</em>)
* or strict (i.e. specify hard constraint on resource allocation). * or strict (i.e. specify hard constraint on resource allocation).
* </li> * </li>
* </ul> * </ul>
* </p>
* *
* @see Resource * @see Resource
* @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) * @see ApplicationMasterProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest)
@ -267,7 +266,7 @@ public abstract class ResourceRequest implements Comparable<ResourceRequest> {
/** /**
* Set node label expression of this resource request. Now only support * Set node label expression of this resource request. Now only support
* specifying a single node label. In the future we will support more complex * specifying a single node label. In the future we will support more complex
* node label expression specification like AND(&&), OR(||), etc. * node label expression specification like {@code AND(&&), OR(||)}, etc.
* *
* Any please note that node label expression now can only take effect when * Any please note that node label expression now can only take effect when
* the resource request has resourceName = ANY * the resource request has resourceName = ANY

View File

@ -733,8 +733,9 @@ public class YarnConfiguration extends Configuration {
/** /**
* How long to wait between aggregated log retention checks. If set to * How long to wait between aggregated log retention checks. If set to
* a value <= 0 then the value is computed as one-tenth of the log retention * a value {@literal <=} 0 then the value is computed as one-tenth of the
* setting. Be careful set this too small and you will spam the name node. * log retention setting. Be careful set this too small and you will spam
* the name node.
*/ */
public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS = public static final String LOG_AGGREGATION_RETAIN_CHECK_INTERVAL_SECONDS =
YARN_PREFIX + "log-aggregation.retain-check-interval-seconds"; YARN_PREFIX + "log-aggregation.retain-check-interval-seconds";

View File

@ -54,7 +54,7 @@ public abstract class UpdateNodeResourceRequest {
/** /**
* Get the map from <code>NodeId</code> to <code>ResourceOption</code>. * Get the map from <code>NodeId</code> to <code>ResourceOption</code>.
* @return the map of <NodeId, ResourceOption> * @return the map of {@code <NodeId, ResourceOption>}
*/ */
@Public @Public
@Evolving @Evolving
@ -62,7 +62,7 @@ public abstract class UpdateNodeResourceRequest {
/** /**
* Set the map from <code>NodeId</code> to <code>ResourceOption</code>. * Set the map from <code>NodeId</code> to <code>ResourceOption</code>.
* @param nodeResourceMap the map of <NodeId, ResourceOption> * @param nodeResourceMap the map of {@code <NodeId, ResourceOption>}
*/ */
@Public @Public
@Evolving @Evolving

View File

@ -56,15 +56,10 @@ public abstract class AHSClient extends AbstractService {
} }
/** /**
* <p>
* Get a report of the given Application. * Get a report of the given Application.
* </p>
*
* <p> * <p>
* In secure mode, <code>YARN</code> verifies access to the application, queue * In secure mode, <code>YARN</code> verifies access to the application, queue
* etc. before accepting the request. * etc. before accepting the request.
* </p>
*
* <p> * <p>
* If the user does not have <code>VIEW_APP</code> access then the following * If the user does not have <code>VIEW_APP</code> access then the following
* fields in the report will be set to stubbed values: * fields in the report will be set to stubbed values:
@ -77,7 +72,6 @@ public abstract class AHSClient extends AbstractService {
* <li>original tracking URL - set to "N/A"</li> * <li>original tracking URL - set to "N/A"</li>
* <li>resource usage report - all values are -1</li> * <li>resource usage report - all values are -1</li>
* </ul> * </ul>
* </p>
* *
* @param appId * @param appId
* {@link ApplicationId} of the application that needs a report * {@link ApplicationId} of the application that needs a report
@ -121,7 +115,7 @@ public abstract class AHSClient extends AbstractService {
* a report * a report
* @return application attempt report * @return application attempt report
* @throws YarnException * @throws YarnException
* @throws {@link ApplicationAttemptNotFoundException} if application attempt * @throws ApplicationAttemptNotFoundException if application attempt
* not found * not found
* @throws IOException * @throws IOException
*/ */
@ -157,7 +151,7 @@ public abstract class AHSClient extends AbstractService {
* {@link ContainerId} of the container that needs a report * {@link ContainerId} of the container that needs a report
* @return container report * @return container report
* @throws YarnException * @throws YarnException
* @throws {@link ContainerNotFoundException} if container not found * @throws ContainerNotFoundException if container not found
* @throws IOException * @throws IOException
*/ */
public abstract ContainerReport getContainerReport(ContainerId containerId) public abstract ContainerReport getContainerReport(ContainerId containerId)

View File

@ -349,7 +349,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
* Set the NM token cache for the <code>AMRMClient</code>. This cache must * Set the NM token cache for the <code>AMRMClient</code>. This cache must
* be shared with the {@link NMClient} used to manage containers for the * be shared with the {@link NMClient} used to manage containers for the
* <code>AMRMClient</code> * <code>AMRMClient</code>
* <p/> * <p>
* If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
* singleton instance will be used. * singleton instance will be used.
* *
@ -363,7 +363,7 @@ public abstract class AMRMClient<T extends AMRMClient.ContainerRequest> extends
* Get the NM token cache of the <code>AMRMClient</code>. This cache must be * Get the NM token cache of the <code>AMRMClient</code>. This cache must be
* shared with the {@link NMClient} used to manage containers for the * shared with the {@link NMClient} used to manage containers for the
* <code>AMRMClient</code>. * <code>AMRMClient</code>.
* <p/> * <p>
* If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
* singleton instance will be used. * singleton instance will be used.
* *

View File

@ -125,7 +125,7 @@ public abstract class NMClient extends AbstractService {
* Set the NM Token cache of the <code>NMClient</code>. This cache must be * Set the NM Token cache of the <code>NMClient</code>. This cache must be
* shared with the {@link AMRMClient} that requested the containers managed * shared with the {@link AMRMClient} that requested the containers managed
* by this <code>NMClient</code> * by this <code>NMClient</code>
* <p/> * <p>
* If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
* singleton instance will be used. * singleton instance will be used.
* *
@ -139,7 +139,7 @@ public abstract class NMClient extends AbstractService {
* Get the NM token cache of the <code>NMClient</code>. This cache must be * Get the NM token cache of the <code>NMClient</code>. This cache must be
* shared with the {@link AMRMClient} that requested the containers managed * shared with the {@link AMRMClient} that requested the containers managed
* by this <code>NMClient</code> * by this <code>NMClient</code>
* <p/> * <p>
* If a NM token cache is not set, the {@link NMTokenCache#getSingleton()} * If a NM token cache is not set, the {@link NMTokenCache#getSingleton()}
* singleton instance will be used. * singleton instance will be used.
* *

View File

@ -34,26 +34,26 @@ import com.google.common.annotations.VisibleForTesting;
/** /**
* NMTokenCache manages NMTokens required for an Application Master * NMTokenCache manages NMTokens required for an Application Master
* communicating with individual NodeManagers. * communicating with individual NodeManagers.
* <p/> * <p>
* By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use * By default Yarn client libraries {@link AMRMClient} and {@link NMClient} use
* {@link #getSingleton()} instance of the cache. * {@link #getSingleton()} instance of the cache.
* <ul> * <ul>
* <li>Using the singleton instance of the cache is appropriate when running a * <li>
* single ApplicationMaster in the same JVM.</li> * Using the singleton instance of the cache is appropriate when running a
* <li>When using the singleton, users don't need to do anything special, * single ApplicationMaster in the same JVM.
* {@link AMRMClient} and {@link NMClient} are already set up to use the default * </li>
* singleton {@link NMTokenCache}</li> * <li>
* When using the singleton, users don't need to do anything special,
* {@link AMRMClient} and {@link NMClient} are already set up to use the
* default singleton {@link NMTokenCache}
* </li>
* </ul> * </ul>
* <p/>
* If running multiple Application Masters in the same JVM, a different cache * If running multiple Application Masters in the same JVM, a different cache
* instance should be used for each Application Master. * instance should be used for each Application Master.
* <p/>
* <ul> * <ul>
* <li> * <li>
* If using the {@link AMRMClient} and the {@link NMClient}, setting up and using * If using the {@link AMRMClient} and the {@link NMClient}, setting up
* an instance cache is as follows: * and using an instance cache is as follows:
* <p/>
*
* <pre> * <pre>
* NMTokenCache nmTokenCache = new NMTokenCache(); * NMTokenCache nmTokenCache = new NMTokenCache();
* AMRMClient rmClient = AMRMClient.createAMRMClient(); * AMRMClient rmClient = AMRMClient.createAMRMClient();
@ -63,10 +63,8 @@ import com.google.common.annotations.VisibleForTesting;
* </pre> * </pre>
* </li> * </li>
* <li> * <li>
* If using the {@link AMRMClientAsync} and the {@link NMClientAsync}, setting up * If using the {@link AMRMClientAsync} and the {@link NMClientAsync},
* and using an instance cache is as follows: * setting up and using an instance cache is as follows:
* <p/>
*
* <pre> * <pre>
* NMTokenCache nmTokenCache = new NMTokenCache(); * NMTokenCache nmTokenCache = new NMTokenCache();
* AMRMClient rmClient = AMRMClient.createAMRMClient(); * AMRMClient rmClient = AMRMClient.createAMRMClient();
@ -81,8 +79,6 @@ import com.google.common.annotations.VisibleForTesting;
* If using {@link ApplicationMasterProtocol} and * If using {@link ApplicationMasterProtocol} and
* {@link ContainerManagementProtocol} directly, setting up and using an * {@link ContainerManagementProtocol} directly, setting up and using an
* instance cache is as follows: * instance cache is as follows:
* <p/>
*
* <pre> * <pre>
* NMTokenCache nmTokenCache = new NMTokenCache(); * NMTokenCache nmTokenCache = new NMTokenCache();
* ... * ...
@ -102,10 +98,10 @@ import com.google.common.annotations.VisibleForTesting;
* </pre> * </pre>
* </li> * </li>
* </ul> * </ul>
* It is also possible to mix the usage of a client (<code>AMRMClient</code> or * It is also possible to mix the usage of a client ({@code AMRMClient} or
* <code>NMClient</code>, or the async versions of them) with a protocol proxy ( * {@code NMClient}, or the async versions of them) with a protocol proxy
* <code>ContainerManagementProtocolProxy</code> or * ({@code ContainerManagementProtocolProxy} or
* <code>ApplicationMasterProtocol</code>). * {@code ApplicationMasterProtocol}).
*/ */
@Public @Public
@Evolving @Evolving

View File

@ -32,14 +32,12 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Text;
import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationDeleteResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationSubmissionResponse;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse; import org.apache.hadoop.yarn.api.protocolrecords.ReservationUpdateResponse;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport;
import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -58,8 +56,10 @@ import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl; import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl;
import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException;
import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException; import org.apache.hadoop.yarn.exceptions.ApplicationIdNotProvidedException;
import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException;
import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException;
import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier;
@ -171,7 +171,6 @@ public abstract class YarnClient extends AbstractService {
* <li>original tracking URL - set to "N/A"</li> * <li>original tracking URL - set to "N/A"</li>
* <li>resource usage report - all values are -1</li> * <li>resource usage report - all values are -1</li>
* </ul> * </ul>
* </p>
* *
* @param appId * @param appId
* {@link ApplicationId} of the application that needs a report * {@link ApplicationId} of the application that needs a report
@ -184,20 +183,20 @@ public abstract class YarnClient extends AbstractService {
/** /**
* Get the AMRM token of the application. * Get the AMRM token of the application.
* <p/> * <p>
* The AMRM token is required for AM to RM scheduling operations. For * The AMRM token is required for AM to RM scheduling operations. For
* managed Application Masters Yarn takes care of injecting it. For unmanaged * managed Application Masters Yarn takes care of injecting it. For unmanaged
* Applications Masters, the token must be obtained via this method and set * Applications Masters, the token must be obtained via this method and set
* in the {@link org.apache.hadoop.security.UserGroupInformation} of the * in the {@link org.apache.hadoop.security.UserGroupInformation} of the
* current user. * current user.
* <p/> * <p>
* The AMRM token will be returned only if all the following conditions are * The AMRM token will be returned only if all the following conditions are
* met: * met:
* <li> * <ul>
* <ul>the requester is the owner of the ApplicationMaster</ul> * <li>the requester is the owner of the ApplicationMaster</li>
* <ul>the application master is an unmanaged ApplicationMaster</ul> * <li>the application master is an unmanaged ApplicationMaster</li>
* <ul>the application master is in ACCEPTED state</ul> * <li>the application master is in ACCEPTED state</li>
* </li> * </ul>
* Else this method returns NULL. * Else this method returns NULL.
* *
* @param appId {@link ApplicationId} of the application to get the AMRM token * @param appId {@link ApplicationId} of the application to get the AMRM token
@ -415,7 +414,7 @@ public abstract class YarnClient extends AbstractService {
* a report * a report
* @return application attempt report * @return application attempt report
* @throws YarnException * @throws YarnException
* @throws {@link ApplicationAttemptNotFoundException} if application attempt * @throws ApplicationAttemptNotFoundException if application attempt
* not found * not found
* @throws IOException * @throws IOException
*/ */
@ -450,7 +449,7 @@ public abstract class YarnClient extends AbstractService {
* {@link ContainerId} of the container that needs a report * {@link ContainerId} of the container that needs a report
* @return container report * @return container report
* @throws YarnException * @throws YarnException
* @throws {@link ContainerNotFoundException} if container not found. * @throws ContainerNotFoundException if container not found.
* @throws IOException * @throws IOException
*/ */
public abstract ContainerReport getContainerReport(ContainerId containerId) public abstract ContainerReport getContainerReport(ContainerId containerId)

View File

@ -344,7 +344,7 @@ public class CommonNodeLabelsManager extends AbstractService {
/** /**
* add more labels to nodes * add more labels to nodes
* *
* @param addedLabelsToNode node -> labels map * @param addedLabelsToNode node {@literal ->} labels map
*/ */
public void addLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode) public void addLabelsToNode(Map<NodeId, Set<String>> addedLabelsToNode)
throws IOException { throws IOException {
@ -614,7 +614,7 @@ public class CommonNodeLabelsManager extends AbstractService {
* remove labels from nodes, labels being removed most be contained by these * remove labels from nodes, labels being removed most be contained by these
* nodes * nodes
* *
* @param removeLabelsFromNode node -> labels map * @param removeLabelsFromNode node {@literal ->} labels map
*/ */
public void public void
removeLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode) removeLabelsFromNode(Map<NodeId, Set<String>> removeLabelsFromNode)
@ -668,7 +668,7 @@ public class CommonNodeLabelsManager extends AbstractService {
/** /**
* replace labels to nodes * replace labels to nodes
* *
* @param replaceLabelsToNode node -> labels map * @param replaceLabelsToNode node {@literal ->} labels map
*/ */
public void replaceLabelsOnNode(Map<NodeId, Set<String>> replaceLabelsToNode) public void replaceLabelsOnNode(Map<NodeId, Set<String>> replaceLabelsToNode)
throws IOException { throws IOException {

View File

@ -35,7 +35,7 @@ public abstract class NodeLabelsStore implements Closeable {
} }
/** /**
* Store node -> label * Store node {@literal ->} label
*/ */
public abstract void updateNodeToLabelsMappings( public abstract void updateNodeToLabelsMappings(
Map<NodeId, Set<String>> nodeToLabels) throws IOException; Map<NodeId, Set<String>> nodeToLabels) throws IOException;
@ -54,7 +54,6 @@ public abstract class NodeLabelsStore implements Closeable {
/** /**
* Recover labels and node to labels mappings from store * Recover labels and node to labels mappings from store
* @param conf
*/ */
public abstract void recover() throws IOException; public abstract void recover() throws IOException;

View File

@ -93,7 +93,6 @@ public class ApplicationACLsManager {
* @param applicationAccessType * @param applicationAccessType
* @param applicationOwner * @param applicationOwner
* @param applicationId * @param applicationId
* @throws AccessControlException
*/ */
public boolean checkAccess(UserGroupInformation callerUGI, public boolean checkAccess(UserGroupInformation callerUGI,
ApplicationAccessType applicationAccessType, String applicationOwner, ApplicationAccessType applicationAccessType, String applicationOwner,

View File

@ -88,7 +88,7 @@ public final class StringHelper {
} }
/** /**
* Join on slash & colon (e.g., path args in routing spec) * Join on slash and colon (e.g., path args in routing spec)
* @param args to join * @param args to join
* @return args joined with /: * @return args joined with /:
*/ */
@ -116,7 +116,7 @@ public final class StringHelper {
} }
/** /**
* Split on space & trim results. * Split on space and trim results.
* @param s the string to split * @param s the string to split
* @return an iterable of strings * @return an iterable of strings
*/ */
@ -125,7 +125,7 @@ public final class StringHelper {
} }
/** /**
* Split on _ & trim results * Split on _ and trim results
* @param s the string to split * @param s the string to split
* @return an iterable of strings * @return an iterable of strings
*/ */

View File

@ -52,13 +52,13 @@ import com.google.inject.servlet.GuiceFilter;
/** /**
* Helpers to create an embedded webapp. * Helpers to create an embedded webapp.
* *
* <h4>Quick start:</h4> * <b>Quick start:</b>
* <pre> * <pre>
* WebApp wa = WebApps.$for(myApp).start();</pre> * WebApp wa = WebApps.$for(myApp).start();</pre>
* Starts a webapp with default routes binds to 0.0.0.0 (all network interfaces) * Starts a webapp with default routes binds to 0.0.0.0 (all network interfaces)
* on an ephemeral port, which can be obtained with:<pre> * on an ephemeral port, which can be obtained with:<pre>
* int port = wa.port();</pre> * int port = wa.port();</pre>
* <h4>With more options:</h4> * <b>With more options:</b>
* <pre> * <pre>
* WebApp wa = WebApps.$for(myApp).at(address, port). * WebApp wa = WebApps.$for(myApp).at(address, port).
* with(configuration). * with(configuration).

View File

@ -116,10 +116,10 @@ public class RegistryUtils {
} }
/** /**
* Create a path to a service under a user & service class * Create a path to a service under a user and service class
* @param user username or "" * @param user username or ""
* @param serviceClass service name * @param serviceClass service name
* @param serviceName service name unique for that user & service class * @param serviceName service name unique for that user and service class
* @return a full path * @return a full path
*/ */
public static String servicePath(String user, public static String servicePath(String user,
@ -135,7 +135,7 @@ public class RegistryUtils {
* Create a path for listing components under a service * Create a path for listing components under a service
* @param user username or "" * @param user username or ""
* @param serviceClass service name * @param serviceClass service name
* @param serviceName service name unique for that user & service class * @param serviceName service name unique for that user and service class
* @return a full path * @return a full path
*/ */
public static String componentListPath(String user, public static String componentListPath(String user,
@ -149,7 +149,7 @@ public class RegistryUtils {
* Create the path to a service record for a component * Create the path to a service record for a component
* @param user username or "" * @param user username or ""
* @param serviceClass service name * @param serviceClass service name
* @param serviceName service name unique for that user & service class * @param serviceName service name unique for that user and service class
* @param componentName unique name/ID of the component * @param componentName unique name/ID of the component
* @return a full path * @return a full path
*/ */

View File

@ -32,7 +32,7 @@ import org.apache.hadoop.registry.client.impl.zk.RegistryOperationsService;
* *
* For SASL, the client must be operating in the context of an authed user. * For SASL, the client must be operating in the context of an authed user.
* *
* For id:pass the client must have the relevant id & password, SASL is * For id:pass the client must have the relevant id and password, SASL is
* not used even if the client has credentials. * not used even if the client has credentials.
* *
* For anonymous, nothing is used. * For anonymous, nothing is used.

View File

@ -71,14 +71,13 @@ public interface ZookeeperConfigOptions {
* The SASL client username: {@value}. * The SASL client username: {@value}.
* <p> * <p>
* Set this to the <i>short</i> name of the client, e.g, "user", * Set this to the <i>short</i> name of the client, e.g, "user",
* not <code>user/host</code>, or <code>user/host@REALM</code> * not {@code user/host}, or {@code user/host@REALM}
*/ */
String PROP_ZK_SASL_CLIENT_USERNAME = "zookeeper.sasl.client.username"; String PROP_ZK_SASL_CLIENT_USERNAME = "zookeeper.sasl.client.username";
/** /**
* The SASL Server context, referring to a context in the JVM's * The SASL Server context, referring to a context in the JVM's
* JAAS context file: {@value} * JAAS context file: {@value}
* <p>
*/ */
String PROP_ZK_SERVER_SASL_CONTEXT = String PROP_ZK_SERVER_SASL_CONTEXT =
ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY; ZooKeeperSaslServer.LOGIN_CONTEXT_NAME_KEY;

View File

@ -47,16 +47,16 @@ import java.net.UnknownHostException;
/** /**
* This is a small, localhost Zookeeper service instance that is contained * This is a small, localhost Zookeeper service instance that is contained
* in a YARN service...it's been derived from Apache Twill. * in a YARN service...it's been derived from Apache Twill.
* * <p>
* It implements {@link RegistryBindingSource} and provides binding information, * It implements {@link RegistryBindingSource} and provides binding information,
* <i>once started</i>. Until <code>start()</code> is called, the hostname & * <i>once started</i>. Until {@link #start()} is called, the hostname and
* port may be undefined. Accordingly, the service raises an exception in this * port may be undefined. Accordingly, the service raises an exception in this
* condition. * condition.
* * <p>
* If you wish to chain together a registry service with this one under * If you wish to chain together a registry service with this one under
* the same <code>CompositeService</code>, this service must be added * the same {@code CompositeService}, this service must be added
* as a child first. * as a child first.
* * <p>
* It also sets the configuration parameter * It also sets the configuration parameter
* {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM} * {@link RegistryConstants#KEY_REGISTRY_ZK_QUORUM}
* to its connection string. Any code with access to the service configuration * to its connection string. Any code with access to the service configuration

View File

@ -19,7 +19,8 @@
/** /**
* Basic services for the YARN registry * Basic services for the YARN registry
* <ul> * <ul>
* <li>The {@link org.apache.hadoop.registry.server.services.RegistryAdminService}</ol> * <li>
* The {@link org.apache.hadoop.registry.server.services.RegistryAdminService}
* extends the shared Yarn Registry client with registry setup and * extends the shared Yarn Registry client with registry setup and
* (potentially asynchronous) administrative actions. * (potentially asynchronous) administrative actions.
* </li> * </li>
@ -33,8 +34,6 @@
* extends the standard YARN composite service by making its add and remove * extends the standard YARN composite service by making its add and remove
* methods public. It is a utility service used in parts of the codebase * methods public. It is a utility service used in parts of the codebase
* </li> * </li>
*
* </ul> * </ul>
*
*/ */
package org.apache.hadoop.registry.server.services; package org.apache.hadoop.registry.server.services;

View File

@ -45,17 +45,15 @@ import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier;
import com.google.common.annotations.VisibleForTesting; import com.google.common.annotations.VisibleForTesting;
/** /**
* <p>
* Initializes {@link TimelineAuthenticationFilter} which provides support for * Initializes {@link TimelineAuthenticationFilter} which provides support for
* Kerberos HTTP SPNEGO authentication. * Kerberos HTTP SPNEGO authentication.
* <p/>
* <p> * <p>
* It enables Kerberos HTTP SPNEGO plus delegation token authentication for the * It enables Kerberos HTTP SPNEGO plus delegation token authentication for the
* timeline server. * timeline server.
* <p/> * <p>
* Refer to the <code>core-default.xml</code> file, after the comment 'HTTP * Refer to the {@code core-default.xml} file, after the comment 'HTTP
* Authentication' for details on the configuration options. All related * Authentication' for details on the configuration options. All related
* configuration properties have 'hadoop.http.authentication.' as prefix. * configuration properties have {@code hadoop.http.authentication.} as prefix.
*/ */
public class TimelineAuthenticationFilterInitializer extends FilterInitializer { public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
@ -71,14 +69,11 @@ public class TimelineAuthenticationFilterInitializer extends FilterInitializer {
Map<String, String> filterConfig; Map<String, String> filterConfig;
/** /**
* <p>
* Initializes {@link TimelineAuthenticationFilter} * Initializes {@link TimelineAuthenticationFilter}
* <p/>
* <p> * <p>
* Propagates to {@link TimelineAuthenticationFilter} configuration all YARN * Propagates to {@link TimelineAuthenticationFilter} configuration all YARN
* configuration properties prefixed with * configuration properties prefixed with
* "yarn.timeline-service.authentication." * {@code yarn.timeline-service.authentication.}
* </p>
* *
* @param container * @param container
* The filter container * The filter container

View File

@ -40,7 +40,7 @@ public class ZKClient {
* the zookeeper client library to * the zookeeper client library to
* talk to zookeeper * talk to zookeeper
* @param string the host * @param string the host
* @throws throws IOException * @throws IOException
*/ */
public ZKClient(String string) throws IOException { public ZKClient(String string) throws IOException {
zkClient = new ZooKeeper(string, 30000, new ZKWatcher()); zkClient = new ZooKeeper(string, 30000, new ZKWatcher());

View File

@ -52,7 +52,8 @@ public abstract class RegisterNodeManagerRequest {
* We introduce this here because currently YARN RM doesn't persist nodes info * We introduce this here because currently YARN RM doesn't persist nodes info
* for application running. When RM restart happened, we cannot determinate if * for application running. When RM restart happened, we cannot determinate if
* a node should do application cleanup (like log-aggregation, status update, * a node should do application cleanup (like log-aggregation, status update,
* etc.) or not. <p/> * etc.) or not.
* <p>
* When we have this running application list in node manager register * When we have this running application list in node manager register
* request, we can recover nodes info for running applications. And then we * request, we can recover nodes info for running applications. And then we
* can take actions accordingly * can take actions accordingly

View File

@ -26,10 +26,9 @@ import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.util.Records; import org.apache.hadoop.yarn.util.Records;
/** /**
* <p><code>NodeHealthStatus</code> is a summary of the health status of the * {@code NodeHealthStatus} is a summary of the health status of the node.
* node.</p> * <p>
* * It includes information such as:
* <p>It includes information such as:
* <ul> * <ul>
* <li> * <li>
* An indicator of whether the node is healthy, as determined by the * An indicator of whether the node is healthy, as determined by the
@ -38,7 +37,6 @@ import org.apache.hadoop.yarn.util.Records;
* <li>The previous time at which the health status was reported.</li> * <li>The previous time at which the health status was reported.</li>
* <li>A diagnostic report on the health status.</li> * <li>A diagnostic report on the health status.</li>
* </ul> * </ul>
* </p>
* *
* @see NodeReport * @see NodeReport
* @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest) * @see ApplicationClientProtocol#getClusterNodes(org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest)

View File

@ -102,13 +102,15 @@ public abstract class ContainerExecutor implements Configurable {
/** /**
* Prepare the environment for containers in this application to execute. * Prepare the environment for containers in this application to execute.
* <pre>
* For $x in local.dirs * For $x in local.dirs
* create $x/$user/$appId * create $x/$user/$appId
* Copy $nmLocal/appTokens -> $N/$user/$appId * Copy $nmLocal/appTokens {@literal ->} $N/$user/$appId
* For $rsrc in private resources * For $rsrc in private resources
* Copy $rsrc -> $N/$user/filecache/[idef] * Copy $rsrc {@literal ->} $N/$user/filecache/[idef]
* For $rsrc in job resources * For $rsrc in job resources
* Copy $rsrc -> $N/$user/$appId/filecache/idef * Copy $rsrc {@literal ->} $N/$user/$appId/filecache/idef
* </pre>
* @param user user name of application owner * @param user user name of application owner
* @param appId id of the application * @param appId id of the application
* @param nmPrivateContainerTokens path to localized credentials, rsrc by NM * @param nmPrivateContainerTokens path to localized credentials, rsrc by NM

View File

@ -65,11 +65,11 @@ public class NodeManagerHardwareUtils {
} }
/** /**
* Gets the percentage of physical CPU that is configured for YARN containers * Gets the percentage of physical CPU that is configured for YARN containers.
* This is percent > 0 and <= 100 based on * This is percent {@literal >} 0 and {@literal <=} 100 based on
* YarnConfiguration.NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT * {@link YarnConfiguration#NM_RESOURCE_PERCENTAGE_PHYSICAL_CPU_LIMIT}
* @param conf Configuration object * @param conf Configuration object
* @return percent > 0 and <= 100 * @return percent {@literal >} 0 and {@literal <=} 100
*/ */
public static int getNodeCpuPercentage(Configuration conf) { public static int getNodeCpuPercentage(Configuration conf) {
int nodeCpuPercentage = int nodeCpuPercentage =

View File

@ -216,10 +216,13 @@ public interface RMAppAttempt extends EventHandler<RMAppAttemptEvent> {
/** /**
* Return the flag which indicates whether the attempt failure should be * Return the flag which indicates whether the attempt failure should be
* counted to attempt retry count. * counted to attempt retry count.
* <ul> * <p>
* There failure types should not be counted to attempt retry count: * There failure types should not be counted to attempt retry count:
* <ul>
* <li>preempted by the scheduler.</li> * <li>preempted by the scheduler.</li>
* <li>hardware failures, such as NM failing, lost NM and NM disk errors.</li> * <li>
* hardware failures, such as NM failing, lost NM and NM disk errors.
* </li>
* <li>killed by RM because of RM restart or failover.</li> * <li>killed by RM because of RM restart or failover.</li>
* </ul> * </ul>
*/ */

View File

@ -114,7 +114,7 @@ public abstract class SchedulerNode {
/** /**
* Get the name of the node for scheduling matching decisions. * Get the name of the node for scheduling matching decisions.
* <p/> * <p>
* Typically this is the 'hostname' reported by the node, but it could be * Typically this is the 'hostname' reported by the node, but it could be
* configured to be 'hostname:port' reported by the node via the * configured to be 'hostname:port' reported by the node via the
* {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant. * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant.

View File

@ -194,8 +194,7 @@ public class SchedulerUtils {
* Utility method to validate a resource request, by insuring that the * Utility method to validate a resource request, by insuring that the
* requested memory/vcore is non-negative and not greater than max * requested memory/vcore is non-negative and not greater than max
* *
* @throws <code>InvalidResourceRequestException</code> when there is invalid * @throws InvalidResourceRequestException when there is invalid request
* request
*/ */
public static void validateResourceRequest(ResourceRequest resReq, public static void validateResourceRequest(ResourceRequest resReq,
Resource maximumResource, String queueName, YarnScheduler scheduler) Resource maximumResource, String queueName, YarnScheduler scheduler)

View File

@ -71,7 +71,7 @@ public class ComputeFairShares {
* fair shares. The min and max shares and of the Schedulables are assumed to * fair shares. The min and max shares and of the Schedulables are assumed to
* be set beforehand. We compute the fairest possible allocation of shares to * be set beforehand. We compute the fairest possible allocation of shares to
* the Schedulables that respects their min and max shares. * the Schedulables that respects their min and max shares.
* * <p>
* To understand what this method does, we must first define what weighted * To understand what this method does, we must first define what weighted
* fair sharing means in the presence of min and max shares. If there * fair sharing means in the presence of min and max shares. If there
* were no minimum or maximum shares, then weighted fair sharing would be * were no minimum or maximum shares, then weighted fair sharing would be
@ -79,30 +79,31 @@ public class ComputeFairShares {
* Schedulable and all slots were assigned. Minimum and maximum shares add a * Schedulable and all slots were assigned. Minimum and maximum shares add a
* further twist - Some Schedulables may have a min share higher than their * further twist - Some Schedulables may have a min share higher than their
* assigned share or a max share lower than their assigned share. * assigned share or a max share lower than their assigned share.
* * <p>
* To deal with these possibilities, we define an assignment of slots as being * To deal with these possibilities, we define an assignment of slots as being
* fair if there exists a ratio R such that: Schedulables S where S.minShare * fair if there exists a ratio R such that: Schedulables S where S.minShare
* > R * S.weight are given share S.minShare - Schedulables S where S.maxShare * {@literal >} R * S.weight are given share S.minShare - Schedulables S
* < R * S.weight are given S.maxShare - All other Schedulables S are * where S.maxShare {@literal <} R * S.weight are given S.maxShare -
* assigned share R * S.weight - The sum of all the shares is totalSlots. * All other Schedulables S are assigned share R * S.weight -
* * The sum of all the shares is totalSlots.
* <p>
* We call R the weight-to-slots ratio because it converts a Schedulable's * We call R the weight-to-slots ratio because it converts a Schedulable's
* weight to the number of slots it is assigned. * weight to the number of slots it is assigned.
* * <p>
* We compute a fair allocation by finding a suitable weight-to-slot ratio R. * We compute a fair allocation by finding a suitable weight-to-slot ratio R.
* To do this, we use binary search. Given a ratio R, we compute the number of * To do this, we use binary search. Given a ratio R, we compute the number of
* slots that would be used in total with this ratio (the sum of the shares * slots that would be used in total with this ratio (the sum of the shares
* computed using the conditions above). If this number of slots is less than * computed using the conditions above). If this number of slots is less than
* totalSlots, then R is too small and more slots could be assigned. If the * totalSlots, then R is too small and more slots could be assigned. If the
* number of slots is more than totalSlots, then R is too large. * number of slots is more than totalSlots, then R is too large.
* * <p>
* We begin the binary search with a lower bound on R of 0 (which means that * We begin the binary search with a lower bound on R of 0 (which means that
* all Schedulables are only given their minShare) and an upper bound computed * all Schedulables are only given their minShare) and an upper bound computed
* to be large enough that too many slots are given (by doubling R until we * to be large enough that too many slots are given (by doubling R until we
* use more than totalResources resources). The helper method * use more than totalResources resources). The helper method
* resourceUsedWithWeightToResourceRatio computes the total resources used with a * resourceUsedWithWeightToResourceRatio computes the total resources used with a
* given value of R. * given value of R.
* * <p>
* The running time of this algorithm is linear in the number of Schedulables, * The running time of this algorithm is linear in the number of Schedulables,
* because resourceUsedWithWeightToResourceRatio is linear-time and the number of * because resourceUsedWithWeightToResourceRatio is linear-time and the number of
* iterations of binary search is a constant (dependent on desired precision). * iterations of binary search is a constant (dependent on desired precision).

View File

@ -364,7 +364,6 @@ public class DelegationTokenRenewer extends AbstractService {
* @param shouldCancelAtEnd true if tokens should be canceled when the app is * @param shouldCancelAtEnd true if tokens should be canceled when the app is
* done else false. * done else false.
* @param user user * @param user user
* @throws IOException
*/ */
public void addApplicationAsync(ApplicationId applicationId, Credentials ts, public void addApplicationAsync(ApplicationId applicationId, Credentials ts,
boolean shouldCancelAtEnd, String user) { boolean shouldCancelAtEnd, String user) {
@ -634,7 +633,6 @@ public class DelegationTokenRenewer extends AbstractService {
/** /**
* removing failed DT * removing failed DT
* @param applicationId
*/ */
private void removeFailedDelegationToken(DelegationTokenToRenew t) { private void removeFailedDelegationToken(DelegationTokenToRenew t) {
ApplicationId applicationId = t.applicationId; ApplicationId applicationId = t.applicationId;

View File

@ -181,7 +181,7 @@ public class ProxyUriUtils {
/** /**
* Returns the scheme if present in the url * Returns the scheme if present in the url
* eg. "https://issues.apache.org/jira/browse/YARN" > "https" * eg. "https://issues.apache.org/jira/browse/YARN" {@literal ->} "https"
*/ */
public static String getSchemeFromUrl(String url) { public static String getSchemeFromUrl(String url) {
int index = 0; int index = 0;