MAPREDUCE-2863. Support web services for YARN and MR components. (Thomas Graves via vinodkv)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1213975 13f79535-47bb-0310-9956-ffa450edef68
parent 37b8cc3f19
commit 0ea8570be5
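The patch adds a REST layer rooted at /ws/v1/mapreduce to the MR Application Master web app (see the new "ws" argument passed to WebApps.$for in MRClientService below). As orientation before the diff, here is a minimal sketch of how a client might read the new /info resource; the class name, host, and port are placeholders, not part of the patch:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class AmWsClientSketch {
  public static void main(String[] args) throws Exception {
    // "am-host" and 8088 are placeholders for wherever the AM web app
    // is actually bound; the path comes from @Path("/ws/v1/mapreduce").
    URL url = new URL("http://am-host:8088/ws/v1/mapreduce/info");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    // Every resource is annotated to produce JSON or XML; pick with Accept.
    conn.setRequestProperty("Accept", "application/json");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream()));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line);
    }
    in.close();
  }
}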
CHANGES.txt
@@ -85,6 +85,9 @@ Release 0.23.1 - Unreleased
 
     MAPREDUCE-3121. NodeManager should handle disk-failures (Ravi Gummadi via mahadev)
 
+    MAPREDUCE-2863. Support web services for YARN and MR components. (Thomas
+    Graves via vinodkv)
+
   IMPROVEMENTS
 
     MAPREDUCE-3297. Moved log related components into yarn-common so that
MRClientService.java
@@ -151,7 +151,7 @@ public class MRClientService extends AbstractService
         + ":" + server.getPort());
     LOG.info("Instantiated MRClientService at " + this.bindAddress);
     try {
-      webApp = WebApps.$for("mapreduce", AppContext.class, appContext).with(conf).
+      webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").with(conf).
           start(new AMWebApp());
     } catch (Exception e) {
       LOG.error("Webapps failed to start. Ignoring for now:", e);
AMWebApp.java
@@ -18,8 +18,9 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
-import static org.apache.hadoop.yarn.util.StringHelper.*;
+import static org.apache.hadoop.yarn.util.StringHelper.pajoin;
 
+import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
 import org.apache.hadoop.yarn.webapp.WebApp;
 
 /**
@@ -29,6 +30,9 @@ public class AMWebApp extends WebApp implements AMParams {
 
   @Override
   public void setup() {
+    bind(JAXBContextResolver.class);
+    bind(GenericExceptionHandler.class);
+    bind(AMWebServices.class);
     route("/", AppController.class);
     route("/app", AppController.class);
     route(pajoin("/job", JOB_ID), AppController.class, "job");
AMWebServices.java (new file)
@@ -0,0 +1,362 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.v2.app.webapp;

import java.io.IOException;

import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response.Status;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.NotFoundException;

import com.google.inject.Inject;

@Path("/ws/v1/mapreduce")
public class AMWebServices {
  private final AppContext appCtx;
  private final App app;
  private final Configuration conf;

  @Inject
  public AMWebServices(final App app, final AppContext context,
      final Configuration conf) {
    this.appCtx = context;
    this.app = app;
    this.conf = conf;
  }

  Boolean hasAccess(Job job, HttpServletRequest request) {
    UserGroupInformation callerUgi = UserGroupInformation
        .createRemoteUser(request.getRemoteUser());
    if (!job.checkAccess(callerUgi, JobACL.VIEW_JOB)) {
      return false;
    }
    return true;
  }

  /**
   * check for job access.
   *
   * @param job
   *          the job that is being accessed
   */
  void checkAccess(Job job, HttpServletRequest request) {
    if (!hasAccess(job, request)) {
      throw new WebApplicationException(Status.UNAUTHORIZED);
    }
  }

  @GET
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AppInfo get() {
    return getAppInfo();
  }

  @GET
  @Path("/info")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AppInfo getAppInfo() {
    return new AppInfo(this.app, this.app.context);
  }

  @GET
  @Path("/jobs")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobsInfo getJobs(@Context HttpServletRequest hsr) {
    JobsInfo allJobs = new JobsInfo();
    for (Job job : appCtx.getAllJobs().values()) {
      // getAllJobs only gives you a partial we want a full
      Job fullJob = appCtx.getJob(job.getID());
      if (fullJob == null) {
        continue;
      }
      allJobs.add(new JobInfo(fullJob, hasAccess(fullJob, hsr)));
    }
    return allJobs;
  }

  @GET
  @Path("/jobs/{jobid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobInfo getJob(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    return new JobInfo(job, hasAccess(job, hsr));

  }

  @GET
  @Path("/jobs/{jobid}/counters")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobCounterInfo getJobCounters(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    return new JobCounterInfo(this.appCtx, job);
  }

  @GET
  @Path("/jobs/{jobid}/tasks/{taskid}/counters")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobTaskCounterInfo getSingleTaskCounters(
      @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
      @PathParam("taskid") String tid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = this.appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    return new JobTaskCounterInfo(task);
  }

  @GET
  @Path("/jobs/{jobid}/conf")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ConfInfo getJobConf(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    ConfInfo info;
    try {
      info = new ConfInfo(job, this.conf);
    } catch (IOException e) {
      throw new NotFoundException("unable to load configuration for job: " + jid);
    }
    return info;
  }

  @GET
  @Path("/jobs/{jobid}/tasks")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TasksInfo getJobTasks(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid, @QueryParam("type") String type) {
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    TasksInfo allTasks = new TasksInfo();
    for (Task task : job.getTasks().values()) {
      TaskType ttype = null;
      if (type != null && !type.isEmpty()) {
        try {
          ttype = MRApps.taskType(type);
        } catch (YarnException e) {
          throw new BadRequestException("tasktype must be either m or r");
        }
      }
      if (ttype != null && task.getType() != ttype) {
        continue;
      }
      allTasks.add(new TaskInfo(task));
    }
    return allTasks;
  }

  @GET
  @Path("/jobs/{jobid}/tasks/{taskid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TaskInfo getJobTask(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    return new TaskInfo(task);

  }

  @GET
  @Path("/jobs/{jobid}/tasks/{taskid}/attempts")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TaskAttemptsInfo getJobTaskAttempts(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
    TaskAttemptsInfo attempts = new TaskAttemptsInfo();
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    for (TaskAttempt ta : task.getAttempts().values()) {
      if (ta != null) {
        if (task.getType() == TaskType.REDUCE) {
          attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
        } else {
          attempts.add(new TaskAttemptInfo(ta, task.getType(), true));
        }
      }
    }
    return attempts;
  }

  @GET
  @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TaskAttemptInfo getJobTaskAttemptId(@Context HttpServletRequest hsr,
      @PathParam("jobid") String jid, @PathParam("taskid") String tid,
      @PathParam("attemptid") String attId) {
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId);
    if (attemptId == null) {
      throw new NotFoundException("task attempt id " + attId
          + " not found or invalid");
    }
    TaskAttempt ta = task.getAttempt(attemptId);
    if (ta == null) {
      throw new NotFoundException("Error getting info on task attempt id "
          + attId);
    }
    if (task.getType() == TaskType.REDUCE) {
      return new ReduceTaskAttemptInfo(ta, task.getType());
    } else {
      return new TaskAttemptInfo(ta, task.getType(), true);
    }
  }

  @GET
  @Path("/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(
      @Context HttpServletRequest hsr, @PathParam("jobid") String jid,
      @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = this.appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId);
    if (attemptId == null) {
      throw new NotFoundException("task attempt id " + attId
          + " not found or invalid");
    }
    TaskAttempt ta = task.getAttempt(attemptId);
    if (ta == null) {
      throw new NotFoundException("Error getting info on task attempt id "
          + attId);
    }
    return new JobTaskAttemptCounterInfo(ta);
  }
}
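Each job-scoped endpoint above repeats the same three-step lookup: convert the id string from the path, throw NotFoundException (a 404) when it does not resolve to a live object, then enforce the view ACL before touching the job. A hypothetical helper, not part of this patch, illustrating the shared pattern; it would sit inside AMWebServices and reuse the imports already shown:

  Job getJobChecked(String jid, HttpServletRequest hsr) {
    JobId jobId = MRApps.toJobID(jid);   // null when jid is malformed
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);      // null when the AM no longer tracks it
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    checkAccess(job, hsr);               // 401 via WebApplicationException
    return job;
  }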
AppController.java
@@ -32,6 +32,7 @@ import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -72,13 +73,14 @@ public class AppController extends Controller implements AMParams {
    * Render the /info page with an overview of current application.
    */
   public void info() {
+    AppInfo info = new AppInfo(app, app.context);
     info("Application Master Overview").
-      _("Application ID:", $(APP_ID)).
-      _("Application Name:", app.context.getApplicationName()).
-      _("User:", app.context.getUser()).
-      _("Started on:", Times.format(app.context.getStartTime())).
+      _("Application ID:", info.getId()).
+      _("Application Name:", info.getName()).
+      _("User:", info.getUser()).
+      _("Started on:", Times.format(info.getStartTime())).
       _("Elasped: ", org.apache.hadoop.util.StringUtils.formatTime(
-          Times.elapsed(app.context.getStartTime(), 0)));
+          info.getElapsedTime() ));
     render(InfoPage.class);
   }
 
ConfBlock.java
@@ -22,14 +22,14 @@ import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
 
 import java.io.IOException;
-import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
@@ -71,11 +71,8 @@ public class ConfBlock extends HtmlBlock {
     }
     Path confPath = job.getConfFile();
     try {
-      //Read in the configuration file and put it in a key/value table.
-      FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
-      Configuration jobConf = new Configuration(false);
-      jobConf.addResource(fc.open(confPath));
+      ConfInfo info = new ConfInfo(job, this.conf);
 
       html.div().h3(confPath.toString())._();
       TBODY<TABLE<Hamlet>> tbody = html.
         // Tasks table
@@ -87,10 +84,10 @@ public class ConfBlock extends HtmlBlock {
             _().
           _().
         tbody();
-      for(Map.Entry<String, String> entry : jobConf) {
+      for (ConfEntryInfo entry : info.getProperties()) {
         tbody.
           tr().
-            td(entry.getKey()).
+            td(entry.getName()).
             td(entry.getValue()).
           _();
       }
JAXBContextResolver.java (new file)
@@ -0,0 +1,77 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.v2.app.webapp;

import java.util.Set;
import java.util.HashSet;
import java.util.Arrays;

import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import com.google.inject.Singleton;

import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;

import org.apache.hadoop.mapreduce.v2.app.webapp.dao.AppInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterGroupInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;

@Singleton
@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {

  private JAXBContext context;
  private final Set<Class> types;

  // you have to specify all the dao classes here
  private final Class[] cTypes = {AppInfo.class, CounterInfo.class,
      JobTaskAttemptCounterInfo.class, JobTaskCounterInfo.class,
      TaskCounterGroupInfo.class, ConfInfo.class, JobCounterInfo.class,
      TaskCounterInfo.class, CounterGroupInfo.class, JobInfo.class,
      JobsInfo.class, ReduceTaskAttemptInfo.class, TaskAttemptInfo.class,
      TaskInfo.class, TasksInfo.class, TaskAttemptsInfo.class,
      ConfEntryInfo.class};

  public JAXBContextResolver() throws Exception {
    this.types = new HashSet<Class>(Arrays.asList(cTypes));
    this.context = new JSONJAXBContext(JSONConfiguration.natural().
        rootUnwrapping(false).build(), cTypes);
  }

  @Override
  public JAXBContext getContext(Class<?> objectType) {
    return (types.contains(objectType)) ? context : null;
  }
}
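The resolver above only serves the natural-notation JAXB context for classes listed in cTypes; anything else falls back to Jersey's default handling, so every new DAO has to be added to that array. A standalone sketch of the serialization behavior this wires up, using a hypothetical DemoInfo bean and the same Jersey 1.x API the patch already depends on:

import java.io.StringWriter;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import com.sun.jersey.api.json.JSONMarshaller;

@XmlRootElement(name = "demo")
@XmlAccessorType(XmlAccessType.FIELD)
public class DemoInfo {
  protected String name = "example";
  protected long value = 42;

  public static void main(String[] args) throws Exception {
    // Same configuration the resolver uses: natural notation, root kept.
    JSONJAXBContext ctx = new JSONJAXBContext(
        JSONConfiguration.natural().rootUnwrapping(false).build(),
        DemoInfo.class);
    JSONMarshaller m = ctx.createJSONMarshaller();
    StringWriter w = new StringWriter();
    m.marshallToJSON(new DemoInfo(), w);
    // Prints roughly: {"demo":{"name":"example","value":42}}
    System.out.println(w);
  }
}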
JobBlock.java
@@ -18,47 +18,32 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
+import com.google.inject.Inject;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.JOB_ID;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._EVEN;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._ODD;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH;
+
 import java.util.Date;
-import java.util.Map;
 
 import org.apache.hadoop.mapreduce.v2.api.records.JobId;
 import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
-import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.app.job.Task;
-import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
-import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
-import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
-import static org.apache.hadoop.yarn.util.StringHelper.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
-
-import com.google.inject.Inject;
 
 public class JobBlock extends HtmlBlock {
   final AppContext appContext;
 
-  int runningMapTasks = 0;
-  int pendingMapTasks = 0;
-  int runningReduceTasks = 0;
-  int pendingReduceTasks = 0;
-
-  int newMapAttempts = 0;
-  int runningMapAttempts = 0;
-  int killedMapAttempts = 0;
-  int failedMapAttempts = 0;
-  int successfulMapAttempts = 0;
-  int newReduceAttempts = 0;
-  int runningReduceAttempts = 0;
-  int killedReduceAttempts = 0;
-  int failedReduceAttempts = 0;
-  int successfulReduceAttempts = 0;
 
   @Inject JobBlock(AppContext appctx) {
     appContext = appctx;
   }
@@ -77,23 +62,13 @@ public class JobBlock extends HtmlBlock {
         p()._("Sorry, ", jid, " not found.")._();
       return;
     }
-    JobReport jobReport = job.getReport();
-    String mapPct = percent(jobReport.getMapProgress());
-    String reducePct = percent(jobReport.getReduceProgress());
-    int mapTasks = job.getTotalMaps();
-    int mapTasksComplete = job.getCompletedMaps();
-    int reduceTasks = job.getTotalReduces();
-    int reducesTasksComplete = job.getCompletedReduces();
-    long startTime = jobReport.getStartTime();
-    long finishTime = jobReport.getFinishTime();
-    countTasksAndAttempts(job);
+    JobInfo jinfo = new JobInfo(job, true);
     info("Job Overview").
-        _("Job Name:", job.getName()).
-        _("State:", job.getState()).
-        _("Uberized:", job.isUber()).
-        _("Started:", new Date(startTime)).
-        _("Elapsed:", StringUtils.formatTime(
-            Times.elapsed(startTime, finishTime)));
+        _("Job Name:", jinfo.getName()).
+        _("State:", jinfo.getState()).
+        _("Uberized:", jinfo.isUberized()).
+        _("Started:", new Date(jinfo.getStartTime())).
+        _("Elapsed:", StringUtils.formatTime(jinfo.getElapsedTime()));
    html.
      _(InfoBlock.class).
      div(_INFO_WRAP).
@@ -112,25 +87,25 @@ public class JobBlock extends HtmlBlock {
             a(url("tasks", jid, "m"), "Map")._().
           td().
             div(_PROGRESSBAR).
-              $title(join(mapPct, '%')). // tooltip
+              $title(join(jinfo.getMapProgressPercent(), '%')). // tooltip
              div(_PROGRESSBAR_VALUE).
-                $style(join("width:", mapPct, '%'))._()._()._().
-          td(String.valueOf(mapTasks)).
-          td(String.valueOf(pendingMapTasks)).
-          td(String.valueOf(runningMapTasks)).
-          td(String.valueOf(mapTasksComplete))._().
+                $style(join("width:", jinfo.getMapProgressPercent(), '%'))._()._()._().
+          td(String.valueOf(jinfo.getMapsTotal())).
+          td(String.valueOf(jinfo.getMapsPending())).
+          td(String.valueOf(jinfo.getMapsRunning())).
+          td(String.valueOf(jinfo.getMapsCompleted()))._().
         tr(_EVEN).
           th().
             a(url("tasks", jid, "r"), "Reduce")._().
           td().
            div(_PROGRESSBAR).
-              $title(join(reducePct, '%')). // tooltip
+              $title(join(jinfo.getReduceProgressPercent(), '%')). // tooltip
              div(_PROGRESSBAR_VALUE).
-                $style(join("width:", reducePct, '%'))._()._()._().
-          td(String.valueOf(reduceTasks)).
-          td(String.valueOf(pendingReduceTasks)).
-          td(String.valueOf(runningReduceTasks)).
-          td(String.valueOf(reducesTasksComplete))._()
+                $style(join("width:", jinfo.getReduceProgressPercent(), '%'))._()._()._().
+          td(String.valueOf(jinfo.getReducesTotal())).
+          td(String.valueOf(jinfo.getReducesPending())).
+          td(String.valueOf(jinfo.getReducesRunning())).
+          td(String.valueOf(jinfo.getReducesCompleted()))._()
         ._().
 
       // Attempts table
@@ -145,110 +120,41 @@ public class JobBlock extends HtmlBlock {
        tr(_ODD).
          th("Maps").
          td().a(url("attempts", jid, "m",
-              TaskAttemptStateUI.NEW.toString()),
-              String.valueOf(newMapAttempts))._().
+              TaskAttemptStateUI.NEW.toString()),
+              String.valueOf(jinfo.getNewMapAttempts()))._().
          td().a(url("attempts", jid, "m",
-              TaskAttemptStateUI.RUNNING.toString()),
-              String.valueOf(runningMapAttempts))._().
+              TaskAttemptStateUI.RUNNING.toString()),
+              String.valueOf(jinfo.getRunningMapAttempts()))._().
          td().a(url("attempts", jid, "m",
-              TaskAttemptStateUI.FAILED.toString()),
-              String.valueOf(failedMapAttempts))._().
+              TaskAttemptStateUI.FAILED.toString()),
+              String.valueOf(jinfo.getFailedMapAttempts()))._().
          td().a(url("attempts", jid, "m",
-              TaskAttemptStateUI.KILLED.toString()),
-              String.valueOf(killedMapAttempts))._().
+              TaskAttemptStateUI.KILLED.toString()),
+              String.valueOf(jinfo.getKilledMapAttempts()))._().
          td().a(url("attempts", jid, "m",
-              TaskAttemptStateUI.SUCCESSFUL.toString()),
-              String.valueOf(successfulMapAttempts))._().
+              TaskAttemptStateUI.SUCCESSFUL.toString()),
+              String.valueOf(jinfo.getSuccessfulMapAttempts()))._().
        _().
        tr(_EVEN).
          th("Reduces").
          td().a(url("attempts", jid, "r",
-              TaskAttemptStateUI.NEW.toString()),
-              String.valueOf(newReduceAttempts))._().
+              TaskAttemptStateUI.NEW.toString()),
+              String.valueOf(jinfo.getNewReduceAttempts()))._().
          td().a(url("attempts", jid, "r",
-              TaskAttemptStateUI.RUNNING.toString()),
-              String.valueOf(runningReduceAttempts))._().
+              TaskAttemptStateUI.RUNNING.toString()),
+              String.valueOf(jinfo.getRunningReduceAttempts()))._().
          td().a(url("attempts", jid, "r",
-              TaskAttemptStateUI.FAILED.toString()),
-              String.valueOf(failedReduceAttempts))._().
+              TaskAttemptStateUI.FAILED.toString()),
+              String.valueOf(jinfo.getFailedReduceAttempts()))._().
          td().a(url("attempts", jid, "r",
-              TaskAttemptStateUI.KILLED.toString()),
-              String.valueOf(killedReduceAttempts))._().
+              TaskAttemptStateUI.KILLED.toString()),
+              String.valueOf(jinfo.getKilledReduceAttempts()))._().
          td().a(url("attempts", jid, "r",
-              TaskAttemptStateUI.SUCCESSFUL.toString()),
-              String.valueOf(successfulReduceAttempts))._().
+              TaskAttemptStateUI.SUCCESSFUL.toString()),
+              String.valueOf(jinfo.getSuccessfulReduceAttempts()))._().
        _().
      _().
    _();
   }
-
-  private void countTasksAndAttempts(Job job) {
-    Map<TaskId, Task> tasks = job.getTasks();
-    for (Task task : tasks.values()) {
-      switch (task.getType()) {
-      case MAP:
-        // Task counts
-        switch (task.getState()) {
-        case RUNNING:
-          ++runningMapTasks;
-          break;
-        case SCHEDULED:
-          ++pendingMapTasks;
-          break;
-        }
-        break;
-      case REDUCE:
-        // Task counts
-        switch (task.getState()) {
-        case RUNNING:
-          ++runningReduceTasks;
-          break;
-        case SCHEDULED:
-          ++pendingReduceTasks;
-          break;
-        }
-        break;
-      }
-
-      // Attempts counts
-      Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
-      for (TaskAttempt attempt : attempts.values()) {
-
-        int newAttempts = 0, running = 0, successful = 0, failed = 0, killed = 0;
-
-        if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
-          ++newAttempts;
-        } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt
-            .getState())) {
-          ++running;
-        } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt
-            .getState())) {
-          ++successful;
-        } else if (TaskAttemptStateUI.FAILED
-            .correspondsTo(attempt.getState())) {
-          ++failed;
-        } else if (TaskAttemptStateUI.KILLED
-            .correspondsTo(attempt.getState())) {
-          ++killed;
-        }
-
-        switch (task.getType()) {
-        case MAP:
-          newMapAttempts += newAttempts;
-          runningMapAttempts += running;
-          successfulMapAttempts += successful;
-          failedMapAttempts += failed;
-          killedMapAttempts += killed;
-          break;
-        case REDUCE:
-          newReduceAttempts += newAttempts;
-          runningReduceAttempts += running;
-          successfulReduceAttempts += successful;
-          failedReduceAttempts += failed;
-          killedReduceAttempts += killed;
-          break;
-        }
-      }
-    }
-  }
 }
JobsBlock.java
@@ -18,18 +18,19 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
+import com.google.inject.Inject;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
+
-import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import static org.apache.hadoop.yarn.util.StringHelper.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
-import com.google.inject.Inject;
-
 public class JobsBlock extends HtmlBlock {
   final AppContext appContext;
 
@@ -54,38 +55,31 @@ public class JobsBlock extends HtmlBlock {
             th("Reduces Total").
             th("Reduces Completed")._()._().
         tbody();
-    for (Job job : appContext.getAllJobs().values()) {
-      String jobID = MRApps.toString(job.getID());
-      JobReport report = job.getReport();
-      String mapPct = percent(report.getMapProgress());
-      String mapsTotal = String.valueOf(job.getTotalMaps());
-      String mapsCompleted = String.valueOf(job.getCompletedMaps());
-      String reducePct = percent(report.getReduceProgress());
-      String reduceTotal = String.valueOf(job.getTotalReduces());
-      String reduceCompleted = String.valueOf(job.getCompletedReduces());
+    for (Job j : appContext.getAllJobs().values()) {
+      JobInfo job = new JobInfo(j, false);
       tbody.
         tr().
          td().
-            span().$title(String.valueOf(job.getID().getId()))._(). // for sorting
-            a(url("job", jobID), jobID)._().
-          td(job.getName().toString()).
-          td(job.getState().toString()).
+            span().$title(String.valueOf(job.getId()))._(). // for sorting
+            a(url("job", job.getId()), job.getId())._().
+          td(job.getName()).
+          td(job.getState()).
          td().
-            span().$title(mapPct)._(). // for sorting
+            span().$title(job.getMapProgressPercent())._(). // for sorting
            div(_PROGRESSBAR).
-              $title(join(mapPct, '%')). // tooltip
+              $title(join(job.getMapProgressPercent(), '%')). // tooltip
              div(_PROGRESSBAR_VALUE).
-                $style(join("width:", mapPct, '%'))._()._()._().
-          td(mapsTotal).
-          td(mapsCompleted).
+                $style(join("width:", job.getMapProgressPercent(), '%'))._()._()._().
+          td(String.valueOf(job.getMapsTotal())).
+          td(String.valueOf(job.getMapsCompleted())).
          td().
-            span().$title(reducePct)._(). // for sorting
+            span().$title(job.getReduceProgressPercent())._(). // for sorting
            div(_PROGRESSBAR).
-              $title(join(reducePct, '%')). // tooltip
+              $title(join(job.getReduceProgressPercent(), '%')). // tooltip
              div(_PROGRESSBAR_VALUE).
-                $style(join("width:", reducePct, '%'))._()._()._().
-          td(reduceTotal).
-          td(reduceCompleted)._();
+                $style(join("width:", job.getReduceProgressPercent(), '%'))._()._()._().
+          td(String.valueOf(job.getReducesTotal())).
+          td(String.valueOf(job.getReducesCompleted()))._();
     }
     tbody._()._();
   }
TaskPage.java
@@ -18,23 +18,29 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
+import static org.apache.hadoop.yarn.util.StringHelper.percent;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;
+
 import java.util.Collection;
 
-import com.google.common.base.Joiner;
-import com.google.inject.Inject;
-
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
-import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TD;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TR;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
-import static org.apache.hadoop.yarn.util.StringHelper.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
 
+import com.google.inject.Inject;
+
 public class TaskPage extends AppView {
 
@@ -66,24 +72,26 @@ public class TaskPage extends AppView {
           th(".tsh", "Elapsed").
           th(".note", "Note")._()._().
         tbody();
-      for (TaskAttempt ta : getTaskAttempts()) {
-        String taid = MRApps.toString(ta.getID());
-        String progress = percent(ta.getProgress());
-        ContainerId containerId = ta.getAssignedContainerID();
+      for (TaskAttempt attempt : getTaskAttempts()) {
+        TaskAttemptInfo ta = new TaskAttemptInfo(attempt, true);
+        String taid = ta.getId();
+        String progress = percent(ta.getProgress() / 100);
+        ContainerId containerId = ta.getAssignedContainerId();
 
-        String nodeHttpAddr = ta.getNodeHttpAddress();
-        long startTime = ta.getLaunchTime();
+        String nodeHttpAddr = ta.getNode();
+        long startTime = ta.getStartTime();
         long finishTime = ta.getFinishTime();
-        long elapsed = Times.elapsed(startTime, finishTime);
+        long elapsed = ta.getElapsedTime();
+        String diag = ta.getNote() == null ? "" : ta.getNote();
        TD<TR<TBODY<TABLE<Hamlet>>>> nodeTd = tbody.
          tr().
            td(".id", taid).
            td(".progress", progress).
-            td(".state", ta.getState().toString()).
+            td(".state", ta.getState()).
            td().
              a(".nodelink", url("http://", nodeHttpAddr), nodeHttpAddr);
        if (containerId != null) {
-          String containerIdStr = ConverterUtils.toString(containerId);
+          String containerIdStr = ta.getAssignedContainerIdStr();
          nodeTd._(" ").
            a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
              containerIdStr, app.getJob().getUserName()), "logs");
@@ -92,7 +100,7 @@ public class TaskPage extends AppView {
           td(".ts", Times.format(startTime)).
           td(".ts", Times.format(finishTime)).
           td(".dt", StringUtils.formatTime(elapsed)).
-          td(".note", Joiner.on('\n').join(ta.getDiagnostics()))._();
+          td(".note", diag)._();
       }
       tbody._()._();
     }
TasksBlock.java
@@ -18,21 +18,24 @@
 
 package org.apache.hadoop.mapreduce.v2.app.webapp;
 
+import com.google.inject.Inject;
+import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;
+import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.util.StringHelper.percent;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR;
+import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;
+
-import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.util.Times;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
-import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
+import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
 import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
 
-import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
-import static org.apache.hadoop.yarn.util.StringHelper.*;
-import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
-import com.google.inject.Inject;
-
 public class TasksBlock extends HtmlBlock {
   final App app;
 
@@ -67,16 +70,16 @@ public class TasksBlock extends HtmlBlock {
       if (type != null && task.getType() != type) {
         continue;
       }
-      String tid = MRApps.toString(task.getID());
-      TaskReport report = task.getReport();
-      String pct = percent(report.getProgress());
-      long startTime = report.getStartTime();
-      long finishTime = report.getFinishTime();
-      long elapsed = Times.elapsed(startTime, finishTime);
+      TaskInfo info = new TaskInfo(task);
+      String tid = info.getId();
+      String pct = percent(info.getProgress() / 100);
+      long startTime = info.getStartTime();
+      long finishTime = info.getFinishTime();
+      long elapsed = info.getElapsedTime();
      tbody.
        tr().
          td().
-            br().$title(String.valueOf(task.getID().getId()))._(). // sorting
+            br().$title(String.valueOf(info.getTaskNum()))._(). // sorting
            a(url("task", tid), tid)._().
          td().
            br().$title(pct)._().
@@ -84,7 +87,7 @@ public class TasksBlock extends HtmlBlock {
               $title(join(pct, '%')). // tooltip
              div(_PROGRESSBAR_VALUE).
                $style(join("width:", pct, '%'))._()._()._().
-          td(report.getTaskState().toString()).
+          td(info.getState()).
          td().
            br().$title(String.valueOf(startTime))._().
            _(Times.format(startTime))._().
dao/AppInfo.java (new file)
@@ -0,0 +1,70 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.yarn.util.Times;

@XmlRootElement(name = "info")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppInfo {

  protected String appId;
  protected String name;
  protected String user;
  protected String hostname;
  protected long startedOn;
  protected long elapsedTime;

  public AppInfo() {
  }

  public AppInfo(App app, AppContext context) {
    this.appId = context.getApplicationID().toString();
    this.name = context.getApplicationName().toString();
    this.user = context.getUser().toString();
    this.startedOn = context.getStartTime();
    this.elapsedTime = Times.elapsed(context.getStartTime(), 0);
  }

  public String getId() {
    return this.appId;
  }

  public String getName() {
    return this.name;
  }

  public String getUser() {
    return this.user;
  }

  public long getStartTime() {
    return this.startedOn;
  }

  public long getElapsedTime() {
    return this.elapsedTime;
  }

}
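Because the DAOs are plain field-accessed JAXB beans, their XML form can be previewed with nothing but JAXB: field names (appId, startedOn, ...) become element names, and the no-arg constructor is what makes unmarshalling possible. A sketch with a stand-in bean shaped like AppInfo above; all values are made up:

import java.io.StringWriter;

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "info")
@XmlAccessorType(XmlAccessType.FIELD)
public class InfoDemo {
  // Hypothetical values; the shape mirrors the AppInfo fields above.
  protected String appId = "application_1326238244047_0001";
  protected String name = "Sleep job";
  protected String user = "someuser";
  protected long startedOn = 1326238244047L;
  protected long elapsedTime = 32064L;

  public static void main(String[] args) throws Exception {
    Marshaller m = JAXBContext.newInstance(InfoDemo.class).createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    StringWriter w = new StringWriter();
    m.marshal(new InfoDemo(), w);
    System.out.println(w); // <info><appId>...</appId><name>...</name>...</info>
  }
}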
dao/ConfEntryInfo.java (new file)
@@ -0,0 +1,46 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class ConfEntryInfo {

  protected String name;
  protected String value;

  public ConfEntryInfo() {
  }

  public ConfEntryInfo(String key, String value) {
    this.name = key;
    this.value = value;
  }

  public String getName() {
    return this.name;
  }

  public String getValue() {
    return this.value;
  }
}
dao/ConfInfo.java (new file)
@@ -0,0 +1,66 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.v2.app.job.Job;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class ConfInfo {

  protected String path;
  protected ArrayList<ConfEntryInfo> property;

  public ConfInfo() {
  }

  public ConfInfo(Job job, Configuration conf) throws IOException {

    Path confPath = job.getConfFile();
    this.property = new ArrayList<ConfEntryInfo>();
    // Read in the configuration file and put it in a key/value table.
    FileContext fc = FileContext.getFileContext(confPath.toUri(), conf);
    Configuration jobConf = new Configuration(false);
    jobConf.addResource(fc.open(confPath));
    this.path = confPath.toString();
    for (Map.Entry<String, String> entry : jobConf) {
      this.property.add(new ConfEntryInfo(entry.getKey(), entry.getValue()));
    }

  }

  public ArrayList<ConfEntryInfo> getProperties() {
    return this.property;
  }

  public String getPath() {
    return this.path;
  }

}
dao/CounterGroupInfo.java (new file)
@@ -0,0 +1,54 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;

@XmlRootElement(name = "counterGroup")
@XmlAccessorType(XmlAccessType.FIELD)
public class CounterGroupInfo {

  protected String counterGroupName;
  @XmlElement(name = "counter")
  protected ArrayList<CounterInfo> counter;

  public CounterGroupInfo() {
  }

  public CounterGroupInfo(String name, CounterGroup g, CounterGroup mg,
      CounterGroup rg) {
    this.counterGroupName = name;
    this.counter = new ArrayList<CounterInfo>();

    for (Counter c : g.getAllCounters().values()) {
      Counter mc = mg == null ? null : mg.getCounter(c.getName());
      Counter rc = rg == null ? null : rg.getCounter(c.getName());
      CounterInfo cinfo = new CounterInfo(c, mc, rc);
      this.counter.add(cinfo);
    }
  }

}
dao/CounterInfo.java (new file)
@@ -0,0 +1,44 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.mapreduce.v2.api.records.Counter;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class CounterInfo {

  protected String counterName;
  protected long totalCounterValue;
  protected long mapCounterValue;
  protected long reduceCounterValue;

  public CounterInfo() {
  }

  public CounterInfo(Counter counter, Counter mc, Counter rc) {
    this.counterName = counter.getName();
    this.totalCounterValue = counter.getValue();
    this.mapCounterValue = mc == null ? 0 : mc.getValue();
    this.reduceCounterValue = rc == null ? 0 : rc.getValue();
  }
}
dao/JobCounterInfo.java (new file)
@@ -0,0 +1,100 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

@XmlRootElement(name = "jobCounters")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobCounterInfo {

  @XmlTransient
  protected Counters total = null;
  @XmlTransient
  protected Counters map = null;
  @XmlTransient
  protected Counters reduce = null;

  protected String id;
  protected ArrayList<CounterGroupInfo> counterGroups;

  public JobCounterInfo() {
  }

  public JobCounterInfo(AppContext ctx, Job job) {
    getCounters(ctx, job);
    counterGroups = new ArrayList<CounterGroupInfo>();
    this.id = MRApps.toString(job.getID());

    int numGroups = 0;

    if (total != null) {
      for (CounterGroup g : total.getAllCounterGroups().values()) {
        if (g != null) {
          CounterGroup mg = map == null ? null : map.getCounterGroup(g
              .getName());
          CounterGroup rg = reduce == null ? null : reduce.getCounterGroup(g
              .getName());
          ++numGroups;

          CounterGroupInfo cginfo = new CounterGroupInfo(g.getName(), g, mg, rg);
          counterGroups.add(cginfo);
        }
      }
    }
  }

  private void getCounters(AppContext ctx, Job job) {
    total = JobImpl.newCounters();
    if (job == null) {
      return;
    }
    map = JobImpl.newCounters();
    reduce = JobImpl.newCounters();
    // Get all types of counters
    Map<TaskId, Task> tasks = job.getTasks();
    for (Task t : tasks.values()) {
      Counters counters = t.getCounters();
      JobImpl.incrAllCounters(total, counters);
      switch (t.getType()) {
      case MAP:
        JobImpl.incrAllCounters(map, counters);
        break;
      case REDUCE:
        JobImpl.incrAllCounters(reduce, counters);
        break;
      }
    }
  }

}
@@ -0,0 +1,349 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import static org.apache.hadoop.yarn.util.StringHelper.percent;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.util.Times;

@XmlRootElement(name = "job")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobInfo {

  // ok for any user to see
  protected long startTime;
  protected long finishTime;
  protected long elapsedTime;
  protected String id;
  protected String name;
  protected String user;
  protected String state;
  protected int mapsTotal;
  protected int mapsCompleted;
  protected float mapProgress;
  protected int reducesTotal;
  protected int reducesCompleted;
  protected float reduceProgress;

  @XmlTransient
  protected String mapProgressPercent;
  @XmlTransient
  protected String reduceProgressPercent;

  // these should only be seen if acls allow
  protected int mapsPending;
  protected int mapsRunning;
  protected int reducesPending;
  protected int reducesRunning;
  protected boolean uberized;
  protected String diagnostics;
  protected int newReduceAttempts = 0;
  protected int runningReduceAttempts = 0;
  protected int failedReduceAttempts = 0;
  protected int killedReduceAttempts = 0;
  protected int successfulReduceAttempts = 0;
  protected int newMapAttempts = 0;
  protected int runningMapAttempts = 0;
  protected int failedMapAttempts = 0;
  protected int killedMapAttempts = 0;
  protected int successfulMapAttempts = 0;
  protected ArrayList<ConfEntryInfo> acls;

  @XmlTransient
  protected int numMaps;
  @XmlTransient
  protected int numReduces;

  public JobInfo() {
  }

  public JobInfo(Job job, Boolean hasAccess) {
    this.id = MRApps.toString(job.getID());
    JobReport report = job.getReport();
    countTasksAndAttempts(job);
    this.startTime = report.getStartTime();
    this.finishTime = report.getFinishTime();
    this.elapsedTime = Times.elapsed(this.startTime, this.finishTime);
    if (this.elapsedTime == -1) {
      this.elapsedTime = 0;
    }
    this.name = job.getName().toString();
    this.user = job.getUserName();
    this.state = job.getState().toString();
    this.mapsTotal = job.getTotalMaps();
    this.mapsCompleted = job.getCompletedMaps();
    this.mapProgress = report.getMapProgress() * 100;
    this.mapProgressPercent = percent(report.getMapProgress());
    this.reducesTotal = job.getTotalReduces();
    this.reducesCompleted = job.getCompletedReduces();
    this.reduceProgress = report.getReduceProgress() * 100;
    this.reduceProgressPercent = percent(report.getReduceProgress());

    this.acls = new ArrayList<ConfEntryInfo>();
    if (hasAccess) {
      this.uberized = job.isUber();

      List<String> diagnostics = job.getDiagnostics();
      if (diagnostics != null && !diagnostics.isEmpty()) {
        StringBuffer b = new StringBuffer();
        for (String diag : diagnostics) {
          b.append(diag);
        }
        this.diagnostics = b.toString();
      }

      Map<JobACL, AccessControlList> allacls = job.getJobACLs();
      if (allacls != null) {
        for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
          this.acls.add(new ConfEntryInfo(entry.getKey().getAclName(), entry
              .getValue().getAclString()));
        }
      }
    }
  }

  public int getNewReduceAttempts() {
    return this.newReduceAttempts;
  }

  public int getKilledReduceAttempts() {
    return this.killedReduceAttempts;
  }

  public int getFailedReduceAttempts() {
    return this.failedReduceAttempts;
  }

  public int getRunningReduceAttempts() {
    return this.runningReduceAttempts;
  }

  public int getSuccessfulReduceAttempts() {
    return this.successfulReduceAttempts;
  }

  public int getNewMapAttempts() {
    return this.newMapAttempts;
  }

  public int getKilledMapAttempts() {
    return this.killedMapAttempts;
  }

  public ArrayList<ConfEntryInfo> getAcls() {
    return acls;
  }

  public int getFailedMapAttempts() {
    return this.failedMapAttempts;
  }

  public int getRunningMapAttempts() {
    return this.runningMapAttempts;
  }

  public int getSuccessfulMapAttempts() {
    return this.successfulMapAttempts;
  }

  public int getReducesCompleted() {
    return this.reducesCompleted;
  }

  public int getReducesTotal() {
    return this.reducesTotal;
  }

  public int getReducesPending() {
    return this.reducesPending;
  }

  public int getReducesRunning() {
    return this.reducesRunning;
  }

  public int getMapsCompleted() {
    return this.mapsCompleted;
  }

  public int getMapsTotal() {
    return this.mapsTotal;
  }

  public int getMapsPending() {
    return this.mapsPending;
  }

  public int getMapsRunning() {
    return this.mapsRunning;
  }

  public String getState() {
    return this.state;
  }

  public String getUser() {
    return this.user;
  }

  public String getName() {
    return this.name;
  }

  public String getId() {
    return this.id;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public long getElapsedTime() {
    return this.elapsedTime;
  }

  public long getFinishTime() {
    return this.finishTime;
  }

  public boolean isUberized() {
    return this.uberized;
  }

  public String getdiagnostics() {
    return this.diagnostics;
  }

  public float getMapProgress() {
    return this.mapProgress;
  }

  public String getMapProgressPercent() {
    return this.mapProgressPercent;
  }

  public float getReduceProgress() {
    return this.reduceProgress;
  }

  public String getReduceProgressPercent() {
    return this.reduceProgressPercent;
  }

  /**
   * Go through a job and update the member variables with counts for
   * information to output in the page.
   *
   * @param job
   *          the job to get counts for.
   */
  private void countTasksAndAttempts(Job job) {
    numReduces = 0;
    numMaps = 0;
    final Map<TaskId, Task> tasks = job.getTasks();
    if (tasks == null) {
      return;
    }
    for (Task task : tasks.values()) {
      switch (task.getType()) {
      case MAP:
        // Task counts
        switch (task.getState()) {
        case RUNNING:
          ++this.mapsRunning;
          break;
        case SCHEDULED:
          ++this.mapsPending;
          break;
        }
        break;
      case REDUCE:
        // Task counts
        switch (task.getState()) {
        case RUNNING:
          ++this.reducesRunning;
          break;
        case SCHEDULED:
          ++this.reducesPending;
          break;
        }
        break;
      }
      // Attempts counts
      Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
      int newAttempts, running, successful, failed, killed;
      for (TaskAttempt attempt : attempts.values()) {

        newAttempts = 0;
        running = 0;
        successful = 0;
        failed = 0;
        killed = 0;
        if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
          ++newAttempts;
        } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
          ++running;
        } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt
            .getState())) {
          ++successful;
        } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
          ++failed;
        } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
          ++killed;
        }

        switch (task.getType()) {
        case MAP:
          this.newMapAttempts += newAttempts;
          this.runningMapAttempts += running;
          this.successfulMapAttempts += successful;
          this.failedMapAttempts += failed;
          this.killedMapAttempts += killed;
          break;
        case REDUCE:
          this.newReduceAttempts += newAttempts;
          this.runningReduceAttempts += running;
          this.successfulReduceAttempts += successful;
          this.failedReduceAttempts += failed;
          this.killedReduceAttempts += killed;
          break;
        }
      }
    }
  }

}
@@ -0,0 +1,63 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

@XmlRootElement(name = "JobTaskAttemptCounters")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobTaskAttemptCounterInfo {

  @XmlTransient
  protected Counters total = null;

  protected String id;
  protected ArrayList<TaskCounterGroupInfo> taskCounterGroups;

  public JobTaskAttemptCounterInfo() {
  }

  public JobTaskAttemptCounterInfo(TaskAttempt taskattempt) {
    this.id = MRApps.toString(taskattempt.getID());
    total = taskattempt.getCounters();
    taskCounterGroups = new ArrayList<TaskCounterGroupInfo>();
    if (total != null) {
      for (CounterGroup g : total.getAllCounterGroups().values()) {
        if (g != null) {
          TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
          taskCounterGroups.add(cginfo);
        }
      }
    }
  }
}
@@ -0,0 +1,59 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.util.MRApps;

@XmlRootElement(name = "jobTaskCounters")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobTaskCounterInfo {

  @XmlTransient
  protected Counters total = null;

  protected String id;
  protected ArrayList<TaskCounterGroupInfo> taskCounterGroups;

  public JobTaskCounterInfo() {
  }

  public JobTaskCounterInfo(Task task) {
    total = task.getCounters();
    this.id = MRApps.toString(task.getID());
    taskCounterGroups = new ArrayList<TaskCounterGroupInfo>();
    if (total != null) {
      for (CounterGroup g : total.getAllCounterGroups().values()) {
        if (g != null) {
          TaskCounterGroupInfo cginfo = new TaskCounterGroupInfo(g.getName(), g);
          taskCounterGroups.add(cginfo);
        }
      }
    }
  }
}
@@ -0,0 +1,43 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "jobs")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobsInfo {

  protected ArrayList<JobInfo> job = new ArrayList<JobInfo>();

  public JobsInfo() {
  } // JAXB needs this

  public void add(JobInfo jobInfo) {
    job.add(jobInfo);
  }

  public ArrayList<JobInfo> getJobs() {
    return job;
  }

}
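The singular field name job is deliberate: with field access, each list entry marshals as a <job> element nested under the <jobs> root. A hedged sketch of that shape (the JobsInfoDemo harness is illustrative, not part of the patch):

    // Illustrative harness, not from the patch: shows the wrapper shape.
    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobInfo;
    import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobsInfo;

    public class JobsInfoDemo {
      public static void main(String[] args) throws Exception {
        JobsInfo jobs = new JobsInfo();
        jobs.add(new JobInfo()); // empty bean; the web services wrap real Jobs
        Marshaller m = JAXBContext.newInstance(JobsInfo.class).createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        m.marshal(jobs, System.out); // expect <jobs><job>...</job></jobs>
      }
    }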
@@ -0,0 +1,83 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.yarn.util.Times;

@XmlRootElement(name = "taskAttempt")
@XmlAccessorType(XmlAccessType.FIELD)
public class ReduceTaskAttemptInfo extends TaskAttemptInfo {

  protected long shuffleFinishTime;
  protected long mergeFinishTime;
  protected long elapsedShuffleTime;
  protected long elapsedMergeTime;
  protected long elapsedReduceTime;

  public ReduceTaskAttemptInfo() {
  }

  public ReduceTaskAttemptInfo(TaskAttempt ta, TaskType type) {
    super(ta, type, false);

    this.shuffleFinishTime = ta.getShuffleFinishTime();
    this.mergeFinishTime = ta.getSortFinishTime();
    this.elapsedShuffleTime = Times.elapsed(this.startTime,
        this.shuffleFinishTime, false);
    if (this.elapsedShuffleTime == -1) {
      this.elapsedShuffleTime = 0;
    }
    this.elapsedMergeTime = Times.elapsed(this.shuffleFinishTime,
        this.mergeFinishTime, false);
    if (this.elapsedMergeTime == -1) {
      this.elapsedMergeTime = 0;
    }
    this.elapsedReduceTime = Times.elapsed(this.mergeFinishTime,
        this.finishTime, false);
    if (this.elapsedReduceTime == -1) {
      this.elapsedReduceTime = 0;
    }
  }

  public long getShuffleFinishTime() {
    return this.shuffleFinishTime;
  }

  public long getMergeFinishTime() {
    return this.mergeFinishTime;
  }

  public long getElapsedShuffleTime() {
    return this.elapsedShuffleTime;
  }

  public long getElapsedMergeTime() {
    return this.elapsedMergeTime;
  }

  public long getElapsedReduceTime() {
    return this.elapsedReduceTime;
  }
}
@@ -0,0 +1,133 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.List;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;

@XmlRootElement(name = "taskAttempt")
@XmlSeeAlso({ ReduceTaskAttemptInfo.class })
@XmlAccessorType(XmlAccessType.FIELD)
public class TaskAttemptInfo {

  protected long startTime;
  protected long finishTime;
  protected long elapsedTime;
  protected float progress;
  protected String id;
  protected String rack;
  protected String state;
  protected String nodeHttpAddress;
  protected String diagnostics;
  protected String type;
  protected String assignedContainerId;

  @XmlTransient
  protected ContainerId assignedContainer;

  public TaskAttemptInfo() {
  }

  public TaskAttemptInfo(TaskAttempt ta, Boolean isRunning) {
    this(ta, TaskType.MAP, isRunning);
  }

  public TaskAttemptInfo(TaskAttempt ta, TaskType type, Boolean isRunning) {
    this.type = type.toString();
    this.id = MRApps.toString(ta.getID());
    this.nodeHttpAddress = ta.getNodeHttpAddress();
    this.startTime = ta.getLaunchTime();
    this.finishTime = ta.getFinishTime();
    this.assignedContainerId = ConverterUtils.toString(ta
        .getAssignedContainerID());
    this.assignedContainer = ta.getAssignedContainerID();
    this.progress = ta.getProgress() * 100;
    this.state = ta.getState().toString();
    this.elapsedTime = Times
        .elapsed(this.startTime, this.finishTime, isRunning);
    if (this.elapsedTime == -1) {
      this.elapsedTime = 0;
    }
    List<String> diagnostics = ta.getDiagnostics();
    if (diagnostics != null && !diagnostics.isEmpty()) {
      StringBuffer b = new StringBuffer();
      for (String diag : diagnostics) {
        b.append(diag);
      }
      this.diagnostics = b.toString();
    }
    this.rack = ta.getNodeRackName();
  }

  public String getAssignedContainerIdStr() {
    return this.assignedContainerId;
  }

  public ContainerId getAssignedContainerId() {
    return this.assignedContainer;
  }

  public String getState() {
    return this.state;
  }

  public String getId() {
    return this.id;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public long getFinishTime() {
    return this.finishTime;
  }

  public float getProgress() {
    return this.progress;
  }

  public long getElapsedTime() {
    return this.elapsedTime;
  }

  public String getNode() {
    return this.nodeHttpAddress;
  }

  public String getRack() {
    return this.rack;
  }

  public String getNote() {
    return this.diagnostics;
  }

}
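The @XmlSeeAlso on TaskAttemptInfo is what preserves the reduce-specific timing fields when an attempt travels through a base-typed reference. A minimal sketch (the SeeAlsoDemo harness is illustrative, not part of the patch):

    // Illustrative harness, not from the patch: marshalling through the base
    // type still emits ReduceTaskAttemptInfo's fields thanks to @XmlSeeAlso.
    import javax.xml.bind.JAXBContext;
    import javax.xml.bind.Marshaller;

    import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
    import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;

    public class SeeAlsoDemo {
      public static void main(String[] args) throws Exception {
        TaskAttemptInfo ta = new ReduceTaskAttemptInfo(); // subclass instance
        Marshaller m = JAXBContext.newInstance(TaskAttemptInfo.class).createMarshaller();
        m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, true);
        m.marshal(ta, System.out); // includes shuffleFinishTime, mergeFinishTime, ...
      }
    }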
@@ -0,0 +1,43 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "taskattempts")
@XmlAccessorType(XmlAccessType.FIELD)
public class TaskAttemptsInfo {

  protected ArrayList<TaskAttemptInfo> taskattempt = new ArrayList<TaskAttemptInfo>();

  public TaskAttemptsInfo() {
  } // JAXB needs this

  public void add(TaskAttemptInfo taskattemptInfo) {
    taskattempt.add(taskattemptInfo);
  }

  public ArrayList<TaskAttemptInfo> getTaskAttempts() {
    return taskattempt;
  }

}
@@ -0,0 +1,49 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class TaskCounterGroupInfo {

  protected String counterGroupName;
  protected ArrayList<TaskCounterInfo> counter;

  public TaskCounterGroupInfo() {
  }

  public TaskCounterGroupInfo(String name, CounterGroup g) {
    this.counterGroupName = name;
    this.counter = new ArrayList<TaskCounterInfo>();

    for (Counter c : g.getAllCounters().values()) {
      TaskCounterInfo cinfo = new TaskCounterInfo(c.getName(), c.getValue());
      this.counter.add(cinfo);
    }
  }
}
@@ -0,0 +1,46 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "counter")
@XmlAccessorType(XmlAccessType.FIELD)
public class TaskCounterInfo {

  protected String name;
  protected long value;

  public TaskCounterInfo() {
  }

  public TaskCounterInfo(String name, long value) {
    this.name = name;
    this.value = value;
  }

  public String getName() {
    return name;
  }

  public long getValue() {
    return value;
  }
}
@@ -0,0 +1,122 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.util.Times;

@XmlRootElement(name = "task")
@XmlAccessorType(XmlAccessType.FIELD)
public class TaskInfo {

  protected long startTime;
  protected long finishTime;
  protected long elapsedTime;
  protected float progress;
  protected String id;
  protected String state;
  protected String type;
  protected String successfulAttempt;

  @XmlTransient
  int taskNum;

  @XmlTransient
  TaskAttempt successful;

  public TaskInfo() {
  }

  public TaskInfo(Task task) {
    TaskType ttype = task.getType();
    this.type = ttype.toString();
    TaskReport report = task.getReport();
    this.startTime = report.getStartTime();
    this.finishTime = report.getFinishTime();
    this.elapsedTime = Times.elapsed(this.startTime, this.finishTime, false);
    if (this.elapsedTime == -1) {
      this.elapsedTime = 0;
    }
    this.state = report.getTaskState().toString();
    this.progress = report.getProgress() * 100;
    this.id = MRApps.toString(task.getID());
    this.taskNum = task.getID().getId();
    this.successful = getSuccessfulAttempt(task);
    if (successful != null) {
      this.successfulAttempt = MRApps.toString(successful.getID());
    } else {
      this.successfulAttempt = "";
    }
  }

  public float getProgress() {
    return this.progress;
  }

  public String getState() {
    return this.state;
  }

  public String getId() {
    return this.id;
  }

  public int getTaskNum() {
    return this.taskNum;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public long getFinishTime() {
    return this.finishTime;
  }

  public long getElapsedTime() {
    return this.elapsedTime;
  }

  public String getSuccessfulAttempt() {
    return this.successfulAttempt;
  }

  public TaskAttempt getSuccessful() {
    return this.successful;
  }

  private TaskAttempt getSuccessfulAttempt(Task task) {
    for (TaskAttempt attempt : task.getAttempts().values()) {
      if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
        return attempt;
      }
    }
    return null;
  }

}
@@ -0,0 +1,43 @@
package org.apache.hadoop.mapreduce.v2.app.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "tasks")
@XmlAccessorType(XmlAccessType.FIELD)
public class TasksInfo {

  protected ArrayList<TaskInfo> task = new ArrayList<TaskInfo>();

  public TasksInfo() {
  } // JAXB needs this

  public void add(TaskInfo taskInfo) {
    task.add(taskInfo);
  }

  public ArrayList<TaskInfo> getTasks() {
    return task;
  }

}
@@ -353,7 +353,7 @@ public class JobHistoryParser {
    * The class where job information is aggregated into after parsing
    */
   public static class JobInfo {
-    String errorInfo = "None";
+    String errorInfo = "";
     long submitTime;
     long finishTime;
     JobID jobid;
@@ -27,12 +27,11 @@ import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;

-import org.apache.hadoop.ipc.Server;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.mapreduce.JobACL;
 import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
 import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
@@ -79,14 +78,14 @@ import org.apache.hadoop.yarn.webapp.WebApp;
 import org.apache.hadoop.yarn.webapp.WebApps;

 /**
- * This module is responsible for talking to the 
+ * This module is responsible for talking to the
  * JobClient (user facing).
  *
  */
 public class HistoryClientService extends AbstractService {

   private static final Log LOG = LogFactory.getLog(HistoryClientService.class);

   private MRClientProtocol protocolHandler;
   private Server server;
   private WebApp webApp;
@@ -118,22 +117,22 @@ public class HistoryClientService extends AbstractService {
     server =
         rpc.getServer(MRClientProtocol.class, protocolHandler, address,
             conf, null,
-            conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT, 
+            conf.getInt(JHAdminConfig.MR_HISTORY_CLIENT_THREAD_COUNT,
                 JHAdminConfig.DEFAULT_MR_HISTORY_CLIENT_THREAD_COUNT));

     // Enable service authorization?
     if (conf.getBoolean(
-        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION, 
+        CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
         false)) {
       server.refreshServiceAcl(conf, new MRAMPolicyProvider());
     }

     server.start();
     this.bindAddress =
         NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
             + ":" + server.getPort());
     LOG.info("Instantiated MRClientService at " + this.bindAddress);

     super.start();
   }
@@ -141,7 +140,7 @@ public class HistoryClientService extends AbstractService {
     webApp = new HsWebApp(history);
     String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
         JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
-    WebApps.$for("jobhistory", this).with(conf).at(bindAddress).start(webApp);
+    WebApps.$for("jobhistory", HistoryClientService.class, this, "ws").with(conf).at(bindAddress).start(webApp);
   }

   @Override
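The extra "ws" argument mounts the new REST resources under a /ws path beside the existing HTML pages. A hedged sketch of querying the history server's web services from Java (the /ws/v1/history/info resource, port 19888, and localhost are assumptions for illustration, not taken from this diff):

    // Illustrative client, not from the patch: fetch history-server info as
    // JSON over the new /ws path using only the JDK's HttpURLConnection.
    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class HistoryWsClient {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://localhost:19888/ws/v1/history/info");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json"); // or application/xml
        BufferedReader in =
            new BufferedReader(new InputStreamReader(conn.getInputStream()));
        String line;
        while ((line = in.readLine()) != null) {
          System.out.println(line);
        }
        in.close();
      }
    }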
@@ -158,7 +157,7 @@ public class HistoryClientService extends AbstractService {
   private class MRClientProtocolHandler implements MRClientProtocol {

     private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);

     private Job verifyAndGetJob(final JobId jobID) throws YarnRemoteException {
       UserGroupInformation loginUgi = null;
       Job job = null;
@@ -194,7 +193,7 @@ public class HistoryClientService extends AbstractService {
       response.setCounters(job.getCounters());
       return response;
     }

     @Override
     public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException {
       JobId jobId = request.getJobId();
@@ -227,23 +226,23 @@ public class HistoryClientService extends AbstractService {
       JobId jobId = request.getJobId();
       int fromEventId = request.getFromEventId();
       int maxEvents = request.getMaxEvents();

       Job job = verifyAndGetJob(jobId);
       GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class);
       response.addAllCompletionEvents(Arrays.asList(job.getTaskAttemptCompletionEvents(fromEventId, maxEvents)));
       return response;
     }

     @Override
     public KillJobResponse killJob(KillJobRequest request) throws YarnRemoteException {
       throw RPCUtil.getRemoteException("Invalid operation on completed job");
     }

     @Override
     public KillTaskResponse killTask(KillTaskRequest request) throws YarnRemoteException {
       throw RPCUtil.getRemoteException("Invalid operation on completed job");
     }

     @Override
     public KillTaskAttemptResponse killTaskAttempt(KillTaskAttemptRequest request) throws YarnRemoteException {
       throw RPCUtil.getRemoteException("Invalid operation on completed job");
@@ -252,15 +251,15 @@ public class HistoryClientService extends AbstractService {
     @Override
     public GetDiagnosticsResponse getDiagnostics(GetDiagnosticsRequest request) throws YarnRemoteException {
       TaskAttemptId taskAttemptId = request.getTaskAttemptId();

       Job job = verifyAndGetJob(taskAttemptId.getTaskId().getJobId());

       GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class);
       response.addAllDiagnostics(job.getTask(taskAttemptId.getTaskId()).getAttempt(taskAttemptId).getDiagnostics());
       return response;
     }

-    @Override 
+    @Override
     public FailTaskAttemptResponse failTaskAttempt(FailTaskAttemptRequest request) throws YarnRemoteException {
       throw RPCUtil.getRemoteException("Invalid operation on completed job");
     }
@@ -269,7 +268,7 @@ public class HistoryClientService extends AbstractService {
     public GetTaskReportsResponse getTaskReports(GetTaskReportsRequest request) throws YarnRemoteException {
       JobId jobId = request.getJobId();
       TaskType taskType = request.getTaskType();

       GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class);
       Job job = verifyAndGetJob(jobId);
       Collection<Task> tasks = job.getTasks(taskType).values();
@@ -21,7 +21,7 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
 import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;

-import org.apache.hadoop.util.VersionInfo;
+import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
 import org.apache.hadoop.yarn.webapp.SubView;
 import org.apache.hadoop.yarn.webapp.view.InfoBlock;
@@ -45,8 +45,9 @@ public class HsAboutPage extends HsView {
    * @return AttemptsBlock.class
    */
   @Override protected Class<? extends SubView> content() {
+    HistoryInfo info = new HistoryInfo();
     info("History Server").
-      _("BuildVersion", VersionInfo.getBuildVersion());
+      _("BuildVersion", info.getHadoopBuildVersion() + " on " + info.getHadoopVersionBuiltOn());
     return InfoBlock.class;
   }
 }
@@ -34,6 +34,9 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
 import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
+import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
+import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
 import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
 import org.apache.hadoop.security.authorize.AccessControlList;
@@ -56,19 +59,6 @@ public class HsJobBlock extends HtmlBlock {
 public class HsJobBlock extends HtmlBlock {
   final AppContext appContext;

-  int killedMapAttempts = 0;
-  int failedMapAttempts = 0;
-  int successfulMapAttempts = 0;
-  int killedReduceAttempts = 0;
-  int failedReduceAttempts = 0;
-  int successfulReduceAttempts = 0;
-  long avgMapTime = 0;
-  long avgReduceTime = 0;
-  long avgShuffleTime = 0;
-  long avgSortTime = 0;
-  int numMaps;
-  int numReduces;
-
   @Inject HsJobBlock(AppContext appctx) {
     appContext = appctx;
   }
@@ -85,37 +75,30 @@ public class HsJobBlock extends HtmlBlock {
       return;
     }
     JobId jobID = MRApps.toJobID(jid);
-    Job job = appContext.getJob(jobID);
-    if (job == null) {
+    Job j = appContext.getJob(jobID);
+    if (j == null) {
       html.
         p()._("Sorry, ", jid, " not found.")._();
       return;
     }
-    Map<JobACL, AccessControlList> acls = job.getJobACLs();
-    List<AMInfo> amInfos = job.getAMInfos();
-    JobReport jobReport = job.getReport();
-    int mapTasks = job.getTotalMaps();
-    int mapTasksComplete = job.getCompletedMaps();
-    int reduceTasks = job.getTotalReduces();
-    int reducesTasksComplete = job.getCompletedReduces();
-    long startTime = jobReport.getStartTime();
-    long finishTime = jobReport.getFinishTime();
-    countTasksAndAttempts(job);
+    List<AMInfo> amInfos = j.getAMInfos();
+    JobInfo job = new JobInfo(j);
     ResponseInfo infoBlock = info("Job Overview").
         _("Job Name:", job.getName()).
         _("User Name:", job.getUserName()).
         _("Queue:", job.getQueueName()).
         _("State:", job.getState()).
         _("Uberized:", job.isUber()).
-        _("Started:", new Date(startTime)).
-        _("Finished:", new Date(finishTime)).
+        _("Started:", new Date(job.getStartTime())).
+        _("Finished:", new Date(job.getFinishTime())).
         _("Elapsed:", StringUtils.formatTime(
-            Times.elapsed(startTime, finishTime, false)));
+            Times.elapsed(job.getStartTime(), job.getFinishTime(), false)));

     String amString =
         amInfos.size() == 1 ? "ApplicationMaster" : "ApplicationMasters";

-    List<String> diagnostics = job.getDiagnostics();
+    // todo - switch to use JobInfo
+    List<String> diagnostics = j.getDiagnostics();
     if(diagnostics != null && !diagnostics.isEmpty()) {
       StringBuffer b = new StringBuffer();
       for(String diag: diagnostics) {
@@ -124,18 +107,17 @@ public class HsJobBlock extends HtmlBlock {
       infoBlock._("Diagnostics:", b.toString());
     }

-    if(numMaps > 0) {
-      infoBlock._("Average Map Time", StringUtils.formatTime(avgMapTime));
+    if(job.getNumMaps() > 0) {
+      infoBlock._("Average Map Time", StringUtils.formatTime(job.getAvgMapTime()));
     }
-    if(numReduces > 0) {
-      infoBlock._("Average Reduce Time", StringUtils.formatTime(avgReduceTime));
-      infoBlock._("Average Shuffle Time", StringUtils.formatTime(avgShuffleTime));
-      infoBlock._("Average Merge Time", StringUtils.formatTime(avgSortTime));
+    if(job.getNumReduces() > 0) {
+      infoBlock._("Average Reduce Time", StringUtils.formatTime(job.getAvgReduceTime()));
+      infoBlock._("Average Shuffle Time", StringUtils.formatTime(job.getAvgShuffleTime()));
+      infoBlock._("Average Merge Time", StringUtils.formatTime(job.getAvgMergeTime()));
     }

-    for(Map.Entry<JobACL, AccessControlList> entry : acls.entrySet()) {
-      infoBlock._("ACL "+entry.getKey().getAclName()+":",
-          entry.getValue().getAclString());
+    for (ConfEntryInfo entry : job.getAcls()) {
+      infoBlock._("ACL "+entry.getName()+":", entry.getValue());
     }
     DIV<Hamlet> div = html.
       _(InfoBlock.class).
@@ -154,18 +136,14 @@ public class HsJobBlock extends HtmlBlock {
           th(_TH, "Logs").
           _();
       for (AMInfo amInfo : amInfos) {
-        String nodeHttpAddress = amInfo.getNodeManagerHost() +
-            ":" + amInfo.getNodeManagerHttpPort();
-        NodeId nodeId = BuilderUtils.newNodeId(
-            amInfo.getNodeManagerHost(), amInfo.getNodeManagerPort());
-
+        AMAttemptInfo attempt = new AMAttemptInfo(amInfo,
+            job.getId(), job.getUserName(), "", "");
         table.tr().
-          td(String.valueOf(amInfo.getAppAttemptId().getAttemptId())).
-          td(new Date(amInfo.getStartTime()).toString()).
-          td().a(".nodelink", url("http://", nodeHttpAddress),
-              nodeHttpAddress)._().
-          td().a(".logslink", url("logs", nodeId.toString(),
-              amInfo.getContainerId().toString(), jid, job.getUserName()),
+          td(String.valueOf(attempt.getAttemptId())).
+          td(new Date(attempt.getStartTime()).toString()).
+          td().a(".nodelink", url("http://", attempt.getNodeHttpAddress()),
+              attempt.getNodeHttpAddress())._().
+          td().a(".logslink", url(attempt.getShortLogsLink()),
              "logs")._().
          _();
      }
@@ -184,13 +162,13 @@ public class HsJobBlock extends HtmlBlock {
         tr(_ODD).
           th().
             a(url("tasks", jid, "m"), "Map")._().
-          td(String.valueOf(mapTasks)).
-          td(String.valueOf(mapTasksComplete))._().
+          td(String.valueOf(String.valueOf(job.getMapsTotal()))).
+          td(String.valueOf(String.valueOf(job.getMapsCompleted())))._().
         tr(_EVEN).
           th().
             a(url("tasks", jid, "r"), "Reduce")._().
-          td(String.valueOf(reduceTasks)).
-          td(String.valueOf(reducesTasksComplete))._()
+          td(String.valueOf(String.valueOf(job.getReducesTotal()))).
+          td(String.valueOf(String.valueOf(job.getReducesCompleted())))._()
         ._().

     // Attempts table
@@ -204,99 +182,27 @@ public class HsJobBlock extends HtmlBlock {
           th("Maps").
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.FAILED.toString()),
-              String.valueOf(failedMapAttempts))._().
+              String.valueOf(job.getFailedMapAttempts()))._().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.KILLED.toString()),
-              String.valueOf(killedMapAttempts))._().
+              String.valueOf(job.getKilledMapAttempts()))._().
           td().a(url("attempts", jid, "m",
               TaskAttemptStateUI.SUCCESSFUL.toString()),
-              String.valueOf(successfulMapAttempts))._().
+              String.valueOf(job.getSuccessfulMapAttempts()))._().
         _().
         tr(_EVEN).
           th("Reduces").
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.FAILED.toString()),
-              String.valueOf(failedReduceAttempts))._().
+              String.valueOf(job.getFailedReduceAttempts()))._().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.KILLED.toString()),
-              String.valueOf(killedReduceAttempts))._().
+              String.valueOf(job.getKilledReduceAttempts()))._().
           td().a(url("attempts", jid, "r",
               TaskAttemptStateUI.SUCCESSFUL.toString()),
-              String.valueOf(successfulReduceAttempts))._().
+              String.valueOf(job.getSuccessfulReduceAttempts()))._().
         _().
       _().
     _();
   }
-
-  /**
-   * Go through a job and update the member variables with counts for
-   * information to output in the page.
-   * @param job the job to get counts for.
-   */
-  private void countTasksAndAttempts(Job job) {
-    numReduces = 0;
-    numMaps = 0;
-    Map<TaskId, Task> tasks = job.getTasks();
-    for (Task task : tasks.values()) {
-      // Attempts counts
-      Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
-      for (TaskAttempt attempt : attempts.values()) {
-
-        int successful = 0, failed = 0, killed = 0;
-
-        if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
-          //Do Nothing
-        } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt
-            .getState())) {
-          //Do Nothing
-        } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt
-            .getState())) {
-          ++successful;
-        } else if (TaskAttemptStateUI.FAILED
-            .correspondsTo(attempt.getState())) {
-          ++failed;
-        } else if (TaskAttemptStateUI.KILLED
-            .correspondsTo(attempt.getState())) {
-          ++killed;
-        }
-
-        switch (task.getType()) {
-        case MAP:
-          successfulMapAttempts += successful;
-          failedMapAttempts += failed;
-          killedMapAttempts += killed;
-          if(attempt.getState() == TaskAttemptState.SUCCEEDED) {
-            numMaps++;
-            avgMapTime += (attempt.getFinishTime() -
-                attempt.getLaunchTime());
-          }
-          break;
-        case REDUCE:
-          successfulReduceAttempts += successful;
-          failedReduceAttempts += failed;
-          killedReduceAttempts += killed;
-          if(attempt.getState() == TaskAttemptState.SUCCEEDED) {
-            numReduces++;
-            avgShuffleTime += (attempt.getShuffleFinishTime() -
-                attempt.getLaunchTime());
-            avgSortTime += attempt.getSortFinishTime() -
-                attempt.getLaunchTime();
-            avgReduceTime += (attempt.getFinishTime() -
-                attempt.getShuffleFinishTime());
-          }
-          break;
-        }
-      }
-    }
-
-    if(numMaps > 0) {
-      avgMapTime = avgMapTime / numMaps;
-    }
-
-    if(numReduces > 0) {
-      avgReduceTime = avgReduceTime / numReduces;
-      avgShuffleTime = avgShuffleTime / numReduces;
-      avgSortTime = avgSortTime / numReduces;
-    }
-  }
 }
@@ -21,10 +21,9 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;
 import java.text.SimpleDateFormat;
 import java.util.Date;

-import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
 import org.apache.hadoop.mapreduce.v2.app.AppContext;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
-import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
 import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;
@@ -38,8 +37,8 @@ import com.google.inject.Inject;
  */
 public class HsJobsBlock extends HtmlBlock {
   final AppContext appContext;
-  static final SimpleDateFormat dateFormat = 
-    new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");
+  static final SimpleDateFormat dateFormat =
+    new SimpleDateFormat("yyyy.MM.dd HH:mm:ss z");

   @Inject HsJobsBlock(AppContext appCtx) {
     appContext = appCtx;
@@ -68,28 +67,21 @@ public class HsJobsBlock extends HtmlBlock {
           th("Reduces Completed")._()._().
         tbody();
     LOG.info("Getting list of all Jobs.");
-    for (Job job : appContext.getAllJobs().values()) {
-      String jobID = MRApps.toString(job.getID());
-      JobReport report = job.getReport();
-      String mapsTotal = String.valueOf(job.getTotalMaps());
-      String mapsCompleted = String.valueOf(job.getCompletedMaps());
-      String reduceTotal = String.valueOf(job.getTotalReduces());
-      String reduceCompleted = String.valueOf(job.getCompletedReduces());
-      long startTime = report.getStartTime();
-      long finishTime = report.getFinishTime();
+    for (Job j : appContext.getAllJobs().values()) {
+      JobInfo job = new JobInfo(j);
       tbody.
         tr().
-          td(dateFormat.format(new Date(startTime))).
-          td(dateFormat.format(new Date(finishTime))).
-          td().a(url("job", jobID), jobID)._().
-          td(job.getName().toString()).
+          td(dateFormat.format(new Date(job.getStartTime()))).
+          td(dateFormat.format(new Date(job.getFinishTime()))).
+          td().a(url("job", job.getId()), job.getId())._().
+          td(job.getName()).
           td(job.getUserName()).
           td(job.getQueueName()).
-          td(job.getState().toString()).
-          td(mapsTotal).
-          td(mapsCompleted).
-          td(reduceTotal).
-          td(reduceCompleted)._();
+          td(job.getState()).
+          td(String.valueOf(job.getMapsTotal())).
+          td(String.valueOf(job.getMapsCompleted())).
+          td(String.valueOf(job.getReducesTotal())).
+          td(String.valueOf(job.getReducesCompleted()))._();
     }
     tbody._().
     tfoot().
|
@ -20,12 +20,13 @@ package org.apache.hadoop.mapreduce.v2.hs.webapp;

import static org.apache.hadoop.mapreduce.v2.app.webapp.AMParams.TASK_TYPE;

import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.App;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.util.Times;
@ -65,7 +66,7 @@ public class HsTasksBlock extends HtmlBlock {
    if (!symbol.isEmpty()) {
      type = MRApps.taskType(symbol);
    }

    THEAD<TABLE<Hamlet>> thead = html.table("#tasks").thead();
    //Create the spanning row
    int attemptColSpan = type == TaskType.REDUCE ? 8 : 3;
@ -74,7 +75,7 @@ public class HsTasksBlock extends HtmlBlock {
        th().$colspan(attemptColSpan).$class("ui-state-default").
          _("Successful Attempt")._().
      _();

    TR<THEAD<TABLE<Hamlet>>> theadRow = thead.
      tr().
        th("Name").
@ -83,33 +84,33 @@ public class HsTasksBlock extends HtmlBlock {
        th("Finish Time").
        th("Elapsed Time").
        th("Start Time"); //Attempt

    if(type == TaskType.REDUCE) {
      theadRow.th("Shuffle Finish Time"); //Attempt
      theadRow.th("Merge Finish Time"); //Attempt
    }

    theadRow.th("Finish Time"); //Attempt

    if(type == TaskType.REDUCE) {
      theadRow.th("Elapsed Time Shuffle"); //Attempt
      theadRow.th("Elapsed Time Merge"); //Attempt
      theadRow.th("Elapsed Time Reduce"); //Attempt
    }
    theadRow.th("Elapsed Time"); //Attempt

    TBODY<TABLE<Hamlet>> tbody = theadRow._()._().tbody();
    for (Task task : app.getJob().getTasks().values()) {
      if (type != null && task.getType() != type) {
        continue;
      }
      String tid = MRApps.toString(task.getID());

      TaskReport report = task.getReport();
      long startTime = report.getStartTime();
      long finishTime = report.getFinishTime();
      long elapsed = Times.elapsed(startTime, finishTime, false);

      TaskInfo info = new TaskInfo(task);
      String tid = info.getId();

      long startTime = info.getStartTime();
      long finishTime = info.getFinishTime();
      long elapsed = info.getElapsedTime();

      long attemptStartTime = -1;
      long shuffleFinishTime = -1;
      long sortFinishTime = -1;
@ -118,30 +119,31 @@ public class HsTasksBlock extends HtmlBlock {
      long elapsedSortTime = -1;
      long elapsedReduceTime = -1;
      long attemptElapsed = -1;
      TaskAttempt successful = getSuccessfulAttempt(task);
      TaskAttempt successful = info.getSuccessful();
      if(successful != null) {
        attemptStartTime = successful.getLaunchTime();
        attemptFinishTime = successful.getFinishTime();
        TaskAttemptInfo ta;
        if(type == TaskType.REDUCE) {
          shuffleFinishTime = successful.getShuffleFinishTime();
          sortFinishTime = successful.getSortFinishTime();
          elapsedShuffleTime =
              Times.elapsed(attemptStartTime, shuffleFinishTime, false);
          elapsedSortTime =
              Times.elapsed(shuffleFinishTime, sortFinishTime, false);
          elapsedReduceTime =
              Times.elapsed(sortFinishTime, attemptFinishTime, false);
          ReduceTaskAttemptInfo rta = new ReduceTaskAttemptInfo(successful, type);
          shuffleFinishTime = rta.getShuffleFinishTime();
          sortFinishTime = rta.getMergeFinishTime();
          elapsedShuffleTime = rta.getElapsedShuffleTime();
          elapsedSortTime = rta.getElapsedMergeTime();
          elapsedReduceTime = rta.getElapsedReduceTime();
          ta = rta;
        } else {
          ta = new TaskAttemptInfo(successful, type, false);
        }
        attemptElapsed =
            Times.elapsed(attemptStartTime, attemptFinishTime, false);
        attemptStartTime = ta.getStartTime();
        attemptFinishTime = ta.getFinishTime();
        attemptElapsed = ta.getElapsedTime();
      }

      TR<TBODY<TABLE<Hamlet>>> row = tbody.tr();
      row.
        td().
          br().$title(String.valueOf(task.getID().getId()))._(). // sorting
          br().$title(String.valueOf(info.getTaskNum()))._(). // sorting
          a(url("task", tid), tid)._().
        td(report.getTaskState().toString()).
        td(info.getState()).
        td().
          br().$title(String.valueOf(startTime))._().
          _(Times.format(startTime))._().
@ -166,7 +168,7 @@ public class HsTasksBlock extends HtmlBlock {
        td().
          br().$title(String.valueOf(attemptFinishTime))._().
          _(Times.format(attemptFinishTime))._();

      if(type == TaskType.REDUCE) {
        row.td().
          br().$title(String.valueOf(elapsedShuffleTime))._().
@ -178,7 +180,7 @@ public class HsTasksBlock extends HtmlBlock {
          br().$title(String.valueOf(elapsedReduceTime))._().
          _(formatTime(elapsedReduceTime))._();
      }

      row.td().
        br().$title(String.valueOf(attemptElapsed))._().
        _(formatTime(attemptElapsed))._();
@ -194,7 +196,7 @@ public class HsTasksBlock extends HtmlBlock {
        .$type(InputType.text).$name("elapsed_time").$value("Elapsed Time")._()
        ._().th().input("search_init").$type(InputType.text)
        .$name("attempt_start_time").$value("Start Time")._()._();

    if(type == TaskType.REDUCE) {
      footRow.th().input("search_init").$type(InputType.text)
          .$name("shuffle_time").$value("Shuffle Time")._()._();
@ -216,20 +218,12 @@ public class HsTasksBlock extends HtmlBlock {

    footRow.th().input("search_init").$type(InputType.text)
        .$name("attempt_elapsed").$value("Elapsed Time")._()._();

    footRow._()._()._();
  }

  private String formatTime(long elapsed) {
    return elapsed < 0 ? "N/A" : StringUtils.formatTime(elapsed);
  }

  private TaskAttempt getSuccessfulAttempt(Task task) {
    for(TaskAttempt attempt: task.getAttempts().values()) {
      if(attempt.getState() == TaskAttemptState.SUCCEEDED) {
        return attempt;
      }
    }
    return null;
  }

}
@ -27,6 +27,7 @@ import static org.apache.hadoop.yarn.webapp.YarnWebParams.NM_NODENAME;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMParams;
import org.apache.hadoop.mapreduce.v2.hs.HistoryContext;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;

public class HsWebApp extends WebApp implements AMParams {
@ -39,6 +40,9 @@ public class HsWebApp extends WebApp implements AMParams {

  @Override
  public void setup() {
    bind(HsWebServices.class);
    bind(JAXBContextResolver.class);
    bind(GenericExceptionHandler.class);
    bind(AppContext.class).toInstance(history);
    route("/", HsController.class);
    route("/app", HsController.class);
@ -0,0 +1,469 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.v2.hs.webapp;

import java.io.IOException;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.UriInfo;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import org.apache.hadoop.yarn.webapp.WebApp;

import com.google.inject.Inject;

@Path("/ws/v1/history")
public class HsWebServices {
  private final AppContext appCtx;
  private WebApp webapp;
  private final Configuration conf;

  @Context
  UriInfo uriInfo;

  @Inject
  public HsWebServices(final AppContext appCtx, final Configuration conf,
      final WebApp webapp) {
    this.appCtx = appCtx;
    this.conf = conf;
    this.webapp = webapp;
  }

  @GET
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public HistoryInfo get() {
    return getHistoryInfo();
  }

  @GET
  @Path("/info")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public HistoryInfo getHistoryInfo() {
    return new HistoryInfo();
  }

  @GET
  @Path("/mapreduce/jobs")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobsInfo getJobs(@QueryParam("user") String userQuery,
      @QueryParam("limit") String count,
      @QueryParam("queue") String queueQuery,
      @QueryParam("startedTimeBegin") String startedBegin,
      @QueryParam("startedTimeEnd") String startedEnd,
      @QueryParam("finishedTimeBegin") String finishBegin,
      @QueryParam("finishedTimeEnd") String finishEnd) {
    JobsInfo allJobs = new JobsInfo();
    long num = 0;
    boolean checkCount = false;
    boolean checkStart = false;
    boolean checkEnd = false;
    long countNum = 0;

    // defaults to use when begin/end are not specified
    long sBegin = 0;
    long sEnd = Long.MAX_VALUE;
    long fBegin = 0;
    long fEnd = Long.MAX_VALUE;

    if (count != null && !count.isEmpty()) {
      checkCount = true;
      try {
        countNum = Long.parseLong(count);
      } catch (NumberFormatException e) {
        throw new BadRequestException(e.getMessage());
      }
      if (countNum <= 0) {
        throw new BadRequestException("limit value must be greater than 0");
      }
    }

    if (startedBegin != null && !startedBegin.isEmpty()) {
      checkStart = true;
      try {
        sBegin = Long.parseLong(startedBegin);
      } catch (NumberFormatException e) {
        throw new BadRequestException(e.getMessage());
      }
      if (sBegin < 0) {
        throw new BadRequestException("startedTimeBegin must be greater than 0");
      }
    }
    if (startedEnd != null && !startedEnd.isEmpty()) {
      checkStart = true;
      try {
        sEnd = Long.parseLong(startedEnd);
      } catch (NumberFormatException e) {
        throw new BadRequestException(e.getMessage());
      }
      if (sEnd < 0) {
        throw new BadRequestException("startedTimeEnd must be greater than 0");
      }
    }
    if (sBegin > sEnd) {
      throw new BadRequestException(
          "startedTimeEnd must be greater than startedTimeBegin");
    }

    if (finishBegin != null && !finishBegin.isEmpty()) {
      checkEnd = true;
      try {
        fBegin = Long.parseLong(finishBegin);
      } catch (NumberFormatException e) {
        throw new BadRequestException(e.getMessage());
      }
      if (fBegin < 0) {
        throw new BadRequestException("finishedTimeBegin must be greater than 0");
      }
    }
    if (finishEnd != null && !finishEnd.isEmpty()) {
      checkEnd = true;
      try {
        fEnd = Long.parseLong(finishEnd);
      } catch (NumberFormatException e) {
        throw new BadRequestException(e.getMessage());
      }
      if (fEnd < 0) {
        throw new BadRequestException("finishedTimeEnd must be greater than 0");
      }
    }
    if (fBegin > fEnd) {
      throw new BadRequestException(
          "finishedTimeEnd must be greater than finishedTimeBegin");
    }

    for (Job job : appCtx.getAllJobs().values()) {
      if (checkCount && num == countNum) {
        break;
      }

      // getAllJobs only gives you a partial job; fetch the full one
      Job fullJob = appCtx.getJob(job.getID());
      if (fullJob == null) {
        continue;
      }

      JobInfo jobInfo = new JobInfo(fullJob);
      // can't really validate queue is a valid one since queues could change
      if (queueQuery != null && !queueQuery.isEmpty()) {
        if (!jobInfo.getQueueName().equals(queueQuery)) {
          continue;
        }
      }

      if (userQuery != null && !userQuery.isEmpty()) {
        if (!jobInfo.getUserName().equals(userQuery)) {
          continue;
        }
      }

      if (checkStart
          && (jobInfo.getStartTime() < sBegin || jobInfo.getStartTime() > sEnd)) {
        continue;
      }
      if (checkEnd
          && (jobInfo.getFinishTime() < fBegin || jobInfo.getFinishTime() > fEnd)) {
        continue;
      }

      allJobs.add(jobInfo);
      num++;
    }
    return allJobs;
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobInfo getJob(@PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    return new JobInfo(job);
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/attempts")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AMAttemptsInfo getJobAttempts(@PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    AMAttemptsInfo amAttempts = new AMAttemptsInfo();
    for (AMInfo amInfo : job.getAMInfos()) {
      AMAttemptInfo attempt = new AMAttemptInfo(amInfo, MRApps.toString(job
          .getID()), job.getUserName(), uriInfo.getBaseUri().toString(),
          webapp.name());
      amAttempts.add(attempt);
    }
    return amAttempts;
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/counters")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobCounterInfo getJobCounters(@PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    return new JobCounterInfo(this.appCtx, job);
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/counters")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobTaskCounterInfo getSingleTaskCounters(
      @PathParam("jobid") String jid, @PathParam("taskid") String tid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = this.appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    return new JobTaskCounterInfo(task);
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/conf")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ConfInfo getJobConf(@PathParam("jobid") String jid) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    ConfInfo info;
    try {
      info = new ConfInfo(job, this.conf);
    } catch (IOException e) {
      throw new NotFoundException("unable to load configuration for job: "
          + jid);
    }

    return info;
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/tasks")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TasksInfo getJobTasks(@PathParam("jobid") String jid,
      @QueryParam("type") String type) {
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    TasksInfo allTasks = new TasksInfo();
    for (Task task : job.getTasks().values()) {
      TaskType ttype = null;
      if (type != null && !type.isEmpty()) {
        try {
          ttype = MRApps.taskType(type);
        } catch (YarnException e) {
          throw new BadRequestException("tasktype must be either m or r");
        }
      }
      if (ttype != null && task.getType() != ttype) {
        continue;
      }
      allTasks.add(new TaskInfo(task));
    }
    return allTasks;
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TaskInfo getJobTask(@PathParam("jobid") String jid,
      @PathParam("taskid") String tid) {
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    return new TaskInfo(task);
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TaskAttemptsInfo getJobTaskAttempts(@PathParam("jobid") String jid,
      @PathParam("taskid") String tid) {
    TaskAttemptsInfo attempts = new TaskAttemptsInfo();
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    for (TaskAttempt ta : task.getAttempts().values()) {
      if (ta != null) {
        if (task.getType() == TaskType.REDUCE) {
          attempts.add(new ReduceTaskAttemptInfo(ta, task.getType()));
        } else {
          attempts.add(new TaskAttemptInfo(ta, task.getType(), false));
        }
      }
    }
    return attempts;
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public TaskAttemptInfo getJobTaskAttemptId(@PathParam("jobid") String jid,
      @PathParam("taskid") String tid, @PathParam("attemptid") String attId) {
    Job job = this.appCtx.getJob(MRApps.toJobID(jid));
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId);
    if (attemptId == null) {
      throw new NotFoundException("task attempt id " + attId
          + " not found or invalid");
    }
    TaskAttempt ta = task.getAttempt(attemptId);
    if (ta == null) {
      throw new NotFoundException("Error getting info on task attempt id "
          + attId);
    }
    if (task.getType() == TaskType.REDUCE) {
      return new ReduceTaskAttemptInfo(ta, task.getType());
    } else {
      return new TaskAttemptInfo(ta, task.getType(), false);
    }
  }

  @GET
  @Path("/mapreduce/jobs/{jobid}/tasks/{taskid}/attempts/{attemptid}/counters")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public JobTaskAttemptCounterInfo getJobTaskAttemptIdCounters(
      @PathParam("jobid") String jid, @PathParam("taskid") String tid,
      @PathParam("attemptid") String attId) {
    JobId jobId = MRApps.toJobID(jid);
    if (jobId == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    Job job = this.appCtx.getJob(jobId);
    if (job == null) {
      throw new NotFoundException("job, " + jid + ", is not found");
    }
    TaskId taskID = MRApps.toTaskID(tid);
    if (taskID == null) {
      throw new NotFoundException("taskid " + tid + " not found or invalid");
    }
    Task task = job.getTask(taskID);
    if (task == null) {
      throw new NotFoundException("task not found with id " + tid);
    }
    TaskAttemptId attemptId = MRApps.toTaskAttemptID(attId);
    if (attemptId == null) {
      throw new NotFoundException("task attempt id " + attId
          + " not found or invalid");
    }
    TaskAttempt ta = task.getAttempt(attemptId);
    if (ta == null) {
      throw new NotFoundException("Error getting info on task attempt id "
          + attId);
    }
    return new JobTaskAttemptCounterInfo(ta);
  }

}
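For orientation, the resource above becomes reachable under the history server's web port once HsWebApp binds it. A minimal client sketch, assuming a JobHistoryServer whose webapp listens on localhost:19888 (the host and port are deployment-specific, taken from mapreduce.jobhistory.webapp.address):

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class HsWebServicesClientSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical address; substitute your history server's webapp address.
    URL url = new URL(
        "http://localhost:19888/ws/v1/history/mapreduce/jobs?limit=5");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    // The services produce both JSON and XML; ask for JSON here.
    conn.setRequestProperty("Accept", "application/json");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line); // raw JSON, e.g. {"jobs":{"job":[...]}}
    }
    in.close();
    conn.disconnect();
  }
}

Requesting Accept: application/xml instead returns the same DAOs marshalled by JAXB as XML.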
@ -0,0 +1,78 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.v2.hs.webapp;

import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;

import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterGroupInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.CounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskAttemptCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.JobTaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ReduceTaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterGroupInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TaskCounterInfo;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.TasksInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.AMAttemptsInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.HistoryInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobInfo;
import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;

@Singleton
@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {

  private JAXBContext context;
  private final Set<Class> types;

  // you have to specify all the dao classes here
  private final Class[] cTypes = { HistoryInfo.class, JobInfo.class,
      JobsInfo.class, TasksInfo.class, TaskAttemptsInfo.class, ConfInfo.class,
      CounterInfo.class, JobTaskCounterInfo.class,
      JobTaskAttemptCounterInfo.class,
      TaskCounterInfo.class, JobCounterInfo.class, ReduceTaskAttemptInfo.class,
      TaskAttemptInfo.class, TaskAttemptsInfo.class, CounterGroupInfo.class,
      TaskCounterGroupInfo.class,
      AMAttemptInfo.class, AMAttemptsInfo.class};

  public JAXBContextResolver() throws Exception {
    this.types = new HashSet<Class>(Arrays.asList(cTypes));
    this.context = new JSONJAXBContext(JSONConfiguration.natural()
        .rootUnwrapping(false).build(), cTypes);
  }

  @Override
  public JAXBContext getContext(Class<?> objectType) {
    return (types.contains(objectType)) ? context : null;
  }
}
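One practical consequence of this resolver: any JAXB-annotated DAO type the web services return has to be listed in cTypes, otherwise Jersey falls back to a default JAXBContext for that type and the natural-JSON convention configured here is lost. A hedged sketch of what such a DAO looks like (FooInfo is an invented name for illustration, not part of this patch; it would also need an entry in the cTypes array above):

package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

// Hypothetical dao, shown only to illustrate the registration requirement.
@XmlRootElement(name = "foo")
@XmlAccessorType(XmlAccessType.FIELD)
public class FooInfo {

  protected String value;

  public FooInfo() {
  } // JAXB needs this

  public FooInfo(String value) {
    this.value = value;
  }

  public String getValue() {
    return this.value;
  }
}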
@ -0,0 +1,96 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;

import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.util.BuilderUtils;

@XmlRootElement(name = "amAttempt")
@XmlAccessorType(XmlAccessType.FIELD)
public class AMAttemptInfo {

  protected String nodeHttpAddress;
  protected String nodeId;
  protected int id;
  protected long startTime;
  protected String containerId;
  protected String logsLink;

  @XmlTransient
  protected String shortLogsLink;

  public AMAttemptInfo() {
  }

  public AMAttemptInfo(AMInfo amInfo, String jobId, String user, String host,
      String pathPrefix) {
    this.nodeHttpAddress = amInfo.getNodeManagerHost() + ":"
        + amInfo.getNodeManagerHttpPort();
    NodeId nodeId = BuilderUtils.newNodeId(amInfo.getNodeManagerHost(),
        amInfo.getNodeManagerPort());
    this.nodeId = nodeId.toString();
    this.id = amInfo.getAppAttemptId().getAttemptId();
    this.startTime = amInfo.getStartTime();
    this.containerId = amInfo.getContainerId().toString();
    this.logsLink = join(
        host,
        pathPrefix,
        ujoin("logs", nodeId.toString(), amInfo.getContainerId().toString(),
            jobId, user));
    this.shortLogsLink = ujoin("logs", nodeId.toString(), amInfo
        .getContainerId().toString(), jobId, user);
  }

  public String getNodeHttpAddress() {
    return this.nodeHttpAddress;
  }

  public String getNodeId() {
    return this.nodeId;
  }

  public int getAttemptId() {
    return this.id;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public String getContainerId() {
    return this.containerId;
  }

  public String getLogsLink() {
    return this.logsLink;
  }

  public String getShortLogsLink() {
    return this.shortLogsLink;
  }

}
@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "attempts")
@XmlAccessorType(XmlAccessType.FIELD)
public class AMAttemptsInfo {

  protected ArrayList<AMAttemptInfo> attempt = new ArrayList<AMAttemptInfo>();

  public AMAttemptsInfo() {
  } // JAXB needs this

  public void add(AMAttemptInfo info) {
    this.attempt.add(info);
  }

  public ArrayList<AMAttemptInfo> getAttempts() {
    return this.attempt;
  }

}
@ -0,0 +1,53 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.util.VersionInfo;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class HistoryInfo {

  protected String hadoopVersion;
  protected String hadoopBuildVersion;
  protected String hadoopVersionBuiltOn;

  public HistoryInfo() {
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }

  public String getHadoopVersion() {
    return this.hadoopVersion;
  }

  public String getHadoopBuildVersion() {
    return this.hadoopBuildVersion;
  }

  public String getHadoopVersionBuiltOn() {
    return this.hadoopVersionBuiltOn;
  }

}
@ -0,0 +1,295 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.webapp.dao.ConfEntryInfo;
import org.apache.hadoop.mapreduce.v2.hs.CompletedJob;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.mapreduce.v2.util.MRApps.TaskAttemptStateUI;
import org.apache.hadoop.security.authorize.AccessControlList;

@XmlRootElement(name = "job")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobInfo {

  protected long startTime;
  protected long finishTime;
  protected String id;
  protected String name;
  protected String queue;
  protected String user;
  protected String state;
  protected int mapsTotal;
  protected int mapsCompleted;
  protected int reducesTotal;
  protected int reducesCompleted;
  protected boolean uberized;
  protected String diagnostics;
  protected long avgMapTime = 0;
  protected long avgReduceTime = 0;
  protected long avgShuffleTime = 0;
  protected long avgMergeTime = 0;
  protected int failedReduceAttempts = 0;
  protected int killedReduceAttempts = 0;
  protected int successfulReduceAttempts = 0;
  protected int failedMapAttempts = 0;
  protected int killedMapAttempts = 0;
  protected int successfulMapAttempts = 0;
  protected ArrayList<ConfEntryInfo> acls;

  @XmlTransient
  protected int numMaps;
  @XmlTransient
  protected int numReduces;

  public JobInfo() {
  }

  public JobInfo(Job job) {
    this.id = MRApps.toString(job.getID());
    JobReport report = job.getReport();
    countTasksAndAttempts(job);
    this.mapsTotal = job.getTotalMaps();
    this.mapsCompleted = job.getCompletedMaps();
    this.reducesTotal = job.getTotalReduces();
    this.reducesCompleted = job.getCompletedReduces();
    this.startTime = report.getStartTime();
    this.finishTime = report.getFinishTime();
    this.name = job.getName().toString();
    this.queue = job.getQueueName();
    this.user = job.getUserName();
    this.state = job.getState().toString();
    this.uberized = job.isUber();
    List<String> diagnostics = job.getDiagnostics();
    if (diagnostics != null && !diagnostics.isEmpty()) {
      StringBuffer b = new StringBuffer();
      for (String diag : diagnostics) {
        b.append(diag);
      }
      this.diagnostics = b.toString();
    }

    this.acls = new ArrayList<ConfEntryInfo>();
    if (job instanceof CompletedJob) {
      Map<JobACL, AccessControlList> allacls = job.getJobACLs();
      if (allacls != null) {
        for (Map.Entry<JobACL, AccessControlList> entry : allacls.entrySet()) {
          this.acls.add(new ConfEntryInfo(entry.getKey().getAclName(), entry
              .getValue().getAclString()));
        }
      }
    }
  }

  public long getNumMaps() {
    return numMaps;
  }

  public long getNumReduces() {
    return numReduces;
  }

  public long getAvgMapTime() {
    return avgMapTime;
  }

  public long getAvgReduceTime() {
    return avgReduceTime;
  }

  public long getAvgShuffleTime() {
    return avgShuffleTime;
  }

  public long getAvgMergeTime() {
    return avgMergeTime;
  }

  public long getFailedReduceAttempts() {
    return failedReduceAttempts;
  }

  public long getKilledReduceAttempts() {
    return killedReduceAttempts;
  }

  public long getSuccessfulReduceAttempts() {
    return successfulReduceAttempts;
  }

  public long getFailedMapAttempts() {
    return failedMapAttempts;
  }

  public long getKilledMapAttempts() {
    return killedMapAttempts;
  }

  public long getSuccessfulMapAttempts() {
    return successfulMapAttempts;
  }

  public ArrayList<ConfEntryInfo> getAcls() {
    return acls;
  }

  public int getReducesCompleted() {
    return this.reducesCompleted;
  }

  public int getReducesTotal() {
    return this.reducesTotal;
  }

  public int getMapsCompleted() {
    return this.mapsCompleted;
  }

  public int getMapsTotal() {
    return this.mapsTotal;
  }

  public String getState() {
    return this.state;
  }

  public String getUserName() {
    return this.user;
  }

  public String getName() {
    return this.name;
  }

  public String getQueueName() {
    return this.queue;
  }

  public String getId() {
    return this.id;
  }

  public long getStartTime() {
    return this.startTime;
  }

  public long getFinishTime() {
    return this.finishTime;
  }

  public boolean isUber() {
    return this.uberized;
  }

  public String getDiagnostics() {
    return this.diagnostics;
  }

  /**
   * Go through a job and update the member variables with counts for
   * information to output in the page.
   *
   * @param job
   *          the job to get counts for.
   */
  private void countTasksAndAttempts(Job job) {
    numReduces = 0;
    numMaps = 0;
    final Map<TaskId, Task> tasks = job.getTasks();
    if (tasks == null) {
      return;
    }
    for (Task task : tasks.values()) {
      // Attempts counts
      Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
      int successful, failed, killed;
      for (TaskAttempt attempt : attempts.values()) {

        successful = 0;
        failed = 0;
        killed = 0;
        if (TaskAttemptStateUI.NEW.correspondsTo(attempt.getState())) {
          // Do Nothing
        } else if (TaskAttemptStateUI.RUNNING.correspondsTo(attempt.getState())) {
          // Do Nothing
        } else if (TaskAttemptStateUI.SUCCESSFUL.correspondsTo(attempt
            .getState())) {
          ++successful;
        } else if (TaskAttemptStateUI.FAILED.correspondsTo(attempt.getState())) {
          ++failed;
        } else if (TaskAttemptStateUI.KILLED.correspondsTo(attempt.getState())) {
          ++killed;
        }

        switch (task.getType()) {
        case MAP:
          successfulMapAttempts += successful;
          failedMapAttempts += failed;
          killedMapAttempts += killed;
          if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
            numMaps++;
            avgMapTime += (attempt.getFinishTime() - attempt.getLaunchTime());
          }
          break;
        case REDUCE:
          successfulReduceAttempts += successful;
          failedReduceAttempts += failed;
          killedReduceAttempts += killed;
          if (attempt.getState() == TaskAttemptState.SUCCEEDED) {
            numReduces++;
            avgShuffleTime += (attempt.getShuffleFinishTime() - attempt
                .getLaunchTime());
            avgMergeTime += attempt.getSortFinishTime()
                - attempt.getLaunchTime();
            avgReduceTime += (attempt.getFinishTime() - attempt
                .getShuffleFinishTime());
          }
          break;
        }
      }
    }

    if (numMaps > 0) {
      avgMapTime = avgMapTime / numMaps;
    }

    if (numReduces > 0) {
      avgReduceTime = avgReduceTime / numReduces;
      avgShuffleTime = avgShuffleTime / numReduces;
      avgMergeTime = avgMergeTime / numReduces;
    }
  }

}
@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.mapreduce.v2.hs.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "jobs")
@XmlAccessorType(XmlAccessType.FIELD)
public class JobsInfo {

  protected ArrayList<JobInfo> job = new ArrayList<JobInfo>();

  public JobsInfo() {
  } // JAXB needs this

  public void add(JobInfo jobInfo) {
    this.job.add(jobInfo);
  }

  public ArrayList<JobInfo> getJobs() {
    return this.job;
  }

}
@ -238,7 +238,7 @@
    <dependency>
      <groupId>com.google.inject.extensions</groupId>
      <artifactId>guice-servlet</artifactId>
      <version>2.0</version>
      <version>3.0</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>
@ -0,0 +1,39 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.webapp;

import javax.ws.rs.WebApplicationException;
import javax.ws.rs.core.Response.Status;

public class BadRequestException extends WebApplicationException {

  private static final long serialVersionUID = 1L;

  public BadRequestException() {
    super(Status.BAD_REQUEST);
  }

  public BadRequestException(java.lang.Throwable cause) {
    super(cause, Status.BAD_REQUEST);
  }

  public BadRequestException(String msg) {
    super(new Exception(msg), Status.BAD_REQUEST);
  }
}
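A brief sketch of how this exception is meant to be used from a JAX-RS resource method (the resource and path below are invented for illustration; HsWebServices above shows the real call sites):

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.QueryParam;

import org.apache.hadoop.yarn.webapp.BadRequestException;

// Illustrative resource only; "/ws/v1/example" is not part of this patch.
@Path("/ws/v1/example")
public class ExampleWebService {

  @GET
  public String get(@QueryParam("limit") String limit) {
    try {
      long n = Long.parseLong(limit);
      if (n <= 0) {
        // GenericExceptionHandler maps this to a 400 with a JSON error body.
        throw new BadRequestException("limit value must be greater than 0");
      }
      return "ok, limit=" + n;
    } catch (NumberFormatException e) {
      throw new BadRequestException(e.getMessage());
    }
  }
}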
@ -0,0 +1,50 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.webapp;

import java.io.IOException;

import javax.servlet.RequestDispatcher;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;

import com.google.inject.Singleton;

@Singleton
public class DefaultWrapperServlet extends HttpServlet {

  private static final long serialVersionUID = 1L;

  public void doGet(HttpServletRequest req, HttpServletResponse resp)
      throws ServletException, IOException {
    RequestDispatcher rd = getServletContext().getNamedDispatcher("default");

    HttpServletRequest wrapped = new HttpServletRequestWrapper(req) {
      public String getServletPath() {
        return "";
      }
    };

    rd.forward(wrapped, resp);
  }

}
@ -0,0 +1,115 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.webapp;

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.Map;
import java.util.TreeMap;

import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
import javax.ws.rs.ext.Provider;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.mortbay.util.ajax.JSON;

import com.google.inject.Singleton;

/**
 * Handle webservices jersey exceptions and create json response in the format:
 * { "RemoteException" :
 *   {
 *     "exception" : <exception type>,
 *     "javaClassName" : <classname of exception>,
 *     "message" : <error message from exception>
 *   }
 * }
 */
@Singleton
@Provider
public class GenericExceptionHandler implements ExceptionMapper<Exception> {
  public static final Log LOG = LogFactory
      .getLog(GenericExceptionHandler.class);

  private @Context
  HttpServletResponse response;

  @Override
  public Response toResponse(Exception e) {
    if (LOG.isTraceEnabled()) {
      LOG.trace("GOT EXCEPTION", e);
    }
    // Don't catch this as filter forward on 404
    // (ServletContainer.FEATURE_FILTER_FORWARD_ON_404)
    // won't work and the web UI won't work!
    if (e instanceof com.sun.jersey.api.NotFoundException) {
      return ((com.sun.jersey.api.NotFoundException) e).getResponse();
    }
    // clear content type
    response.setContentType(null);

    // Convert exception
    if (e instanceof RemoteException) {
      e = ((RemoteException) e).unwrapRemoteException();
    }

    // Map response status
    final Response.Status s;
    if (e instanceof SecurityException) {
      s = Response.Status.UNAUTHORIZED;
    } else if (e instanceof AuthorizationException) {
      s = Response.Status.UNAUTHORIZED;
    } else if (e instanceof FileNotFoundException) {
      s = Response.Status.NOT_FOUND;
    } else if (e instanceof NotFoundException) {
      s = Response.Status.NOT_FOUND;
    } else if (e instanceof IOException) {
      s = Response.Status.NOT_FOUND;
    } else if (e instanceof UnsupportedOperationException) {
      s = Response.Status.BAD_REQUEST;
    } else if (e instanceof IllegalArgumentException) {
      s = Response.Status.BAD_REQUEST;
    } else if (e instanceof NumberFormatException) {
      s = Response.Status.BAD_REQUEST;
    } else if (e instanceof BadRequestException) {
      s = Response.Status.BAD_REQUEST;
    } else {
      LOG.warn("INTERNAL_SERVER_ERROR", e);
      s = Response.Status.INTERNAL_SERVER_ERROR;
    }

    // convert to json
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("exception", e.getClass().getSimpleName());
    m.put("message", e.getMessage());
    m.put("javaClassName", e.getClass().getName());
    final Map<String, Object> m2 = new TreeMap<String, Object>();
    m2.put(RemoteException.class.getSimpleName(), m);
    final String js = JSON.toString(m2);

    return Response.status(s).type(MediaType.APPLICATION_JSON).entity(js)
        .build();
  }
}
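On the client side, the resulting body can be decoded with the same mortbay JSON utility this handler uses to emit it. A hedged sketch, with a hand-written payload that follows the documented format rather than captured output:

import java.util.Map;

import org.mortbay.util.ajax.JSON;

public class RemoteExceptionParseExample {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    // Representative error body in the format documented above.
    String body = "{\"RemoteException\":{\"exception\":\"NotFoundException\","
        + "\"javaClassName\":\"org.apache.hadoop.yarn.webapp.NotFoundException\","
        + "\"message\":\"java.lang.Exception: job, job_1_1, is not found\"}}";
    // JSON.parse turns a JSON object into a Map.
    Map<String, Object> outer = (Map<String, Object>) JSON.parse(body);
    Map<String, Object> ex = (Map<String, Object>) outer.get("RemoteException");
    System.out.println(ex.get("exception") + ": " + ex.get("message"));
  }
}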
@ -0,0 +1,44 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package org.apache.hadoop.yarn.webapp;
|
||||
|
||||
import javax.ws.rs.WebApplicationException;
|
||||
import javax.ws.rs.core.Response.Status;
|
||||
|
||||
/*
|
||||
* Created our own NotFoundException because com.sun.jersey.api.NotFoundException
|
||||
* sets the Response and therefore won't be handled by the GenericExceptionhandler
|
||||
* to fill in correct response.
|
||||
*/
|
||||
public class NotFoundException extends WebApplicationException {
|
||||
|
||||
private static final long serialVersionUID = 1L;
|
||||
|
||||
public NotFoundException() {
|
||||
super(Status.NOT_FOUND);
|
||||
}
|
||||
|
||||
public NotFoundException(java.lang.Throwable cause) {
|
||||
super(cause, Status.NOT_FOUND);
|
||||
}
|
||||
|
||||
public NotFoundException(String msg) {
|
||||
super(new Exception(msg), Status.NOT_FOUND);
|
||||
}
|
||||
}
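A minimal usage sketch (the resource class, path, and lookup are hypothetical; the pattern matches how NMWebServices uses this exception further down in this change):

    @GET
    @Path("/widgets/{id}")
    @Produces(MediaType.APPLICATION_JSON)
    public WidgetInfo getWidget(@PathParam("id") String id) {
      WidgetInfo info = lookup(id); // hypothetical store lookup
      if (info == null) {
        // Unlike Jersey's NotFoundException, no Response is attached here,
        // so GenericExceptionHandler still gets to format the 404 body.
        throw new NotFoundException("widget with id " + id + " not found");
      }
      return info;
    }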
@@ -18,23 +18,28 @@

package org.apache.hadoop.yarn.webapp;

import static com.google.common.base.Preconditions.checkNotNull;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.base.CharMatcher;
import static com.google.common.base.Preconditions.*;
import com.google.common.base.Splitter;
import com.google.common.collect.Lists;
import com.google.inject.Provides;
import com.google.inject.servlet.GuiceFilter;
import com.google.inject.servlet.ServletModule;

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.yarn.util.StringHelper;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sun.jersey.api.core.ResourceConfig;
import com.sun.jersey.core.util.FeaturesAndProperties;
import com.sun.jersey.guice.spi.container.servlet.GuiceContainer;
import com.sun.jersey.spi.container.servlet.ServletContainer;

/**
 * @see WebApps for a usage example

@@ -45,9 +50,10 @@ public abstract class WebApp extends ServletModule {
  public enum HTTP { GET, POST, HEAD, PUT, DELETE };

  private volatile String name;
  private volatile List<String> servePathSpecs = new ArrayList<String>();
  // path to redirect to if user goes to "/"
  private volatile String redirectPath;
  private volatile String wsName;
  private volatile Configuration conf;
  private volatile HttpServer httpServer;
  private volatile GuiceFilter guiceFilter;

@@ -104,18 +110,20 @@ public abstract class WebApp extends ServletModule {

  void addServePathSpec(String path) { this.servePathSpecs.add(path); }

  public String[] getServePathSpecs() {
    return this.servePathSpecs.toArray(new String[this.servePathSpecs.size()]);
  }

  /**
   * Set a path to redirect the user to if they just go to "/". For
   * instance "/" goes to "/yarn/apps". This allows the filters to
   * more easily differentiate the different webapps.
   * @param path the path to redirect to
   */
  void setRedirectPath(String path) { this.redirectPath = path; }

  void setWebServices(String name) { this.wsName = name; }

  public String getRedirectPath() { return this.redirectPath; }

  void setHostClass(Class<?> cls) {

@@ -129,10 +137,32 @@ public abstract class WebApp extends ServletModule {
  @Override
  public void configureServlets() {
    setup();

    serve("/", "/__stop").with(Dispatcher.class);

    for (String path : this.servePathSpecs) {
      serve(path).with(Dispatcher.class);
    }

    // Add in the web services filters/serves if app has them.
    // Using Jersey/guice integration module. If user has web services
    // they must have also bound a default one in their webapp code.
    if (this.wsName != null) {
      // There seems to be an issue with the guice/jersey integration
      // where we have to list the stuff we don't want it to serve
      // through the guicecontainer. In this case it's everything except
      // the web services api prefix. We can't just change the filter
      // from /* below - that doesn't work.
      String regex = "(?!/" + this.wsName + ")";
      serveRegex(regex).with(DefaultWrapperServlet.class);

      Map<String, String> params = new HashMap<String, String>();
      params.put(ResourceConfig.FEATURE_IMPLICIT_VIEWABLES, "true");
      params.put(ServletContainer.FEATURE_FILTER_FORWARD_ON_404, "true");
      params.put(FeaturesAndProperties.FEATURE_XMLROOTELEMENT_PROCESSING, "true");
      filter("/*").through(GuiceContainer.class, params);
    }

  }
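To see what the lookahead regex excludes, here is a small standalone sketch (hypothetical paths; a trailing `.*` is appended because `String.matches()` must consume the whole input, whereas Guice applies its own matching to the servlet path):

    // With wsName = "ws", "(?!/ws)" refuses to match any path that starts
    // with the web services prefix, so those requests fall through to the
    // Jersey GuiceContainer filter instead of the wrapper servlet.
    String wsName = "ws";
    String regex = "(?!/" + wsName + ")" + ".*";
    System.out.println("/mapreduce/app".matches(regex));   // true  -> DefaultWrapperServlet
    System.out.println("/ws/v1/mapreduce".matches(regex)); // false -> handled by Jersey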

  /**
@@ -72,6 +72,7 @@ public class WebApps {
    }

    final String name;
    final String wsName;
    final Class<T> api;
    final T application;
    String bindAddress = "0.0.0.0";

@@ -82,10 +83,15 @@ public class WebApps {
    private final HashSet<ServletStruct> servlets = new HashSet<ServletStruct>();
    private final HashMap<String, Object> attributes = new HashMap<String, Object>();

    Builder(String name, Class<T> api, T application) {
    Builder(String name, Class<T> api, T application, String wsName) {
      this.name = name;
      this.api = api;
      this.application = application;
      this.wsName = wsName;
    }

    Builder(String name, Class<T> api, T application) {
      this(name, api, application, null);
    }

    public Builder<T> at(String bindAddress) {

@@ -142,6 +148,7 @@ public class WebApps {
        };
      }
      webapp.setName(name);
      webapp.setWebServices(wsName);
      String basePath = "/" + name;
      webapp.setRedirectPath(basePath);
      if (basePath.equals("/")) {

@@ -150,6 +157,14 @@ public class WebApps {
        webapp.addServePathSpec(basePath);
        webapp.addServePathSpec(basePath + "/*");
      }
      if (wsName != null && !wsName.equals(basePath)) {
        if (wsName.equals("/")) {
          webapp.addServePathSpec("/*");
        } else {
          webapp.addServePathSpec("/" + wsName);
          webapp.addServePathSpec("/" + wsName + "/*");
        }
      }
      if (conf == null) {
        conf = new Configuration();
      }

@@ -231,6 +246,20 @@ public class WebApps {
    }
  }

  /**
   * Create a new webapp builder.
   * @see WebApps for a complete example
   * @param <T> application (holding the embedded webapp) type
   * @param prefix the prefix of the webapp
   * @param api the api class for the application
   * @param app the application instance
   * @param wsPrefix the prefix for the webservice api for this app
   * @return a webapp builder
   */
  public static <T> Builder<T> $for(String prefix, Class<T> api, T app, String wsPrefix) {
    return new Builder<T>(prefix, api, app, wsPrefix);
  }
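As a usage sketch (service names, address, and the MyApi/MyWebApp types are hypothetical stand-ins; the same pattern appears in WebServer and MRClientService in this commit), a daemon that wants its REST API under /ws passes the extra prefix:

    // "demo" is the UI prefix, "ws" the web services prefix.
    WebApp wa = WebApps.$for("demo", MyApi.class, apiImpl, "ws")
        .at("0.0.0.0:8888")
        .with(new Configuration())
        .start(new MyWebApp());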

  /**
   * Create a new webapp builder.
   * @see WebApps for a complete example
@@ -28,9 +28,9 @@ import java.util.Map.Entry;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;

@@ -88,13 +88,11 @@ public class AllApplicationsPage extends NMView {
          .tbody();
    for (Entry<ApplicationId, Application> entry : this.nmContext
        .getApplications().entrySet()) {
      ApplicationId appId = entry.getKey();
      Application app = entry.getValue();
      String appIdStr = ConverterUtils.toString(appId);
      AppInfo info = new AppInfo(entry.getValue());
      tableBody
        .tr()
          .td().a(url("application", appIdStr), appIdStr)._()
          .td()._(app.getApplicationState())
          .td().a(url("application", info.getId()), info.getId())._()
          .td()._(info.getState())
        ._()
      ._();
    }
@@ -28,9 +28,9 @@ import java.util.Map.Entry;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.BODY;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;

@@ -83,17 +83,14 @@ public class AllContainersPage extends NMView {
        ._().tbody();
    for (Entry<ContainerId, Container> entry : this.nmContext
        .getContainers().entrySet()) {
      ContainerId containerId = entry.getKey();
      Container container = entry.getValue();
      String containerIdStr = ConverterUtils.toString(containerId);
      ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue());
      tableBody
        .tr()
          .td().a(url("container", containerIdStr), containerIdStr)
          .td().a(url("container", info.getId()), info.getId())
          ._()
          .td()._(container.getContainerState())._()
          .td()._(info.getState())._()
          .td()
              .a(url("containerlogs", containerIdStr, container.getUser()),
                  "logs")._()
              .a(url(info.getShortLogLink()), "logs")._()
        ._();
    }
    tableBody._()._()._();
@@ -23,19 +23,16 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

@@ -81,15 +78,14 @@ public class ApplicationPage extends NMView implements YarnWebParams {
        ConverterUtils.toApplicationId(this.recordFactory,
            $(APPLICATION_ID));
    Application app = this.nmContext.getApplications().get(applicationID);
    Map<ContainerId, Container> containers = app.getContainers();
    AppInfo info = new AppInfo(app);
    info("Application's information")
          ._("ApplicationId", ConverterUtils.toString(app.getAppId()))
          ._("ApplicationState", app.getApplicationState().toString())
          ._("User", app.getUser());
          ._("ApplicationId", info.getId())
          ._("ApplicationState", info.getState())
          ._("User", info.getUser());
    TABLE<Hamlet> containersListBody = html._(InfoBlock.class)
        .table("#containers");
    for (ContainerId containerId : containers.keySet()) {
      String containerIdStr = ConverterUtils.toString(containerId);
    for (String containerIdStr : info.getContainers()) {
      containersListBody
        .tr().td()
          .a(url("container", containerIdStr), containerIdStr)
@@ -18,18 +18,16 @@

package org.apache.hadoop.yarn.server.nodemanager.webapp;

import static org.apache.hadoop.yarn.util.StringHelper.ujoin;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.ACCORDION;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

@@ -77,21 +75,16 @@ public class ContainerPage extends NMView implements YarnWebParams {
            + "please go back to the previous page and retry.")._();
        return;
      }
      ContainerStatus containerData = container.cloneAndGetContainerStatus();
      int exitCode = containerData.getExitStatus();
      String exiStatus =
          (exitCode == YarnConfiguration.INVALID_CONTAINER_EXIT_STATUS) ?
              "N/A" : String.valueOf(exitCode);
      ContainerInfo info = new ContainerInfo(this.nmContext, container);

      info("Container information")
        ._("ContainerID", $(CONTAINER_ID))
        ._("ContainerState", container.getContainerState())
        ._("ExitStatus", exiStatus)
        ._("Diagnostics", containerData.getDiagnostics())
        ._("User", container.getUser())
        ._("TotalMemoryNeeded",
            container.getLaunchContext().getResource().getMemory())
        ._("logs", ujoin("containerlogs", $(CONTAINER_ID), container.getUser()),
            "Link to logs");
        ._("ContainerID", info.getId())
        ._("ContainerState", info.getState())
        ._("ExitStatus", info.getExitStatus())
        ._("Diagnostics", info.getDiagnostics())
        ._("User", info.getUser())
        ._("TotalMemoryNeeded", info.getMemoryNeeded())
        ._("logs", info.getShortLogLink(), "Link to logs");
      html._(InfoBlock.class);
    }
  }
@@ -0,0 +1,62 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.webapp;

import java.util.Set;
import java.util.HashSet;
import java.util.Arrays;

import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import com.google.inject.Singleton;

import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;

import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo;

@Singleton
@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {

  private JAXBContext context;
  private final Set<Class> types;

  // you have to specify all the dao classes here
  private final Class[] cTypes = {AppInfo.class, AppsInfo.class,
      ContainerInfo.class, ContainersInfo.class, NodeInfo.class};

  public JAXBContextResolver() throws Exception {
    this.types = new HashSet<Class>(Arrays.asList(cTypes));
    // sets the json configuration so that the json output looks like
    // the xml output
    this.context = new JSONJAXBContext(JSONConfiguration.natural().
        rootUnwrapping(false).build(), cTypes);
  }

  @Override
  public JAXBContext getContext(Class<?> objectType) {
    return (types.contains(objectType)) ? context : null;
  }
}
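A hedged illustration of what the natural convention with rootUnwrapping(false) produces (standalone sketch; assumes the Jersey 1.x JSON module on the classpath, and marshals an empty AppInfo purely to show the shape):

    import java.io.StringWriter;
    import com.sun.jersey.api.json.JSONConfiguration;
    import com.sun.jersey.api.json.JSONJAXBContext;
    import com.sun.jersey.api.json.JSONMarshaller;
    import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;

    public class NaturalJsonExample {
      public static void main(String[] args) throws Exception {
        // Same configuration as the resolver above: natural notation, and
        // the root element name ("app") is kept rather than unwrapped.
        JSONJAXBContext ctx = new JSONJAXBContext(
            JSONConfiguration.natural().rootUnwrapping(false).build(),
            AppInfo.class);
        JSONMarshaller marshaller = ctx.createJSONMarshaller();
        StringWriter out = new StringWriter();
        marshaller.marshallToJSON(new AppInfo(), out);
        System.out.println(out); // roughly {"app":{...}}
      }
    }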
@@ -0,0 +1,163 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.webapp;

import java.util.Map.Entry;

import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.UriInfo;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationState;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainerInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.ContainersInfo;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import org.apache.hadoop.yarn.webapp.WebApp;

import com.google.inject.Inject;
import com.google.inject.Singleton;

@Singleton
@Path("/ws/v1/node")
public class NMWebServices {
  private Context nmContext;
  private ResourceView rview;
  private WebApp webapp;
  private static RecordFactory recordFactory = RecordFactoryProvider
      .getRecordFactory(null);

  @javax.ws.rs.core.Context
  UriInfo uriInfo;

  @Inject
  public NMWebServices(final Context nm, final ResourceView view,
      final WebApp webapp) {
    this.nmContext = nm;
    this.rview = view;
    this.webapp = webapp;
  }

  @GET
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public NodeInfo get() {
    return getNodeInfo();
  }

  @GET
  @Path("/info")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public NodeInfo getNodeInfo() {
    return new NodeInfo(this.nmContext, this.rview);
  }

  @GET
  @Path("/apps")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AppsInfo getNodeApps(@QueryParam("state") String stateQuery,
      @QueryParam("user") String userQuery) {
    AppsInfo allApps = new AppsInfo();
    for (Entry<ApplicationId, Application> entry : this.nmContext
        .getApplications().entrySet()) {

      AppInfo appInfo = new AppInfo(entry.getValue());
      if (stateQuery != null && !stateQuery.isEmpty()) {
        ApplicationState state = ApplicationState.valueOf(stateQuery);
        if (!appInfo.getState().equalsIgnoreCase(stateQuery)) {
          continue;
        }
      }
      if (userQuery != null && !userQuery.isEmpty()) {
        if (!appInfo.getUser().toString().equals(userQuery)) {
          continue;
        }
      }
      allApps.add(appInfo);
    }
    return allApps;
  }

  @GET
  @Path("/apps/{appid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AppInfo getNodeApp(@PathParam("appid") String appId) {
    ApplicationId id = ConverterUtils.toApplicationId(recordFactory, appId);
    if (id == null) {
      throw new NotFoundException("app with id " + appId + " not found");
    }
    Application app = this.nmContext.getApplications().get(id);
    if (app == null) {
      throw new NotFoundException("app with id " + appId + " not found");
    }
    return new AppInfo(app);

  }

  @GET
  @Path("/containers")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ContainersInfo getNodeContainers() {
    ContainersInfo allContainers = new ContainersInfo();
    for (Entry<ContainerId, Container> entry : this.nmContext.getContainers()
        .entrySet()) {
      if (entry.getValue() == null) {
        // just skip it
        continue;
      }
      ContainerInfo info = new ContainerInfo(this.nmContext, entry.getValue(),
          uriInfo.getBaseUri().toString(), webapp.name());
      allContainers.add(info);
    }
    return allContainers;
  }

  @GET
  @Path("/containers/{containerid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ContainerInfo getNodeContainer(@PathParam("containerid") String id) {
    ContainerId containerId = null;
    containerId = ConverterUtils.toContainerId(id);
    if (containerId == null) {
      throw new NotFoundException("container with id, " + id
          + ", is empty or null");
    }
    Container container = nmContext.getContainers().get(containerId);
    if (container == null) {
      throw new NotFoundException("container with id, " + id + ", not found");
    }
    return new ContainerInfo(this.nmContext, container, uriInfo.getBaseUri()
        .toString(), webapp.name());

  }

}
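Once the NodeManager webapp is bound (see WebServer below), these endpoints can be exercised with any HTTP client. A minimal standalone sketch, assuming a reachable NodeManager (host and port are illustrative, not the configured NM web address):

    import java.io.BufferedReader;
    import java.io.InputStreamReader;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class NMWebServicesClientExample {
      public static void main(String[] args) throws Exception {
        // /ws/v1/node is the class-level @Path above; /info, /apps and
        // /containers are the sub-resources.
        URL url = new URL("http://localhost:9999/ws/v1/node/info");
        HttpURLConnection conn = (HttpURLConnection) url.openConnection();
        conn.setRequestProperty("Accept", "application/json"); // or application/xml
        BufferedReader in = new BufferedReader(
            new InputStreamReader(conn.getInputStream(), "UTF-8"));
        for (String line; (line = in.readLine()) != null;) {
          System.out.println(line);
        }
        in.close();
      }
    }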
@@ -23,10 +23,10 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;

import java.util.Date;

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.nodemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.HTML;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

@@ -36,6 +36,8 @@ import com.google.inject.Inject;

public class NodePage extends NMView {

  private static final long BYTES_IN_MB = 1024 * 1024;

  @Override
  protected void commonPreHead(HTML<_> html) {
    super.commonPreHead(html);

@@ -60,21 +62,22 @@ public class NodePage extends NMView {

    @Override
    protected void render(Block html) {
      NodeInfo info = new NodeInfo(this.context, this.resourceView);
      info("NodeManager information")
          ._("Total Vmem allocated for Containers",
              this.resourceView.getVmemAllocatedForContainers() + "bytes")
              StringUtils.byteDesc(info.getTotalVmemAllocated() * BYTES_IN_MB))
          ._("Total Pmem allocated for Container",
              this.resourceView.getPmemAllocatedForContainers() + "bytes")
              StringUtils.byteDesc(info.getTotalPmemAllocated() * BYTES_IN_MB))
          ._("NodeHealthyStatus",
              this.context.getNodeHealthStatus().getIsNodeHealthy())
              info.getHealthStatus())
          ._("LastNodeHealthTime", new Date(
              this.context.getNodeHealthStatus().getLastHealthReportTime()))
              info.getLastNodeUpdateTime()))
          ._("NodeHealthReport",
              this.context.getNodeHealthStatus().getHealthReport())
          ._("Node Manager Version:", YarnVersionInfo.getBuildVersion() +
              " on " + YarnVersionInfo.getDate())
          ._("Hadoop Version:", VersionInfo.getBuildVersion() +
              " on " + VersionInfo.getDate());
              info.getHealthReport())
          ._("Node Manager Version:", info.getNMBuildVersion() +
              " on " + info.getNMVersionBuiltOn())
          ._("Hadoop Version:", info.getHadoopBuildVersion() +
              " on " + info.getHadoopVersionBuiltOn());
      html._(InfoBlock.class);
    }
  }
@@ -30,9 +30,10 @@ import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.service.AbstractService;
import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;
import org.apache.hadoop.yarn.webapp.YarnWebParams;

public class WebServer extends AbstractService {

@@ -61,8 +62,9 @@ public class WebServer extends AbstractService {
        YarnConfiguration.DEFAULT_NM_WEBAPP_ADDRESS);
    LOG.info("Instantiating NMWebApp at " + bindAddress);
    try {
      this.webApp = WebApps.$for("node", Context.class, this.nmContext).at(
          bindAddress).with(getConfig()).start(this.nmWebApp);
      this.webApp =
          WebApps.$for("node", Context.class, this.nmContext, "ws")
              .at(bindAddress).with(getConfig()).start(this.nmWebApp);
    } catch (Exception e) {
      String msg = "NMWebapps failed to start.";
      LOG.error(msg, e);

@@ -95,6 +97,9 @@ public class WebServer extends AbstractService {

    @Override
    public void setup() {
      bind(NMWebServices.class);
      bind(GenericExceptionHandler.class);
      bind(JAXBContextResolver.class);
      bind(ResourceView.class).toInstance(this.resourceView);
      bind(ApplicationACLsManager.class).toInstance(this.aclsManager);
      bind(LocalDirsHandlerService.class).toInstance(dirsHandler);
@@ -0,0 +1,73 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.nodemanager.webapp.dao;

import java.util.ArrayList;
import java.util.Map;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.util.ConverterUtils;

@XmlRootElement(name = "app")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppInfo {

  protected String id;
  protected String state;
  protected String user;
  protected ArrayList<String> containerids;

  public AppInfo() {
  } // JAXB needs this

  public AppInfo(final Application app) {
    this.id = ConverterUtils.toString(app.getAppId());
    this.state = app.getApplicationState().toString();
    this.user = app.getUser();

    this.containerids = new ArrayList<String>();
    Map<ContainerId, Container> appContainers = app.getContainers();
    for (ContainerId containerId : appContainers.keySet()) {
      String containerIdStr = ConverterUtils.toString(containerId);
      containerids.add(containerIdStr);
    }
  }

  public String getId() {
    return this.id;
  }

  public String getUser() {
    return this.user;
  }

  public String getState() {
    return this.state;
  }

  public ArrayList<String> getContainers() {
    return this.containerids;
  }

}
@@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.nodemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "apps")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppsInfo {

  protected ArrayList<AppInfo> app = new ArrayList<AppInfo>();

  public AppsInfo() {
  } // JAXB needs this

  public void add(AppInfo appInfo) {
    app.add(appInfo);
  }

  public ArrayList<AppInfo> getApps() {
    return app;
  }

}
@@ -0,0 +1,122 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.webapp.dao;

import static org.apache.hadoop.yarn.util.StringHelper.join;
import static org.apache.hadoop.yarn.util.StringHelper.ujoin;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;

@XmlRootElement(name = "container")
@XmlAccessorType(XmlAccessType.FIELD)
public class ContainerInfo {

  protected String id;
  protected String state;
  protected int exitCode;
  protected String diagnostics;
  protected String user;
  protected long totalMemoryNeededMB;
  protected String containerLogsLink;
  protected String nodeId;
  @XmlTransient
  protected String containerLogsShortLink;
  @XmlTransient
  protected String exitStatus;

  public ContainerInfo() {
  } // JAXB needs this

  public ContainerInfo(final Context nmContext, final Container container) {
    this(nmContext, container, "", "");
  }

  public ContainerInfo(final Context nmContext, final Container container,
      final String requestUri, final String pathPrefix) {

    this.id = container.getContainerID().toString();
    this.nodeId = nmContext.getNodeId().toString();
    ContainerStatus containerData = container.cloneAndGetContainerStatus();
    this.exitCode = containerData.getExitStatus();
    this.exitStatus = (this.exitCode == YarnConfiguration.INVALID_CONTAINER_EXIT_STATUS) ? "N/A"
        : String.valueOf(exitCode);
    this.state = container.getContainerState().toString();
    this.diagnostics = containerData.getDiagnostics();
    if (this.diagnostics == null || this.diagnostics.isEmpty()) {
      this.diagnostics = "";
    }

    this.user = container.getUser();
    this.totalMemoryNeededMB = container.getLaunchContext().getResource()
        .getMemory();
    this.containerLogsShortLink = ujoin("containerlogs", this.id,
        container.getUser());
    this.containerLogsLink = join(requestUri, pathPrefix,
        this.containerLogsShortLink);
  }

  public String getId() {
    return this.id;
  }

  public String getNodeId() {
    return this.nodeId;
  }

  public String getState() {
    return this.state;
  }

  public int getExitCode() {
    return this.exitCode;
  }

  public String getExitStatus() {
    return this.exitStatus;
  }

  public String getDiagnostics() {
    return this.diagnostics;
  }

  public String getUser() {
    return this.user;
  }

  public String getShortLogLink() {
    return this.containerLogsShortLink;
  }

  public String getLogLink() {
    return this.containerLogsLink;
  }

  public long getMemoryNeeded() {
    return this.totalMemoryNeededMB;
  }

}
@@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.nodemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "containers")
@XmlAccessorType(XmlAccessType.FIELD)
public class ContainersInfo {

  protected ArrayList<ContainerInfo> container = new ArrayList<ContainerInfo>();

  public ContainersInfo() {
  } // JAXB needs this

  public void add(ContainerInfo containerInfo) {
    container.add(containerInfo);
  }

  public ArrayList<ContainerInfo> getContainers() {
    return container;
  }

}
@@ -0,0 +1,127 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.nodemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.ResourceView;
import org.apache.hadoop.yarn.util.YarnVersionInfo;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class NodeInfo {

  private static final long BYTES_IN_MB = 1024 * 1024;

  protected String healthReport;
  protected long totalVmemAllocatedContainersMB;
  protected long totalPmemAllocatedContainersMB;
  protected long lastNodeUpdateTime;
  protected boolean nodeHealthy;
  protected String nodeManagerVersion;
  protected String nodeManagerBuildVersion;
  protected String nodeManagerVersionBuiltOn;
  protected String hadoopVersion;
  protected String hadoopBuildVersion;
  protected String hadoopVersionBuiltOn;
  protected String id;
  protected String nodeHostName;

  public NodeInfo() {
  } // JAXB needs this

  public NodeInfo(final Context context, final ResourceView resourceView) {

    this.id = context.getNodeId().toString();
    this.nodeHostName = context.getNodeId().getHost();
    this.totalVmemAllocatedContainersMB = resourceView
        .getVmemAllocatedForContainers() / BYTES_IN_MB;
    this.totalPmemAllocatedContainersMB = resourceView
        .getPmemAllocatedForContainers() / BYTES_IN_MB;
    this.nodeHealthy = context.getNodeHealthStatus().getIsNodeHealthy();
    this.lastNodeUpdateTime = context.getNodeHealthStatus()
        .getLastHealthReportTime();

    this.healthReport = context.getNodeHealthStatus().getHealthReport();

    this.nodeManagerVersion = YarnVersionInfo.getVersion();
    this.nodeManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.nodeManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }

  public String getNodeId() {
    return this.id;
  }

  public String getNodeHostName() {
    return this.nodeHostName;
  }

  public String getNMVersion() {
    return this.nodeManagerVersion;
  }

  public String getNMBuildVersion() {
    return this.nodeManagerBuildVersion;
  }

  public String getNMVersionBuiltOn() {
    return this.nodeManagerVersionBuiltOn;
  }

  public String getHadoopVersion() {
    return this.hadoopVersion;
  }

  public String getHadoopBuildVersion() {
    return this.hadoopBuildVersion;
  }

  public String getHadoopVersionBuiltOn() {
    return this.hadoopVersionBuiltOn;
  }

  public boolean getHealthStatus() {
    return this.nodeHealthy;
  }

  public long getLastNodeUpdateTime() {
    return this.lastNodeUpdateTime;
  }

  public String getHealthReport() {
    return this.healthReport;
  }

  public long getTotalVmemAllocated() {
    return this.totalVmemAllocatedContainersMB;
  }

  public long getTotalPmemAllocated() {
    return this.totalPmemAllocatedContainersMB;
  }

}
@@ -412,7 +412,7 @@ public class ResourceManager extends CompositeService implements Recoverable {

  protected void startWepApp() {
    Builder<ApplicationMasterService> builder =
      WebApps.$for("cluster", masterService).at(
      WebApps.$for("cluster", ApplicationMasterService.class, masterService, "ws").at(
          this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS,
              YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS));
    if(YarnConfiguration.getRMWebAppHostAndPort(conf).
@@ -18,10 +18,9 @@

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;


@@ -30,25 +29,25 @@ import com.google.inject.Inject;
public class AboutBlock extends HtmlBlock {
  final ResourceManager rm;

  @Inject
  AboutBlock(ResourceManager rm, ViewContext ctx) {
    super(ctx);
    this.rm = rm;
  }


  @Override
  protected void render(Block html) {
    html._(MetricsOverviewTable.class);
    long ts = ResourceManager.clusterTimeStamp;
    ResourceManager rm = getInstance(ResourceManager.class);
    ClusterInfo cinfo = new ClusterInfo(rm);
    info("Cluster overview").
      _("Cluster ID:", ts).
      _("ResourceManager state:", rm.getServiceState()).
      _("ResourceManager started on:", Times.format(ts)).
      _("ResourceManager version:", YarnVersionInfo.getBuildVersion() +
          " on " + YarnVersionInfo.getDate()).
      _("Hadoop version:", VersionInfo.getBuildVersion() +
          " on " + VersionInfo.getDate());
      _("Cluster ID:", cinfo.getClusterId()).
      _("ResourceManager state:", cinfo.getState()).
      _("ResourceManager started on:", Times.format(cinfo.getStartedOn())).
      _("ResourceManager version:", cinfo.getRMBuildVersion() +
          " on " + cinfo.getRMVersionBuiltOn()).
      _("Hadoop version:", cinfo.getHadoopBuildVersion() +
          " on " + cinfo.getHadoopVersionBuiltOn());
    html._(InfoBlock.class);
  }

@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI._PROGRESSBAR_VALUE;

import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TABLE;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.TBODY;

@@ -56,23 +57,18 @@ class AppsBlock extends HtmlBlock {
        tbody();
    int i = 0;
    for (RMApp app : list.apps.values()) {
      String appId = app.getApplicationId().toString();
      String trackingUrl = app.getTrackingUrl();
      boolean trackingUrlIsNotReady = trackingUrl == null || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
      String ui = trackingUrlIsNotReady ? "UNASSIGNED" :
          (app.getFinishTime() == 0 ?
              "ApplicationMaster" : "History");
      String percent = String.format("%.1f", app.getProgress() * 100);
      AppInfo appInfo = new AppInfo(app, true);
      String percent = String.format("%.1f", appInfo.getProgress());
      tbody.
        tr().
          td().
            br().$title(String.valueOf(app.getApplicationId().getId()))._(). // for sorting
            a(url("app", appId), appId)._().
          td(app.getUser().toString()).
          td(app.getName().toString()).
          td(app.getQueue().toString()).
          td(app.getState().toString()).
          td(app.getFinalApplicationStatus().toString()).
            br().$title(appInfo.getAppIdNum())._(). // for sorting
            a(url("app", appInfo.getAppId()), appInfo.getAppId())._().
          td(appInfo.getUser()).
          td(appInfo.getName()).
          td(appInfo.getQueue()).
          td(appInfo.getState()).
          td(appInfo.getFinalStatus()).
          td().
            br().$title(percent)._(). // for sorting
            div(_PROGRESSBAR).

@@ -80,9 +76,9 @@ class AppsBlock extends HtmlBlock {
              div(_PROGRESSBAR_VALUE).
                $style(join("width:", percent, '%'))._()._()._().
          td().
            a(trackingUrlIsNotReady ?
                "#" : join("http://", trackingUrl), ui)._().
          td(app.getDiagnostics().toString())._();
            a(!appInfo.isTrackingUrlReady() ?
                "#" : appInfo.getTrackingUrlPretty(), appInfo.getTrackingUI())._().
          td(appInfo.getNote())._();
      if (list.rendering != Render.HTML && ++i >= 20) break;
    }
    tbody._()._();
@@ -31,6 +31,7 @@ import java.util.concurrent.ConcurrentMap;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
import org.apache.hadoop.yarn.webapp.ToJSON;
import org.apache.hadoop.yarn.webapp.view.JQueryUI.Render;

@@ -54,31 +55,27 @@ class AppsList implements ToJSON {
    out.append('[');
    boolean first = true;
    for (RMApp app : apps.values()) {
      AppInfo appInfo = new AppInfo(app, false);
      if (first) {
        first = false;
      } else {
        out.append(",\n");
      }
      String appID = app.getApplicationId().toString();
      String trackingUrl = app.getTrackingUrl();
      boolean trackingUrlIsNotReady = trackingUrl == null
          || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
      String ui = trackingUrlIsNotReady ? "UNASSIGNED"
          : (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");
      out.append("[\"");
      appendSortable(out, app.getApplicationId().getId());
      appendLink(out, appID, rc.prefix(), "app", appID).append(_SEP).
          append(escapeHtml(app.getUser().toString())).append(_SEP).
          append(escapeHtml(app.getName().toString())).append(_SEP).
          append(escapeHtml(app.getQueue())).append(_SEP).
          append(app.getState().toString()).append(_SEP).
          append(app.getFinalApplicationStatus().toString()).append(_SEP);
      appendProgressBar(out, app.getProgress()).append(_SEP);
      appendLink(out, ui, rc.prefix(),
          trackingUrlIsNotReady ?
              "#" : "http://", trackingUrl).
      appendSortable(out, appInfo.getAppIdNum());
      appendLink(out, appInfo.getAppId(), rc.prefix(), "app",
          appInfo.getAppId()).append(_SEP).
          append(escapeHtml(appInfo.getUser())).append(_SEP).
          append(escapeHtml(appInfo.getName())).append(_SEP).
          append(escapeHtml(appInfo.getQueue())).append(_SEP).
          append(appInfo.getState()).append(_SEP).
          append(appInfo.getFinalStatus()).append(_SEP);
      appendProgressBar(out, appInfo.getProgress()).append(_SEP);
      appendLink(out, appInfo.getTrackingUI(), rc.prefix(),
          !appInfo.isTrackingUrlReady() ?
              "#" : appInfo.getTrackingUrlPretty()).
          append(_SEP).append(escapeJavaScript(escapeHtml(
              app.getDiagnostics().toString()))).
              appInfo.getNote()))).
          append("\"]");
    }
    out.append(']');
@ -18,19 +18,23 @@
|
|||
|
||||
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
|
||||
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.servlet.RequestScoped;
|
||||
import static org.apache.hadoop.yarn.util.StringHelper.join;
|
||||
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
|
||||
import org.apache.hadoop.yarn.webapp.SubView;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.LI;
|
||||
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
|
||||
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
|
||||
|
||||
import static org.apache.hadoop.yarn.util.StringHelper.*;
|
||||
import com.google.inject.Inject;
|
||||
import com.google.inject.servlet.RequestScoped;
|
||||
|
||||
class CapacitySchedulerPage extends RmView {
|
||||
static final String _Q = ".ui-state-default.ui-corner-all";
|
||||
|
@@ -47,22 +51,21 @@ class CapacitySchedulerPage extends RmView {

  public static class QueueBlock extends HtmlBlock {
    final Parent parent;
    final CapacitySchedulerInfo sinfo;

    @Inject QueueBlock(Parent parent) {
      this.parent = parent;
      sinfo = new CapacitySchedulerInfo(parent.queue);
    }

    @Override
    public void render(Block html) {
      UL<Hamlet> ul = html.ul();
      CSQueue parentQueue = parent.queue;
      for (CSQueue queue : parentQueue.getChildQueues()) {
        float used = queue.getUsedCapacity();
        float set = queue.getCapacity();
      for (CapacitySchedulerQueueInfo info : sinfo.getSubQueues()) {
        float used = info.getUsedCapacity() / 100;
        float set = info.getCapacity() / 100;
        float delta = Math.abs(set - used) + 0.001f;
        float max = queue.getMaximumCapacity();
        if (max < EPSILON || max > 1f) max = 1f;
        //String absMaxPct = percent(queue.getAbsoluteMaximumCapacity());
        float max = info.getMaxCapacity() / 100;
        LI<UL<Hamlet>> li = ul.
          li().
            a(_Q).$style(width(max * WIDTH_F)).

@@ -72,14 +75,16 @@ class CapacitySchedulerPage extends RmView {

            span().$style(join(width(delta/max), ';',
              used > set ? OVER : UNDER, ';',
              used > set ? left(set/max) : left(used/max)))._('.')._().
            span(".q", queue.getQueuePath().substring(5))._();
        if (queue instanceof ParentQueue) {
          parent.queue = queue;
            span(".q", info.getQueuePath().substring(5))._();
        if (info.getQueue() instanceof ParentQueue) {
          // this could be optimized better
          parent.queue = info.getQueue();
          li.
            _(QueueBlock.class);
        }
        li._();
      }

      ul._();
    }
  }

@@ -111,8 +116,9 @@ class CapacitySchedulerPage extends RmView {

      } else {
        CSQueue root = cs.getRootQueue();
        parent.queue = root;
        float used = root.getUsedCapacity();
        float set = root.getCapacity();
        CapacitySchedulerInfo sinfo = new CapacitySchedulerInfo(parent.queue);
        float used = sinfo.getUsedCapacity() / 100;
        float set = sinfo.getCapacity() / 100;
        float delta = Math.abs(set - used) + 0.001f;
        ul.
          li().
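A note on the arithmetic above: CapacitySchedulerInfo stores capacities as percentages (0-100), while the Hamlet width/left math in this page works on 0-1 fractions, hence the repeated "/ 100". A minimal standalone sketch of that convention, including the EPSILON clamp applied to unset maximum capacities (values here are illustrative only, not from the patch):

public class CapacityMathSketch {
  static final float EPSILON = 1e-8f;

  // mirrors CapacitySchedulerInfo: clamp the raw 0-1 maximum, then scale to percent
  static float toPercent(float rawMax) {
    float max = rawMax;
    if (max < EPSILON || max > 1f) {
      max = 1f; // unset or out-of-range maximums render as 100%
    }
    return max * 100;
  }

  public static void main(String[] args) {
    float pct = toPercent(0.6f); // 60.0
    float fraction = pct / 100;  // back to 0.6 for the width(...) math above
    System.out.println(pct + "% -> " + fraction);
  }
}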
@@ -18,22 +18,20 @@

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import com.google.inject.Inject;
import static org.apache.hadoop.yarn.util.StringHelper.join;

import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.UL;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.webapp.view.InfoBlock;

import static org.apache.hadoop.yarn.util.StringHelper.*;
import com.google.inject.Inject;

class DefaultSchedulerPage extends RmView {
  static final String _Q = ".ui-state-default.ui-corner-all";

@@ -44,66 +42,35 @@ class DefaultSchedulerPage extends RmView {

  static final float EPSILON = 1e-8f;

  static class QueueInfoBlock extends HtmlBlock {
    final RMContext rmContext;
    final FifoScheduler fs;
    final String qName;
    final QueueInfo qInfo;
    final FifoSchedulerInfo sinfo;

    @Inject QueueInfoBlock(RMContext context, ViewContext ctx, ResourceManager rm) {
      super(ctx);
      this.rmContext = context;

      fs = (FifoScheduler) rm.getResourceScheduler();
      qName = fs.getQueueInfo("",false,false).getQueueName();
      qInfo = fs.getQueueInfo(qName,true,true);
      sinfo = new FifoSchedulerInfo(rm);
    }

    @Override public void render(Block html) {
      String minmemoryresource =
          Integer.toString(fs.getMinimumResourceCapability().getMemory());
      String maxmemoryresource =
          Integer.toString(fs.getMaximumResourceCapability().getMemory());
      String qstate = (qInfo.getQueueState() == QueueState.RUNNING) ?
          "Running" :
          (qInfo.getQueueState() == QueueState.STOPPED) ?
              "Stopped" : "Unknown";

      int usedNodeMem = 0;
      int availNodeMem = 0;
      int totNodeMem = 0;
      int nodeContainers = 0;

      for (RMNode ni : this.rmContext.getRMNodes().values()) {
        SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID());
        usedNodeMem += report.getUsedResource().getMemory();
        availNodeMem += report.getAvailableResource().getMemory();
        totNodeMem += ni.getTotalCapability().getMemory();
        nodeContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers();
      }

      info("\'" + qName + "\' Queue Status").
        _("Queue State:" , qstate).
        _("Minimum Queue Memory Capacity:" , minmemoryresource).
        _("Maximum Queue Memory Capacity:" , maxmemoryresource).
        _("Number of Nodes:" , Integer.toString(this.rmContext.getRMNodes().size())).
        _("Used Node Capacity:" , Integer.toString(usedNodeMem)).
        _("Available Node Capacity:" , Integer.toString(availNodeMem)).
        _("Total Node Capacity:" , Integer.toString(totNodeMem)).
        _("Number of Node Containers:" , Integer.toString(nodeContainers));
      info("\'" + sinfo.getQueueName() + "\' Queue Status").
        _("Queue State:" , sinfo.getState()).
        _("Minimum Queue Memory Capacity:" , Integer.toString(sinfo.getMinQueueMemoryCapacity())).
        _("Maximum Queue Memory Capacity:" , Integer.toString(sinfo.getMaxQueueMemoryCapacity())).
        _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())).
        _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())).
        _("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())).
        _("Total Node Capacity:" , Integer.toString(sinfo.getTotalNodeCapacity())).
        _("Number of Node Containers:" , Integer.toString(sinfo.getNumContainers()));

      html._(InfoBlock.class);
    }
  }

  static class QueuesBlock extends HtmlBlock {
    final FifoSchedulerInfo sinfo;
    final FifoScheduler fs;
    final String qName;
    final QueueInfo qInfo;

    @Inject QueuesBlock(ResourceManager rm) {
      sinfo = new FifoSchedulerInfo(rm);
      fs = (FifoScheduler) rm.getResourceScheduler();
      qName = fs.getQueueInfo("",false,false).getQueueName();
      qInfo = fs.getQueueInfo(qName,false,false);
    }

    @Override

@@ -123,8 +90,8 @@ class DefaultSchedulerPage extends RmView {

            span().$style(Q_END)._("100% ")._().
            span(".q", "default")._()._();
      } else {
        float used = qInfo.getCurrentCapacity();
        float set = qInfo.getCapacity();
        float used = sinfo.getUsedCapacity();
        float set = sinfo.getCapacity();
        float delta = Math.abs(set - used) + 0.001f;
        ul.
          li().

@@ -133,7 +100,7 @@ class DefaultSchedulerPage extends RmView {

            span().$style(Q_END)._("100%")._().
            span().$style(join(width(delta), ';', used > set ? OVER : UNDER,
              ';', used > set ? left(set) : left(used)))._(".")._().
            span(".q", qName)._().
            span(".q", sinfo.getQueueName())._().
            _(QueueInfoBlock.class)._();
      }
@@ -0,0 +1,70 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import com.google.inject.Singleton;
import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;

import javax.ws.rs.ext.ContextResolver;
import javax.ws.rs.ext.Provider;
import javax.xml.bind.JAXBContext;

import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;

@Singleton
@Provider
public class JAXBContextResolver implements ContextResolver<JAXBContext> {

  private JAXBContext context;
  private final Set<Class> types;

  // you have to specify all the dao classes here
  private final Class[] cTypes = { AppInfo.class, ClusterInfo.class,
      CapacitySchedulerQueueInfo.class, FifoSchedulerInfo.class,
      SchedulerTypeInfo.class, NodeInfo.class, UserMetricsInfo.class,
      CapacitySchedulerInfo.class, ClusterMetricsInfo.class,
      SchedulerInfo.class, AppsInfo.class, NodesInfo.class };

  public JAXBContextResolver() throws Exception {
    this.types = new HashSet<Class>(Arrays.asList(cTypes));
    this.context = new JSONJAXBContext(JSONConfiguration.natural()
        .rootUnwrapping(false).build(), cTypes);
  }

  @Override
  public JAXBContext getContext(Class<?> objectType) {
    return (types.contains(objectType)) ? context : null;
  }
}
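For context, a standalone sketch (not part of the patch) of what the resolver's JSONJAXBContext is configured to do: natural JSON notation with root unwrapping disabled, so a marshalled dao keeps its root element name. ClusterInfo stands in for any registered dao class; the printed shape is approximate.

import java.io.StringWriter;

import com.sun.jersey.api.json.JSONConfiguration;
import com.sun.jersey.api.json.JSONJAXBContext;
import com.sun.jersey.api.json.JSONMarshaller;

import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;

public class JsonContextSketch {
  public static void main(String[] args) throws Exception {
    JSONJAXBContext ctx = new JSONJAXBContext(
        JSONConfiguration.natural().rootUnwrapping(false).build(),
        ClusterInfo.class);
    JSONMarshaller m = ctx.createJSONMarshaller();
    StringWriter out = new StringWriter();
    m.marshallToJSON(new ClusterInfo(), out); // no-arg JAXB constructor
    System.out.println(out); // roughly {"clusterInfo":{...}}
  }
}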
@@ -19,11 +19,11 @@

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.UserMetricsInfo;

import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.DIV;
import org.apache.hadoop.yarn.webapp.view.HtmlBlock;

@@ -36,12 +36,12 @@ import com.google.inject.Inject;

 * current user is using on the cluster.
 */
public class MetricsOverviewTable extends HtmlBlock {
  private static final long BYTES_IN_GB = 1024 * 1024 * 1024;
  private static final long BYTES_IN_MB = 1024 * 1024;

  private final RMContext rmContext;
  private final ResourceManager rm;

  @Inject
  @Inject
  MetricsOverviewTable(RMContext context, ResourceManager rm, ViewContext ctx) {
    super(ctx);
    this.rmContext = context;

@@ -55,22 +55,7 @@ public class MetricsOverviewTable extends HtmlBlock {

    //CSS in the correct spot
    html.style(".metrics {margin-bottom:5px}");

    ResourceScheduler rs = rm.getResourceScheduler();
    QueueMetrics metrics = rs.getRootQueueMetrics();
    ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();

    int appsSubmitted = metrics.getAppsSubmitted();
    int reservedGB = metrics.getReservedGB();
    int availableGB = metrics.getAvailableGB();
    int allocatedGB = metrics.getAllocatedGB();
    int containersAllocated = metrics.getAllocatedContainers();
    int totalGB = availableGB + reservedGB + allocatedGB;

    int totalNodes = clusterMetrics.getNumNMs();
    int lostNodes = clusterMetrics.getNumLostNMs();
    int unhealthyNodes = clusterMetrics.getUnhealthyNMs();
    int decommissionedNodes = clusterMetrics.getNumDecommisionedNMs();
    int rebootedNodes = clusterMetrics.getNumRebootedNMs();
    ClusterMetricsInfo clusterMetrics = new ClusterMetricsInfo(this.rm, this.rmContext);

    DIV<Hamlet> div = html.div().$class("metrics");

@@ -92,30 +77,23 @@ public class MetricsOverviewTable extends HtmlBlock {

      _().
    tbody().$class("ui-widget-content").
      tr().
        td(String.valueOf(appsSubmitted)).
        td(String.valueOf(containersAllocated)).
        td(StringUtils.byteDesc(allocatedGB * BYTES_IN_GB)).
        td(StringUtils.byteDesc(totalGB * BYTES_IN_GB)).
        td(StringUtils.byteDesc(reservedGB * BYTES_IN_GB)).
        td().a(url("nodes"),String.valueOf(totalNodes))._().
        td().a(url("nodes/decommissioned"),String.valueOf(decommissionedNodes))._().
        td().a(url("nodes/lost"),String.valueOf(lostNodes))._().
        td().a(url("nodes/unhealthy"),String.valueOf(unhealthyNodes))._().
        td().a(url("nodes/rebooted"),String.valueOf(rebootedNodes))._().
        td(String.valueOf(clusterMetrics.getAppsSubmitted())).
        td(String.valueOf(clusterMetrics.getContainersAllocated())).
        td(StringUtils.byteDesc(clusterMetrics.getAllocatedMB() * BYTES_IN_MB)).
        td(StringUtils.byteDesc(clusterMetrics.getTotalMB() * BYTES_IN_MB)).
        td(StringUtils.byteDesc(clusterMetrics.getReservedMB() * BYTES_IN_MB)).
        td().a(url("nodes"),String.valueOf(clusterMetrics.getTotalNodes()))._().
        td().a(url("nodes/decommissioned"),String.valueOf(clusterMetrics.getDecommissionedNodes()))._().
        td().a(url("nodes/lost"),String.valueOf(clusterMetrics.getLostNodes()))._().
        td().a(url("nodes/unhealthy"),String.valueOf(clusterMetrics.getUnhealthyNodes()))._().
        td().a(url("nodes/rebooted"),String.valueOf(clusterMetrics.getRebootedNodes()))._().
      _().
    _()._();

    String user = request().getRemoteUser();
    if (user != null) {
      QueueMetrics userMetrics = metrics.getUserMetrics(user);
      if(userMetrics != null) {
        int myAppsSubmitted = userMetrics.getAppsSubmitted();
        int myRunningContainers = userMetrics.getAllocatedContainers();
        int myPendingContainers = userMetrics.getPendingContainers();
        int myReservedContainers = userMetrics.getReservedContainers();
        int myReservedGB = userMetrics.getReservedGB();
        int myPendingGB = userMetrics.getPendingGB();
        int myAllocatedGB = userMetrics.getAllocatedGB();
      UserMetricsInfo userMetrics = new UserMetricsInfo(this.rm, this.rmContext, user);
      if (userMetrics.metricsAvailable()) {
        div.table("#usermetricsoverview").
          thead().$class("ui-widget-header").
            tr().

@@ -130,13 +108,13 @@ public class MetricsOverviewTable extends HtmlBlock {

            _().
          tbody().$class("ui-widget-content").
            tr().
              td(String.valueOf(myAppsSubmitted)).
              td(String.valueOf(myRunningContainers)).
              td(String.valueOf(myPendingContainers)).
              td(String.valueOf(myReservedContainers)).
              td(StringUtils.byteDesc(myAllocatedGB * BYTES_IN_GB)).
              td(StringUtils.byteDesc(myPendingGB * BYTES_IN_GB)).
              td(StringUtils.byteDesc(myReservedGB * BYTES_IN_GB)).
              td(String.valueOf(userMetrics.getAppsSubmitted())).
              td(String.valueOf(userMetrics.getRunningContainers())).
              td(String.valueOf(userMetrics.getPendingContainers())).
              td(String.valueOf(userMetrics.getReservedContainers())).
              td(StringUtils.byteDesc(userMetrics.getAllocatedMB() * BYTES_IN_MB)).
              td(StringUtils.byteDesc(userMetrics.getPendingMB() * BYTES_IN_MB)).
              td(StringUtils.byteDesc(userMetrics.getReservedMB() * BYTES_IN_MB)).
            _().
          _()._();
      }
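The unit switch above is worth a quick check: the old code read GB-valued QueueMetrics fields and scaled by BYTES_IN_GB; the dao now reports MB and the table scales by BYTES_IN_MB, so the rendered text comes out the same. A small sketch with made-up numbers:

import org.apache.hadoop.util.StringUtils;

public class UnitSketch {
  private static final long BYTES_IN_GB = 1024 * 1024 * 1024;
  private static final long BYTES_IN_MB = 1024 * 1024;

  public static void main(String[] args) {
    int allocatedGB = 8;                    // what the old code path read
    long allocatedMB = allocatedGB * 1024L; // what the dao now reports (8192)
    System.out.println(StringUtils.byteDesc(allocatedGB * BYTES_IN_GB)); // "8 GB"
    System.out.println(StringUtils.byteDesc(allocatedMB * BYTES_IN_MB)); // "8 GB"
  }
}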
@@ -25,14 +25,12 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID;

import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit;

import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;

@@ -45,9 +43,9 @@ import com.google.inject.Inject;

class NodesPage extends RmView {

  static class NodesBlock extends HtmlBlock {
    private static final long BYTES_IN_MB = 1024 * 1024;
    final RMContext rmContext;
    final ResourceManager rm;
    private static final long BYTES_IN_MB = 1024 * 1024;

    @Inject
    NodesBlock(RMContext context, ResourceManager rm, ViewContext ctx) {

@@ -59,7 +57,7 @@ class NodesPage extends RmView {

    @Override
    protected void render(Block html) {
      html._(MetricsOverviewTable.class);

      ResourceScheduler sched = rm.getResourceScheduler();
      String type = $(NODE_STATE);
      TBODY<TABLE<Hamlet>> tbody = html.table("#nodes").

@@ -88,27 +86,18 @@ class NodesPage extends RmView {

            continue;
          }
        }
        NodeId id = ni.getNodeID();
        SchedulerNodeReport report = sched.getNodeReport(id);
        int numContainers = 0;
        int usedMemory = 0;
        int availableMemory = 0;
        if(report != null) {
          numContainers = report.getNumContainers();
          usedMemory = report.getUsedResource().getMemory();
          availableMemory = report.getAvailableResource().getMemory();
        }

        NodeHealthStatus health = ni.getNodeHealthStatus();
        NodeInfo info = new NodeInfo(ni, sched);
        int usedMemory = (int)info.getUsedMemory();
        int availableMemory = (int)info.getAvailableMemory();
        tbody.tr().
            td(ni.getRackName()).
            td(String.valueOf(ni.getState())).
            td(String.valueOf(ni.getNodeID().toString())).
            td().a("http://" + ni.getHttpAddress(), ni.getHttpAddress())._().
            td(health.getIsNodeHealthy() ? "Healthy" : "Unhealthy").
            td(Times.format(health.getLastHealthReportTime())).
            td(String.valueOf(health.getHealthReport())).
            td(String.valueOf(numContainers)).
            td(info.getRack()).
            td(info.getState()).
            td(info.getNodeId()).
            td().a("http://" + info.getNodeHTTPAddress(), info.getNodeHTTPAddress())._().
            td(info.getHealthStatus()).
            td(Times.format(info.getLastHealthUpdate())).
            td(info.getHealthReport()).
            td(String.valueOf(info.getNumContainers())).
            td().br().$title(String.valueOf(usedMemory))._().
              _(StringUtils.byteDesc(usedMemory * BYTES_IN_MB))._().
            td().br().$title(String.valueOf(usedMemory))._().
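The inline null-report handling that NodesPage used to do (a scheduler may have no SchedulerNodeReport yet for a node, in which case memory and container counts default to zero) moves behind NodeInfo. A sketch of that defaulting, inferred from the removed block rather than from the NodeInfo source, which is outside this hunk:

import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;

public class NodeReportSketch {
  // returns {usedMemory, availableMemory, numContainers}
  static int[] usage(RMNode ni, ResourceScheduler sched) {
    SchedulerNodeReport report = sched.getNodeReport(ni.getNodeID());
    int used = 0, avail = 0, containers = 0;
    if (report != null) { // nodes without allocations may have no report yet
      used = report.getUsedResource().getMemory();
      avail = report.getAvailableResource().getMemory();
      containers = report.getNumContainers();
    }
    return new int[] { used, avail, containers };
  }
}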
@@ -23,6 +23,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.pajoin;

import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;

/**

@@ -41,6 +42,9 @@ public class RMWebApp extends WebApp {

  @Override
  public void setup() {
    bind(JAXBContextResolver.class);
    bind(RMWebServices.class);
    bind(GenericExceptionHandler.class);
    if (rm != null) {
      bind(ResourceManager.class).toInstance(rm);
      bind(RMContext.class).toInstance(rm.getRMContext());
@@ -0,0 +1,333 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp;

import java.io.IOException;
import java.util.concurrent.ConcurrentMap;

import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.GET;
import javax.ws.rs.Path;
import javax.ws.rs.PathParam;
import javax.ws.rs.Produces;
import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ClusterMetricsInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.FifoSchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodeInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.NodesInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.SchedulerTypeInfo;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.NotFoundException;

import com.google.inject.Inject;
import com.google.inject.Singleton;

@Singleton
@Path("/ws/v1/cluster")
public class RMWebServices {
  private static final Log LOG = LogFactory.getLog(RMWebServices.class);
  private final ResourceManager rm;
  private static RecordFactory recordFactory = RecordFactoryProvider
      .getRecordFactory(null);
  private final ApplicationACLsManager aclsManager;

  @Inject
  public RMWebServices(final ResourceManager rm,
      final ApplicationACLsManager aclsManager) {
    this.rm = rm;
    this.aclsManager = aclsManager;
  }

  protected Boolean hasAccess(RMApp app, HttpServletRequest hsr) {
    // Check for the authorization.
    String remoteUser = hsr.getRemoteUser();
    UserGroupInformation callerUGI = null;
    if (remoteUser != null) {
      callerUGI = UserGroupInformation.createRemoteUser(remoteUser);
    }
    if (callerUGI != null
        && !this.aclsManager.checkAccess(callerUGI,
            ApplicationAccessType.VIEW_APP, app.getUser(),
            app.getApplicationId())) {
      return false;
    }
    return true;
  }

  @GET
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ClusterInfo get() {
    return getClusterInfo();
  }

  @GET
  @Path("/info")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ClusterInfo getClusterInfo() {
    return new ClusterInfo(this.rm);
  }

  @GET
  @Path("/metrics")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public ClusterMetricsInfo getClusterMetricsInfo() {
    return new ClusterMetricsInfo(this.rm, this.rm.getRMContext());
  }

  @GET
  @Path("/scheduler")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public SchedulerTypeInfo getSchedulerInfo() {
    ResourceScheduler rs = rm.getResourceScheduler();
    SchedulerInfo sinfo;
    if (rs instanceof CapacityScheduler) {
      CapacityScheduler cs = (CapacityScheduler) rs;
      CSQueue root = cs.getRootQueue();
      sinfo = new CapacitySchedulerInfo(root);
    } else if (rs instanceof FifoScheduler) {
      sinfo = new FifoSchedulerInfo(this.rm);
    } else {
      throw new NotFoundException("Unknown scheduler configured");
    }
    return new SchedulerTypeInfo(sinfo);
  }

  @GET
  @Path("/nodes")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public NodesInfo getNodes(@QueryParam("state") String filterState,
      @QueryParam("healthy") String healthState) {
    ResourceScheduler sched = this.rm.getResourceScheduler();
    if (sched == null) {
      throw new NotFoundException("Null ResourceScheduler instance");
    }

    NodesInfo allNodes = new NodesInfo();
    for (RMNode ni : this.rm.getRMContext().getRMNodes().values()) {
      NodeInfo nodeInfo = new NodeInfo(ni, sched);
      if (filterState != null) {
        RMNodeState.valueOf(filterState);
        if (!(nodeInfo.getState().equalsIgnoreCase(filterState))) {
          continue;
        }
      }
      if ((healthState != null) && (!healthState.isEmpty())) {
        LOG.info("health state is : " + healthState);
        if (!healthState.equalsIgnoreCase("true")
            && !healthState.equalsIgnoreCase("false")) {
          String msg = "Error: You must specify either true or false to query on health";
          throw new BadRequestException(msg);
        }
        if (nodeInfo.isHealthy() != Boolean.parseBoolean(healthState)) {
          continue;
        }
      }
      allNodes.add(nodeInfo);
    }
    return allNodes;
  }

  @GET
  @Path("/nodes/{nodeId}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public NodeInfo getNode(@PathParam("nodeId") String nodeId) {
    if (nodeId == null || nodeId.isEmpty()) {
      throw new NotFoundException("nodeId, " + nodeId + ", is empty or null");
    }
    ResourceScheduler sched = this.rm.getResourceScheduler();
    if (sched == null) {
      throw new NotFoundException("Null ResourceScheduler instance");
    }
    NodeId nid = ConverterUtils.toNodeId(nodeId);
    RMNode ni = this.rm.getRMContext().getRMNodes().get(nid);
    if (ni == null) {
      throw new NotFoundException("nodeId, " + nodeId + ", is not found");
    }
    return new NodeInfo(ni, sched);
  }

  @GET
  @Path("/apps")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AppsInfo getApps(@Context HttpServletRequest hsr,
      @QueryParam("state") String stateQuery,
      @QueryParam("user") String userQuery,
      @QueryParam("queue") String queueQuery,
      @QueryParam("limit") String count,
      @QueryParam("startedTimeBegin") String startedBegin,
      @QueryParam("startedTimeEnd") String startedEnd,
      @QueryParam("finishedTimeBegin") String finishBegin,
      @QueryParam("finishedTimeEnd") String finishEnd) {
    long num = 0;
    boolean checkCount = false;
    boolean checkStart = false;
    boolean checkEnd = false;
    long countNum = 0;

    // defaults that cover the case where begin/end are not specified
    long sBegin = 0;
    long sEnd = Long.MAX_VALUE;
    long fBegin = 0;
    long fEnd = Long.MAX_VALUE;

    if (count != null && !count.isEmpty()) {
      checkCount = true;
      countNum = Long.parseLong(count);
      if (countNum <= 0) {
        throw new BadRequestException("limit value must be greater than 0");
      }
    }

    if (startedBegin != null && !startedBegin.isEmpty()) {
      checkStart = true;
      sBegin = Long.parseLong(startedBegin);
      if (sBegin < 0) {
        throw new BadRequestException("startedTimeBegin must be greater than 0");
      }
    }
    if (startedEnd != null && !startedEnd.isEmpty()) {
      checkStart = true;
      sEnd = Long.parseLong(startedEnd);
      if (sEnd < 0) {
        throw new BadRequestException("startedTimeEnd must be greater than 0");
      }
    }
    if (sBegin > sEnd) {
      throw new BadRequestException(
          "startedTimeEnd must be greater than startTimeBegin");
    }

    if (finishBegin != null && !finishBegin.isEmpty()) {
      checkEnd = true;
      fBegin = Long.parseLong(finishBegin);
      if (fBegin < 0) {
        throw new BadRequestException("finishTimeBegin must be greater than 0");
      }
    }
    if (finishEnd != null && !finishEnd.isEmpty()) {
      checkEnd = true;
      fEnd = Long.parseLong(finishEnd);
      if (fEnd < 0) {
        throw new BadRequestException("finishTimeEnd must be greater than 0");
      }
    }
    if (fBegin > fEnd) {
      throw new BadRequestException(
          "finishTimeEnd must be greater than finishTimeBegin");
    }

    final ConcurrentMap<ApplicationId, RMApp> apps = rm.getRMContext()
        .getRMApps();
    AppsInfo allApps = new AppsInfo();
    for (RMApp rmapp : apps.values()) {
      if (checkCount && num == countNum) {
        break;
      }
      AppInfo app = new AppInfo(rmapp, hasAccess(rmapp, hsr));

      if (stateQuery != null && !stateQuery.isEmpty()) {
        RMAppState.valueOf(stateQuery);
        if (!app.getState().equalsIgnoreCase(stateQuery)) {
          continue;
        }
      }
      if (userQuery != null && !userQuery.isEmpty()) {
        if (!app.getUser().equals(userQuery)) {
          continue;
        }
      }
      if (queueQuery != null && !queueQuery.isEmpty()) {
        ResourceScheduler rs = rm.getResourceScheduler();
        if (rs instanceof CapacityScheduler) {
          CapacityScheduler cs = (CapacityScheduler) rs;
          // validate queue exists
          try {
            cs.getQueueInfo(queueQuery, false, false);
          } catch (IOException e) {
            throw new BadRequestException(e.getMessage());
          }
        }
        if (!app.getQueue().equals(queueQuery)) {
          continue;
        }
      }

      if (checkStart
          && (app.getStartTime() < sBegin || app.getStartTime() > sEnd)) {
        continue;
      }
      if (checkEnd
          && (app.getFinishTime() < fBegin || app.getFinishTime() > fEnd)) {
        continue;
      }

      allApps.add(app);
      num++;
    }
    return allApps;
  }

  @GET
  @Path("/apps/{appid}")
  @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML })
  public AppInfo getApp(@Context HttpServletRequest hsr,
      @PathParam("appid") String appId) {
    if (appId == null || appId.isEmpty()) {
      throw new NotFoundException("appId, " + appId + ", is empty or null");
    }
    ApplicationId id;
    id = ConverterUtils.toApplicationId(recordFactory, appId);
    if (id == null) {
      throw new NotFoundException("appId is null");
    }
    RMApp app = rm.getRMContext().getRMApps().get(id);
    if (app == null) {
      throw new NotFoundException("app with id: " + appId + " not found");
    }
    return new AppInfo(app, hasAccess(app, hsr));
  }

}
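A client-side sketch (not part of the patch) that exercises the endpoints defined above. The host and port are assumptions -- substitute the configured RM web address; 8088 is only the common default -- and the Accept header picks JSON over the XML alternative:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RestClientSketch {
  public static void main(String[] args) throws Exception {
    // matches @Path("/ws/v1/cluster") plus @Path("/apps"), with the
    // state/limit query params handled by getApps(...)
    URL url = new URL(
        "http://localhost:8088/ws/v1/cluster/apps?state=RUNNING&limit=10");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestProperty("Accept", "application/json");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    for (String line; (line = in.readLine()) != null;) {
      System.out.println(line);
    }
    in.close();
  }
}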
@@ -26,17 +26,16 @@ import javax.servlet.http.HttpServletResponse;

import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.util.Apps;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;
import org.apache.hadoop.yarn.webapp.Controller;
import org.apache.hadoop.yarn.webapp.ResponseInfo;

@@ -73,13 +72,14 @@ public class RmController extends Controller {

    }
    ApplicationId appID = Apps.toAppID(aid);
    RMContext context = getInstance(RMContext.class);
    RMApp app = context.getRMApps().get(appID);
    if (app == null) {
    RMApp rmApp = context.getRMApps().get(appID);
    if (rmApp == null) {
      // TODO: handle redirect to jobhistory server
      setStatus(HttpServletResponse.SC_NOT_FOUND);
      setTitle("Application not found: "+ aid);
      return;
    }
    AppInfo app = new AppInfo(rmApp, true);

    // Check for the authorization.
    String remoteUser = request().getRemoteUser();

@@ -98,32 +98,22 @@ public class RmController extends Controller {

    }

    setTitle(join("Application ", aid));
    String trackingUrl = app.getTrackingUrl();
    boolean trackingUrlIsNotReady = trackingUrl == null
        || trackingUrl.isEmpty() || "N/A".equalsIgnoreCase(trackingUrl);
    String ui = trackingUrlIsNotReady ? "UNASSIGNED" :
        (app.getFinishTime() == 0 ? "ApplicationMaster" : "History");

    ResponseInfo info = info("Application Overview").
      _("User:", app.getUser()).
      _("Name:", app.getName()).
      _("State:", app.getState().toString()).
      _("FinalStatus:", app.getFinalApplicationStatus().toString()).
      _("State:", app.getState()).
      _("FinalStatus:", app.getFinalStatus()).
      _("Started:", Times.format(app.getStartTime())).
      _("Elapsed:", StringUtils.formatTime(
        Times.elapsed(app.getStartTime(), app.getFinishTime()))).
      _("Tracking URL:", trackingUrlIsNotReady ?
        "#" : join("http://", trackingUrl), ui).
      _("Diagnostics:", app.getDiagnostics());
    Container masterContainer = app.getCurrentAppAttempt()
        .getMasterContainer();
    if (masterContainer != null) {
      String url = join("http://", masterContainer.getNodeHttpAddress(),
          "/node", "/containerlogs/",
          ConverterUtils.toString(masterContainer.getId()));
      info._("AM container logs:", url, url);
      _("Tracking URL:", !app.isTrackingUrlReady() ?
        "#" : app.getTrackingUrlPretty(), app.getTrackingUI()).
      _("Diagnostics:", app.getNote());
    if (app.amContainerLogsExist()) {
      info._("AM container logs:", app.getAMContainerLogs(), app.getAMContainerLogs());
    } else {
      info._("AM container logs:", "AM not yet registered with RM");
      info._("AM container logs:", "");
    }
    render(AppPage.class);
  }
@@ -0,0 +1,213 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import static org.apache.hadoop.yarn.util.StringHelper.join;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Times;

@XmlRootElement(name = "app")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppInfo {

  @XmlTransient
  protected String appIdNum;
  @XmlTransient
  protected boolean trackingUrlIsNotReady;
  @XmlTransient
  protected String trackingUrlPretty;
  @XmlTransient
  protected boolean amContainerLogsExist = false;
  @XmlTransient
  protected ApplicationId applicationId;

  // these are ok for any user to see
  protected String id;
  protected String user;
  protected String name;
  protected String queue;
  protected RMAppState state;
  protected FinalApplicationStatus finalStatus;
  protected float progress;
  protected String trackingUI;
  protected String trackingUrl;
  protected String diagnostics;
  protected long clusterId;

  // these are only allowed if acls allow
  protected long startedTime;
  protected long finishedTime;
  protected long elapsedTime;
  protected String amContainerLogs;
  protected String amHostHttpAddress;

  public AppInfo() {
  } // JAXB needs this

  public AppInfo(RMApp app, Boolean hasAccess, String host) {
    this(app, hasAccess);
  }

  public AppInfo(RMApp app, Boolean hasAccess) {

    if (app != null) {
      String trackingUrl = app.getTrackingUrl();
      this.trackingUrlIsNotReady = trackingUrl == null || trackingUrl.isEmpty()
          || "N/A".equalsIgnoreCase(trackingUrl);
      this.trackingUI = this.trackingUrlIsNotReady ? "UNASSIGNED" : (app
          .getFinishTime() == 0 ? "ApplicationMaster" : "History");
      if (!trackingUrlIsNotReady) {
        this.trackingUrl = join("http://", trackingUrl);
      }
      this.trackingUrlPretty = trackingUrlIsNotReady ? "UNASSIGNED" : join(
          "http://", trackingUrl);
      this.applicationId = app.getApplicationId();
      this.appIdNum = String.valueOf(app.getApplicationId().getId());
      this.id = app.getApplicationId().toString();
      this.user = app.getUser().toString();
      this.name = app.getName().toString();
      this.queue = app.getQueue().toString();
      this.state = app.getState();
      this.progress = app.getProgress() * 100;
      this.diagnostics = app.getDiagnostics().toString();
      if (diagnostics == null || diagnostics.isEmpty()) {
        this.diagnostics = "";
      }
      this.finalStatus = app.getFinalApplicationStatus();
      this.clusterId = ResourceManager.clusterTimeStamp;

      if (hasAccess) {
        this.startedTime = app.getStartTime();
        this.finishedTime = app.getFinishTime();
        this.elapsedTime = Times.elapsed(app.getStartTime(),
            app.getFinishTime());

        RMAppAttempt attempt = app.getCurrentAppAttempt();
        if (attempt != null) {
          Container masterContainer = attempt.getMasterContainer();
          if (masterContainer != null) {
            this.amContainerLogsExist = true;
            String url = join("http://", masterContainer.getNodeHttpAddress(),
                "/node", "/containerlogs/",
                ConverterUtils.toString(masterContainer.getId()));
            this.amContainerLogs = url;
            this.amHostHttpAddress = masterContainer.getNodeHttpAddress();
          }
        }
      }
    }
  }

  public boolean isTrackingUrlReady() {
    return !this.trackingUrlIsNotReady;
  }

  public ApplicationId getApplicationId() {
    return this.applicationId;
  }

  public String getAppId() {
    return this.id;
  }

  public String getAppIdNum() {
    return this.appIdNum;
  }

  public String getUser() {
    return this.user;
  }

  public String getQueue() {
    return this.queue;
  }

  public String getName() {
    return this.name;
  }

  public String getState() {
    return this.state.toString();
  }

  public float getProgress() {
    return this.progress;
  }

  public String getTrackingUI() {
    return this.trackingUI;
  }

  public String getNote() {
    return this.diagnostics;
  }

  public String getFinalStatus() {
    return this.finalStatus.toString();
  }

  public String getTrackingUrl() {
    return this.trackingUrl;
  }

  public String getTrackingUrlPretty() {
    return this.trackingUrlPretty;
  }

  public long getStartTime() {
    return this.startedTime;
  }

  public long getFinishTime() {
    return this.finishedTime;
  }

  public long getElapsedTime() {
    return this.elapsedTime;
  }

  public String getAMContainerLogs() {
    return this.amContainerLogs;
  }

  public String getAMHostHttpAddress() {
    return this.amHostHttpAddress;
  }

  public boolean amContainerLogsExist() {
    return this.amContainerLogsExist;
  }

  public long getClusterId() {
    return this.clusterId;
  }

}
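Since AppInfo doubles as the JAXB payload for both XML and JSON, the XML side can be exercised with nothing but the standard JAXB runtime. A sketch (not part of the patch); marshalling the no-arg instance just shows the element shape, with @XmlTransient fields omitted:

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.AppInfo;

public class AppInfoXmlSketch {
  public static void main(String[] args) throws Exception {
    JAXBContext ctx = JAXBContext.newInstance(AppInfo.class);
    Marshaller m = ctx.createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.marshal(new AppInfo(), System.out); // <app>...</app>
  }
}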
@@ -0,0 +1,43 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "apps")
@XmlAccessorType(XmlAccessType.FIELD)
public class AppsInfo {

  protected ArrayList<AppInfo> app = new ArrayList<AppInfo>();

  public AppsInfo() {
  } // JAXB needs this

  public void add(AppInfo appinfo) {
    app.add(appinfo);
  }

  public ArrayList<AppInfo> getApps() {
    return app;
  }

}
@@ -0,0 +1,107 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
import javax.xml.bind.annotation.XmlType;

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.ParentQueue;

@XmlRootElement(name = "capacityScheduler")
@XmlType(name = "capacityScheduler")
@XmlAccessorType(XmlAccessType.FIELD)
public class CapacitySchedulerInfo extends SchedulerInfo {

  protected float capacity;
  protected float usedCapacity;
  protected float maxCapacity;
  protected String queueName;
  protected ArrayList<CapacitySchedulerQueueInfo> queues;

  @XmlTransient
  static final float EPSILON = 1e-8f;

  public CapacitySchedulerInfo() {
  } // JAXB needs this

  public CapacitySchedulerInfo(CSQueue parent) {
    this.queueName = parent.getQueueName();
    this.usedCapacity = parent.getUsedCapacity() * 100;
    this.capacity = parent.getCapacity() * 100;
    float max = parent.getMaximumCapacity();
    if (max < EPSILON || max > 1f)
      max = 1f;
    this.maxCapacity = max * 100;

    queues = getQueues(parent);
  }

  public float getCapacity() {
    return this.capacity;
  }

  public float getUsedCapacity() {
    return this.usedCapacity;
  }

  public float getMaxCapacity() {
    return this.maxCapacity;
  }

  public String getQueueName() {
    return this.queueName;
  }

  public ArrayList<CapacitySchedulerQueueInfo> getSubQueues() {
    return this.queues;
  }

  protected ArrayList<CapacitySchedulerQueueInfo> getQueues(CSQueue parent) {
    CSQueue parentQueue = parent;
    ArrayList<CapacitySchedulerQueueInfo> queuesInfo = new ArrayList<CapacitySchedulerQueueInfo>();
    for (CSQueue queue : parentQueue.getChildQueues()) {
      float usedCapacity = queue.getUsedCapacity() * 100;
      float capacity = queue.getCapacity() * 100;
      String queueName = queue.getQueueName();
      String queuePath = queue.getQueuePath();
      float max = queue.getMaximumCapacity();
      if (max < EPSILON || max > 1f)
        max = 1f;
      float maxCapacity = max * 100;
      String state = queue.getState().toString();
      CapacitySchedulerQueueInfo info = new CapacitySchedulerQueueInfo(
          capacity, usedCapacity, maxCapacity, queueName, state, queuePath);

      if (queue instanceof ParentQueue) {
        info.isParent = true;
        info.queue = queue;
        info.subQueues = getQueues(queue);
      }
      queuesInfo.add(info);
    }
    return queuesInfo;
  }

}
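getQueues(...) recurses into every ParentQueue, so the dao carries the full queue hierarchy, with subQueues left null on leaf queues. A sketch (not part of the patch) of walking that tree, indenting by depth:

import java.util.ArrayList;

import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerInfo;
import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.CapacitySchedulerQueueInfo;

public class QueueTreeSketch {
  static void print(ArrayList<CapacitySchedulerQueueInfo> queues, String indent) {
    if (queues == null) {
      return; // leaf queues have no subQueues list
    }
    for (CapacitySchedulerQueueInfo q : queues) {
      System.out.println(indent + q.getQueueName() + ": "
          + q.getUsedCapacity() + "% used of " + q.getCapacity() + "%");
      print(q.getSubQueues(), indent + "  ");
    }
  }

  public static void printAll(CapacitySchedulerInfo sinfo) {
    print(sinfo.getSubQueues(), "");
  }
}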
@@ -0,0 +1,98 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class CapacitySchedulerQueueInfo {

  @XmlTransient
  protected String queuePath;
  @XmlTransient
  protected Boolean isParent = false;

  // bit odd to store this but makes html easier for now
  @XmlTransient
  protected CSQueue queue;

  protected float capacity;
  protected float usedCapacity;
  protected float maxCapacity;
  protected String queueName;
  protected String state;
  protected ArrayList<CapacitySchedulerQueueInfo> subQueues;

  CapacitySchedulerQueueInfo() {
  };

  CapacitySchedulerQueueInfo(float cap, float used, float max, String name,
      String state, String path) {
    this.capacity = cap;
    this.usedCapacity = used;
    this.maxCapacity = max;
    this.queueName = name;
    this.state = state;
    this.queuePath = path;
  }

  public Boolean isParent() {
    return this.isParent;
  }

  public CSQueue getQueue() {
    return this.queue;
  }

  public float getCapacity() {
    return this.capacity;
  }

  public float getUsedCapacity() {
    return this.usedCapacity;
  }

  public float getMaxCapacity() {
    return this.maxCapacity;
  }

  public String getQueueName() {
    return this.queueName;
  }

  public String getQueueState() {
    return this.state;
  }

  public String getQueuePath() {
    return this.queuePath;
  }

  public ArrayList<CapacitySchedulerQueueInfo> getSubQueues() {
    return this.subQueues;
  }

}
@@ -0,0 +1,95 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.util.VersionInfo;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.util.YarnVersionInfo;

@XmlRootElement
@XmlAccessorType(XmlAccessType.FIELD)
public class ClusterInfo {

  protected long id;
  protected long startedOn;
  protected String state;
  protected String resourceManagerVersion;
  protected String resourceManagerBuildVersion;
  protected String resourceManagerVersionBuiltOn;
  protected String hadoopVersion;
  protected String hadoopBuildVersion;
  protected String hadoopVersionBuiltOn;

  public ClusterInfo() {
  } // JAXB needs this

  public ClusterInfo(ResourceManager rm) {
    long ts = ResourceManager.clusterTimeStamp;

    this.id = ts;
    this.state = rm.getServiceState().toString();
    this.startedOn = ts;
    this.resourceManagerVersion = YarnVersionInfo.getVersion();
    this.resourceManagerBuildVersion = YarnVersionInfo.getBuildVersion();
    this.resourceManagerVersionBuiltOn = YarnVersionInfo.getDate();
    this.hadoopVersion = VersionInfo.getVersion();
    this.hadoopBuildVersion = VersionInfo.getBuildVersion();
    this.hadoopVersionBuiltOn = VersionInfo.getDate();
  }

  public String getState() {
    return this.state;
  }

  public String getRMVersion() {
    return this.resourceManagerVersion;
  }

  public String getRMBuildVersion() {
    return this.resourceManagerBuildVersion;
  }

  public String getRMVersionBuiltOn() {
    return this.resourceManagerVersionBuiltOn;
  }

  public String getHadoopVersion() {
    return this.hadoopVersion;
  }

  public String getHadoopBuildVersion() {
    return this.hadoopBuildVersion;
  }

  public String getHadoopVersionBuiltOn() {
    return this.hadoopVersionBuiltOn;
  }

  public long getClusterId() {
    return this.id;
  }

  public long getStartedOn() {
    return this.startedOn;
  }

}
@@ -0,0 +1,114 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

import org.apache.hadoop.yarn.server.resourcemanager.ClusterMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;

@XmlRootElement(name = "clusterMetrics")
@XmlAccessorType(XmlAccessType.FIELD)
public class ClusterMetricsInfo {

  private static final long MB_IN_GB = 1024;

  protected int appsSubmitted;
  protected long reservedMB;
  protected long availableMB;
  protected long allocatedMB;
  protected int containersAllocated;
  protected long totalMB;
  protected int totalNodes;
  protected int lostNodes;
  protected int unhealthyNodes;
  protected int decommissionedNodes;
  protected int rebootedNodes;

  public ClusterMetricsInfo() {
  } // JAXB needs this

  public ClusterMetricsInfo(final ResourceManager rm, final RMContext rmContext) {
    ResourceScheduler rs = rm.getResourceScheduler();
    QueueMetrics metrics = rs.getRootQueueMetrics();
    ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();

    this.appsSubmitted = metrics.getAppsSubmitted();
    this.reservedMB = metrics.getReservedGB() * MB_IN_GB;
    this.availableMB = metrics.getAvailableGB() * MB_IN_GB;
    this.allocatedMB = metrics.getAllocatedGB() * MB_IN_GB;
    this.containersAllocated = metrics.getAllocatedContainers();
    this.totalMB = availableMB + reservedMB + allocatedMB;
    this.totalNodes = clusterMetrics.getNumNMs();
    this.lostNodes = clusterMetrics.getNumLostNMs();
    this.unhealthyNodes = clusterMetrics.getUnhealthyNMs();
    this.decommissionedNodes = clusterMetrics.getNumDecommisionedNMs();
    this.rebootedNodes = clusterMetrics.getNumRebootedNMs();

  }

  public int getAppsSubmitted() {
    return this.appsSubmitted;
  }

  public long getReservedMB() {
    return this.reservedMB;
  }

  public long getAvailableMB() {
    return this.availableMB;
  }

  public long getAllocatedMB() {
    return this.allocatedMB;
  }

  public int getContainersAllocated() {
    return this.containersAllocated;
  }

  public long getTotalMB() {
    return this.totalMB;
  }

  public int getTotalNodes() {
    return this.totalNodes;
  }

  public int getLostNodes() {
    return this.lostNodes;
  }

  public int getRebootedNodes() {
    return this.rebootedNodes;
  }

  public int getUnhealthyNodes() {
    return this.unhealthyNodes;
  }

  public int getDecommissionedNodes() {
|
||||
return this.decommissionedNodes;
|
||||
}
|
||||
|
||||
}
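
Because the class uses XmlAccessType.FIELD, every protected field above becomes one element of the response. A quick sketch to see the XML shape (a default-constructed instance rather than live RM metrics, so all values are zero; assumes javax.xml.bind on the classpath):

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

public class ClusterMetricsXmlDemo {
  public static void main(String[] args) throws Exception {
    JAXBContext ctx = JAXBContext.newInstance(ClusterMetricsInfo.class);
    Marshaller m = ctx.createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    // Prints <clusterMetrics> with one child element per field,
    // e.g. <appsSubmitted>0</appsSubmitted><reservedMB>0</reservedMB>...
    m.marshal(new ClusterMetricsInfo(), System.out);
  }
}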

@ -0,0 +1,133 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;
import javax.xml.bind.annotation.XmlType;

import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueState;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;

@XmlRootElement(name = "fifoScheduler")
@XmlType(name = "fifoScheduler")
@XmlAccessorType(XmlAccessType.FIELD)
public class FifoSchedulerInfo extends SchedulerInfo {

  protected float capacity;
  protected float usedCapacity;
  protected QueueState qstate;
  protected int minQueueMemoryCapacity;
  protected int maxQueueMemoryCapacity;
  protected int numNodes;
  protected int usedNodeCapacity;
  protected int availNodeCapacity;
  protected int totalNodeCapacity;
  protected int numContainers;

  @XmlTransient
  protected String qstateFormatted;

  @XmlTransient
  protected String qName;

  public FifoSchedulerInfo() {
  } // JAXB needs this

  public FifoSchedulerInfo(final ResourceManager rm) {

    RMContext rmContext = rm.getRMContext();

    FifoScheduler fs = (FifoScheduler) rm.getResourceScheduler();
    qName = fs.getQueueInfo("", false, false).getQueueName();
    QueueInfo qInfo = fs.getQueueInfo(qName, true, true);

    this.usedCapacity = qInfo.getCurrentCapacity();
    this.capacity = qInfo.getCapacity();
    this.minQueueMemoryCapacity = fs.getMinimumResourceCapability().getMemory();
    this.maxQueueMemoryCapacity = fs.getMaximumResourceCapability().getMemory();
    this.qstate = qInfo.getQueueState();

    this.numNodes = rmContext.getRMNodes().size();
    this.usedNodeCapacity = 0;
    this.availNodeCapacity = 0;
    this.totalNodeCapacity = 0;
    this.numContainers = 0;

    for (RMNode ni : rmContext.getRMNodes().values()) {
      SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID());
      this.usedNodeCapacity += report.getUsedResource().getMemory();
      this.availNodeCapacity += report.getAvailableResource().getMemory();
      this.totalNodeCapacity += ni.getTotalCapability().getMemory();
      this.numContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers();
    }
  }

  public int getNumNodes() {
    return this.numNodes;
  }

  public int getUsedNodeCapacity() {
    return this.usedNodeCapacity;
  }

  public int getAvailNodeCapacity() {
    return this.availNodeCapacity;
  }

  public int getTotalNodeCapacity() {
    return this.totalNodeCapacity;
  }

  public int getNumContainers() {
    return this.numContainers;
  }

  public String getState() {
    return this.qstate.toString();
  }

  public String getQueueName() {
    return this.qName;
  }

  public int getMinQueueMemoryCapacity() {
    return this.minQueueMemoryCapacity;
  }

  public int getMaxQueueMemoryCapacity() {
    return this.maxQueueMemoryCapacity;
  }

  public float getCapacity() {
    return this.capacity;
  }

  public float getUsedCapacity() {
    return this.usedCapacity;
  }

}

@ -0,0 +1,122 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.yarn.api.records.NodeHealthStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;

@XmlRootElement(name = "node")
@XmlAccessorType(XmlAccessType.FIELD)
public class NodeInfo {

  protected String rack;
  protected RMNodeState state;
  protected String id;
  protected String nodeHostName;
  protected String nodeHTTPAddress;
  protected String healthStatus;
  protected long lastHealthUpdate;
  protected String healthReport;
  protected int numContainers;
  protected long usedMemoryMB;
  protected long availMemoryMB;

  @XmlTransient
  protected boolean healthy;

  public NodeInfo() {
  } // JAXB needs this

  public NodeInfo(RMNode ni, ResourceScheduler sched) {
    NodeId id = ni.getNodeID();
    SchedulerNodeReport report = sched.getNodeReport(id);
    NodeHealthStatus health = ni.getNodeHealthStatus();
    this.numContainers = 0;
    this.usedMemoryMB = 0;
    this.availMemoryMB = 0;
    if (report != null) {
      this.numContainers = report.getNumContainers();
      this.usedMemoryMB = report.getUsedResource().getMemory();
      this.availMemoryMB = report.getAvailableResource().getMemory();
    }
    this.id = id.toString();
    this.rack = ni.getRackName();
    this.nodeHostName = ni.getHostName();
    this.state = ni.getState();
    this.nodeHTTPAddress = ni.getHttpAddress();
    this.healthy = health.getIsNodeHealthy();
    this.healthStatus = health.getIsNodeHealthy() ? "Healthy" : "Unhealthy";
    this.lastHealthUpdate = health.getLastHealthReportTime();
    this.healthReport = String.valueOf(health.getHealthReport());
  }

  public boolean isHealthy() {
    return this.healthy;
  }

  public String getRack() {
    return this.rack;
  }

  public String getState() {
    return String.valueOf(this.state);
  }

  public String getNodeId() {
    return this.id;
  }

  public String getNodeHTTPAddress() {
    return this.nodeHTTPAddress;
  }

  public String getHealthStatus() {
    return this.healthStatus;
  }

  public long getLastHealthUpdate() {
    return this.lastHealthUpdate;
  }

  public String getHealthReport() {
    return this.healthReport;
  }

  public int getNumContainers() {
    return this.numContainers;
  }

  public long getUsedMemory() {
    return this.usedMemoryMB;
  }

  public long getAvailableMemory() {
    return this.availMemoryMB;
  }

}

@ -0,0 +1,39 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import java.util.ArrayList;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "nodes")
@XmlAccessorType(XmlAccessType.FIELD)
public class NodesInfo {

  protected ArrayList<NodeInfo> node = new ArrayList<NodeInfo>();

  public NodesInfo() {
  } // JAXB needs this

  public void add(NodeInfo nodeinfo) {
    node.add(nodeinfo);
  }

}
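
NodesInfo exists only to give the node list a stable wrapper element: each NodeInfo added becomes a repeated <node> child inside <nodes>. A sketch under the same assumptions as the demo above (empty NodeInfo instances just to show the wrapping, not live node state):

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

public class NodesInfoXmlDemo {
  public static void main(String[] args) throws Exception {
    NodesInfo nodes = new NodesInfo();
    nodes.add(new NodeInfo()); // default instance, fields at zero/null
    JAXBContext ctx = JAXBContext.newInstance(NodesInfo.class);
    Marshaller m = ctx.createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    m.marshal(nodes, System.out); // <nodes><node>...</node></nodes>
  }
}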

@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlSeeAlso;

@XmlRootElement
@XmlSeeAlso({ CapacitySchedulerInfo.class, FifoSchedulerInfo.class })
public class SchedulerInfo {

  public SchedulerInfo() {
  } // JAXB needs this

}

@ -0,0 +1,37 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;

@XmlRootElement(name = "scheduler")
@XmlAccessorType(XmlAccessType.FIELD)
public class SchedulerTypeInfo {
  protected SchedulerInfo schedulerInfo;

  public SchedulerTypeInfo() {
  } // JAXB needs this

  public SchedulerTypeInfo(final SchedulerInfo scheduler) {
    this.schedulerInfo = scheduler;
  }

}
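
SchedulerTypeInfo holds its payload through the SchedulerInfo base type; the @XmlSeeAlso on SchedulerInfo above is what lets JAXB resolve the concrete subclass at marshal time, so one <scheduler> envelope can carry either scheduler flavor. A sketch (default-constructed payload, both subclasses assumed on the classpath):

import javax.xml.bind.JAXBContext;
import javax.xml.bind.Marshaller;

public class SchedulerTypeXmlDemo {
  public static void main(String[] args) throws Exception {
    // Wrap a FifoSchedulerInfo in the generic envelope.
    SchedulerTypeInfo scheduler = new SchedulerTypeInfo(new FifoSchedulerInfo());
    JAXBContext ctx = JAXBContext.newInstance(SchedulerTypeInfo.class);
    Marshaller m = ctx.createMarshaller();
    m.setProperty(Marshaller.JAXB_FORMATTED_OUTPUT, Boolean.TRUE);
    // <scheduler><schedulerInfo xsi:type="fifoScheduler">...</schedulerInfo></scheduler>
    m.marshal(scheduler, System.out);
  }
}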

@ -0,0 +1,100 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao;

import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlRootElement;
import javax.xml.bind.annotation.XmlTransient;

import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;

@XmlRootElement(name = "userMetrics")
@XmlAccessorType(XmlAccessType.FIELD)
public class UserMetricsInfo {

  private static final long MB_IN_GB = 1024;

  protected int appsSubmitted;
  protected int runningContainers;
  protected int pendingContainers;
  protected int reservedContainers;
  protected long reservedMB;
  protected long pendingMB;
  protected long allocatedMB;

  @XmlTransient
  protected boolean userMetricsAvailable;

  public UserMetricsInfo() {
  } // JAXB needs this

  public UserMetricsInfo(final ResourceManager rm, final RMContext rmContext,
      final String user) {
    ResourceScheduler rs = rm.getResourceScheduler();
    QueueMetrics metrics = rs.getRootQueueMetrics();
    QueueMetrics userMetrics = metrics.getUserMetrics(user);
    this.userMetricsAvailable = false;

    if (userMetrics != null) {
      this.userMetricsAvailable = true;
      this.appsSubmitted = userMetrics.getAppsSubmitted();
      this.runningContainers = userMetrics.getAllocatedContainers();
      this.pendingContainers = userMetrics.getPendingContainers();
      this.reservedContainers = userMetrics.getReservedContainers();
      this.reservedMB = userMetrics.getReservedGB() * MB_IN_GB;
      this.pendingMB = userMetrics.getPendingGB() * MB_IN_GB;
      this.allocatedMB = userMetrics.getAllocatedGB() * MB_IN_GB;
    }
  }

  public boolean metricsAvailable() {
    return userMetricsAvailable;
  }

  public int getAppsSubmitted() {
    return this.appsSubmitted;
  }

  public long getReservedMB() {
    return this.reservedMB;
  }

  public long getAllocatedMB() {
    return this.allocatedMB;
  }

  public long getPendingMB() {
    return this.pendingMB;
  }

  public int getReservedContainers() {
    return this.reservedContainers;
  }

  public int getRunningContainers() {
    return this.runningContainers;
  }

  public int getPendingContainers() {
    return this.pendingContainers;
  }
}
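
Note that userMetricsAvailable is @XmlTransient: it never appears in a response, it only tells callers whether QueueMetrics tracked the requested user and the other fields were populated. A hypothetical caller sketch (the helper and its -1 convention are illustrative; only the UserMetricsInfo calls come from the patch):

import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;

public class UserMetricsDemo {
  // Returns -1 when QueueMetrics has no entry for the user.
  static long allocatedMbFor(ResourceManager rm, RMContext ctx, String user) {
    UserMetricsInfo info = new UserMetricsInfo(rm, ctx, user);
    return info.metricsAvailable() ? info.getAllocatedMB() : -1L;
  }
}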

@ -30,7 +30,9 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.StoreFactory;

@ -40,12 +42,16 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptLaunchFailedEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeState;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;


public class MockRM extends ResourceManager {

  public MockRM() {

@ -59,48 +65,50 @@ public class MockRM extends ResourceManager {
    rootLogger.setLevel(Level.DEBUG);
  }

  public void waitForState(ApplicationId appId, RMAppState finalState)
  public void waitForState(ApplicationId appId, RMAppState finalState)
      throws Exception {
    RMApp app = getRMContext().getRMApps().get(appId);
    Assert.assertNotNull("app shouldn't be null", app);
    int timeoutSecs = 0;
    while (!finalState.equals(app.getState()) &&
        timeoutSecs++ < 20) {
      System.out.println("App State is : " + app.getState() +
          " Waiting for state : " + finalState);
    while (!finalState.equals(app.getState()) && timeoutSecs++ < 20) {
      System.out.println("App State is : " + app.getState()
          + " Waiting for state : " + finalState);
      Thread.sleep(500);
    }
    System.out.println("App State is : " + app.getState());
    Assert.assertEquals("App state is not correct (timedout)",
        finalState, app.getState());
  }

  // get new application id
  public GetNewApplicationResponse getNewAppId() throws Exception {
    ClientRMProtocol client = getClientRMService();
    return client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
    Assert.assertEquals("App state is not correct (timedout)", finalState,
        app.getState());
  }

  //client
  // get new application id
  public GetNewApplicationResponse getNewAppId() throws Exception {
    ClientRMProtocol client = getClientRMService();
    return client.getNewApplication(Records
        .newRecord(GetNewApplicationRequest.class));
  }

  // client
  public RMApp submitApp(int masterMemory) throws Exception {
    ClientRMProtocol client = getClientRMService();
    GetNewApplicationResponse resp = client.getNewApplication(Records.newRecord(GetNewApplicationRequest.class));
    GetNewApplicationResponse resp = client.getNewApplication(Records
        .newRecord(GetNewApplicationRequest.class));
    ApplicationId appId = resp.getApplicationId();

    SubmitApplicationRequest req = Records.newRecord(SubmitApplicationRequest.class);
    ApplicationSubmissionContext sub =
        Records.newRecord(ApplicationSubmissionContext.class);

    SubmitApplicationRequest req = Records
        .newRecord(SubmitApplicationRequest.class);
    ApplicationSubmissionContext sub = Records
        .newRecord(ApplicationSubmissionContext.class);
    sub.setApplicationId(appId);
    sub.setApplicationName("");
    sub.setUser("");
    ContainerLaunchContext clc =
        Records.newRecord(ContainerLaunchContext.class);
    Resource capability = Records.newRecord(Resource.class);
    ContainerLaunchContext clc = Records
        .newRecord(ContainerLaunchContext.class);
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(masterMemory);
    clc.setResource(capability);
    sub.setAMContainerSpec(clc);
    req.setApplicationSubmissionContext(sub);


    client.submitApplication(req);
    // make sure app is immediately available after submit
    waitForState(appId, RMAppState.ACCEPTED);

@ -113,28 +121,54 @@ public class MockRM extends ResourceManager {
    return nm;
  }

  public void sendNodeStarted(MockNM nm) throws Exception {
    RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get(
        nm.getNodeId());
    node.handle(new RMNodeEvent(nm.getNodeId(), RMNodeEventType.STARTED));
  }

  public void NMwaitForState(NodeId nodeid, RMNodeState finalState)
      throws Exception {
    RMNode node = getRMContext().getRMNodes().get(nodeid);
    Assert.assertNotNull("node shouldn't be null", node);
    int timeoutSecs = 0;
    while (!finalState.equals(node.getState()) && timeoutSecs++ < 20) {
      System.out.println("Node State is : " + node.getState()
          + " Waiting for state : " + finalState);
      Thread.sleep(500);
    }
    System.out.println("Node State is : " + node.getState());
    Assert.assertEquals("Node state is not correct (timedout)", finalState,
        node.getState());
  }

  public void killApp(ApplicationId appId) throws Exception {
    ClientRMProtocol client = getClientRMService();
    KillApplicationRequest req = Records.newRecord(KillApplicationRequest.class);
    KillApplicationRequest req = Records
        .newRecord(KillApplicationRequest.class);
    req.setApplicationId(appId);
    client.forceKillApplication(req);
  }

  //from AMLauncher
  public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId) throws Exception {
  // from AMLauncher
  public MockAM sendAMLaunched(ApplicationAttemptId appAttemptId)
      throws Exception {
    MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
    am.waitForState(RMAppAttemptState.ALLOCATED);
    getRMContext().getDispatcher().getEventHandler().handle(
        new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED));
    getRMContext()
        .getDispatcher()
        .getEventHandler()
        .handle(
            new RMAppAttemptEvent(appAttemptId, RMAppAttemptEventType.LAUNCHED));
    return am;
  }


  public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId) throws Exception {
  public void sendAMLaunchFailed(ApplicationAttemptId appAttemptId)
      throws Exception {
    MockAM am = new MockAM(getRMContext(), masterService, appAttemptId);
    am.waitForState(RMAppAttemptState.ALLOCATED);
    getRMContext().getDispatcher().getEventHandler().handle(
        new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed"));
    getRMContext().getDispatcher().getEventHandler()
        .handle(new RMAppAttemptLaunchFailedEvent(appAttemptId, "Failed"));
  }

  @Override

@ -143,8 +177,9 @@ public class MockRM extends ResourceManager {
      rmAppManager, applicationACLsManager) {
      @Override
      public void start() {
        //override to not start rpc handler
        // override to not start rpc handler
      }

      @Override
      public void stop() {
        // don't do anything

@ -155,11 +190,12 @@ public class MockRM extends ResourceManager {
  @Override
  protected ResourceTrackerService createResourceTrackerService() {
    return new ResourceTrackerService(getRMContext(), nodesListManager,
        this.nmLivelinessMonitor, this.containerTokenSecretManager){
        this.nmLivelinessMonitor, this.containerTokenSecretManager) {
      @Override
      public void start() {
        //override to not start rpc handler
        // override to not start rpc handler
      }

      @Override
      public void stop() {
        // don't do anything

@ -173,8 +209,9 @@ public class MockRM extends ResourceManager {
        this.appTokenSecretManager, scheduler) {
      @Override
      public void start() {
        //override to not start rpc handler
        // override to not start rpc handler
      }

      @Override
      public void stop() {
        // don't do anything

@ -184,17 +221,18 @@ public class MockRM extends ResourceManager {

  @Override
  protected ApplicationMasterLauncher createAMLauncher() {
    return new ApplicationMasterLauncher(
        this.appTokenSecretManager, this.clientToAMSecretManager,
        getRMContext()) {
    return new ApplicationMasterLauncher(this.appTokenSecretManager,
        this.clientToAMSecretManager, getRMContext()) {
      @Override
      public void start() {
        //override to not start rpc handler
        // override to not start rpc handler
      }

      @Override
      public void handle(AMLauncherEvent appEvent) {
        //don't do anything
      public void handle(AMLauncherEvent appEvent) {
        // don't do anything
      }

      @Override
      public void stop() {
        // don't do anything

@ -203,31 +241,31 @@ public class MockRM extends ResourceManager {
  }

  @Override
  protected AdminService createAdminService(
      ClientRMService clientRMService,
  protected AdminService createAdminService(ClientRMService clientRMService,
      ApplicationMasterService applicationMasterService,
      ResourceTrackerService resourceTrackerService) {
    return new AdminService(
        getConfig(), scheduler, getRMContext(), this.nodesListManager,
        clientRMService, applicationMasterService, resourceTrackerService){
    return new AdminService(getConfig(), scheduler, getRMContext(),
        this.nodesListManager, clientRMService, applicationMasterService,
        resourceTrackerService) {
      @Override
      public void start() {
        //override to not start rpc handler
        // override to not start rpc handler
      }

      @Override
      public void stop() {
        // don't do anything
      }
    };
  }


  public NodesListManager getNodesListManager() {
    return this.nodesListManager;
  }

  @Override
  protected void startWepApp() {
    //override to disable webapp
    // override to disable webapp
  }

}
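
With the RPC handlers and the webapp stubbed out above, MockRM can drive a full application lifecycle inside a unit test. A typical usage sketch (JUnit 4 assumed; the memory value and KILLED transition are illustrative, not taken from this diff):

public class TestMockRMUsage {
  @org.junit.Test
  public void testAppSubmitAndKill() throws Exception {
    MockRM rm = new MockRM();
    rm.start();
    RMApp app = rm.submitApp(2048); // 2048 MB AM container
    // submitApp already waited for ACCEPTED; now kill and verify.
    rm.killApp(app.getApplicationId());
    rm.waitForState(app.getApplicationId(), RMAppState.KILLED);
    rm.stop();
  }
}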

@ -284,6 +284,11 @@ public abstract class MockAsm extends MockApps {
      public FinalApplicationStatus getFinalApplicationStatus() {
        return FinalApplicationStatus.UNDEFINED;
      }

      @Override
      public RMAppAttempt getCurrentAppAttempt() {
        return null;
      }

    };
  }

File diff suppressed because it is too large

@ -168,7 +168,7 @@
    <dependency>
      <groupId>com.google.inject.extensions</groupId>
      <artifactId>guice-servlet</artifactId>
      <version>2.0</version>
      <version>3.0</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>

@ -289,9 +289,30 @@
      <scope>runtime</scope>
    </dependency>
    <dependency>
      <groupId>com.google.inject.extensions</groupId>
      <artifactId>guice-servlet</artifactId>
      <version>2.0</version>
      <groupId>com.google.inject</groupId>
      <artifactId>guice</artifactId>
      <version>3.0</version>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey.jersey-test-framework</groupId>
      <artifactId>jersey-test-framework-core</artifactId>
      <version>1.8</version>
      <scope>test</scope>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey.jersey-test-framework</groupId>
      <artifactId>jersey-test-framework-grizzly2</artifactId>
      <version>1.8</version>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey</groupId>
      <artifactId>jersey-server</artifactId>
      <version>1.8</version>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey.contribs</groupId>
      <artifactId>jersey-guice</artifactId>
      <version>1.8</version>
    </dependency>
    <dependency>
      <groupId>org.jboss.netty</groupId>

@ -142,10 +142,25 @@
      <artifactId>hadoop-hdfs</artifactId>
      <version>${project.version}</version>
    </dependency>
    <dependency>
      <groupId>com.google.inject</groupId>
      <artifactId>guice</artifactId>
      <version>3.0</version>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey</groupId>
      <artifactId>jersey-server</artifactId>
      <version>1.8</version>
    </dependency>
    <dependency>
      <groupId>com.sun.jersey.contribs</groupId>
      <artifactId>jersey-guice</artifactId>
      <version>1.8</version>
    </dependency>
    <dependency>
      <groupId>com.google.inject.extensions</groupId>
      <artifactId>guice-servlet</artifactId>
      <version>2.0</version>
      <version>3.0</version>
    </dependency>
    <dependency>
      <groupId>junit</groupId>