+ UL<DIV<Hamlet>> mainList = html.
div("#nav").
h3("Cluster").
ul().
li().a(url("cluster"), "About")._().
- li().a(url("nodes"), "Nodes")._().
- li().a(url("apps"), "Applications")._().
+ li().a(url("nodes"), "Nodes")._();
+ UL<LI<UL<DIV<Hamlet>>>> subAppsList = mainList.
+ li().a(url("apps"), "Applications").
+ ul();
+ subAppsList.li()._();
+ for (RMAppState state : RMAppState.values()) {
+ subAppsList.
+ li().a(url("apps", state.toString()), state.toString())._();
+ }
+ subAppsList._()._();
+ mainList.
li().a(url("scheduler"), "Scheduler")._()._().
h3("Tools").
ul().
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
index 74266a0015c..90b0824f0cc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebApp.java
@@ -25,14 +25,12 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.GenericExceptionHandler;
import org.apache.hadoop.yarn.webapp.WebApp;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
/**
* The RM webapp
*/
-public class RMWebApp extends WebApp {
- static final String APP_ID = "app.id";
- static final String QUEUE_NAME = "queue.name";
- static final String NODE_STATE = "node.state";
+public class RMWebApp extends WebApp implements YarnWebParams {
private final ResourceManager rm;
@@ -53,9 +51,9 @@ public class RMWebApp extends WebApp {
}
route("/", RmController.class);
route(pajoin("/nodes", NODE_STATE), RmController.class, "nodes");
- route("/apps", RmController.class);
+ route(pajoin("/apps", APP_STATE), RmController.class);
route("/cluster", RmController.class, "about");
- route(pajoin("/app", APP_ID), RmController.class, "app");
+ route(pajoin("/app", APPLICATION_ID), RmController.class, "app");
route("/scheduler", RmController.class, "scheduler");
route(pajoin("/queue", QUEUE_NAME), RmController.class, "queue");
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
index bb3ff674edc..c8778b87be8 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmController.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.server.resourcemanager.webapp;
-import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.APP_ID;
import static org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp.QUEUE_NAME;
import static org.apache.hadoop.yarn.util.StringHelper.join;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID;
import javax.servlet.http.HttpServletResponse;
@@ -64,7 +64,7 @@ public class RmController extends Controller {
}
public void app() {
- String aid = $(APP_ID);
+ String aid = $(APPLICATION_ID);
if (aid.isEmpty()) {
setStatus(HttpServletResponse.SC_BAD_REQUEST);
setTitle("Bad request: requires application ID");
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
index 131952eb04b..47532c1562d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RmView.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp;
import org.apache.hadoop.yarn.webapp.SubView;
import org.apache.hadoop.yarn.webapp.view.TwoColumnLayout;
+import static org.apache.hadoop.yarn.util.StringHelper.sjoin;
+import static org.apache.hadoop.yarn.webapp.YarnWebParams.APP_STATE;
import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*;
// Do NOT rename/refactor this to RMView as it will wreak havoc
@@ -36,10 +38,14 @@ public class RmView extends TwoColumnLayout {
set(DATATABLES_ID, "apps");
set(initID(DATATABLES, "apps"), appsTableInit());
setTableStyles(html, "apps", ".queue {width:6em}", ".ui {width:8em}");
+
+ // Set the correct title.
+ String reqState = $(APP_STATE);
+ reqState = (reqState == null || reqState.isEmpty() ? "All" : reqState);
+ setTitle(sjoin(reqState, "Applications"));
}
protected void commonPreHead(Page.HTML<_> html) {
- //html.meta_http("refresh", "20");
set(ACCORDION_ID, "nav");
set(initID(ACCORDION, "nav"), "{autoHeight:false, active:0}");
set(THEMESWITCHER_ID, "themeswitcher");
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
index 7d63b057a19..d8db4bb99bc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java
@@ -31,8 +31,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
@XmlAccessorType(XmlAccessType.FIELD)
public class ClusterMetricsInfo {
- private static final long MB_IN_GB = 1024;
-
protected int appsSubmitted;
protected long reservedMB;
protected long availableMB;
@@ -55,9 +53,9 @@ public class ClusterMetricsInfo {
ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
this.appsSubmitted = metrics.getAppsSubmitted();
- this.reservedMB = metrics.getReservedGB() * MB_IN_GB;
- this.availableMB = metrics.getAvailableGB() * MB_IN_GB;
- this.allocatedMB = metrics.getAllocatedGB() * MB_IN_GB;
+ this.reservedMB = metrics.getReservedMB();
+ this.availableMB = metrics.getAvailableMB();
+ this.allocatedMB = metrics.getAllocatedMB();
this.containersAllocated = metrics.getAllocatedContainers();
this.totalMB = availableMB + reservedMB + allocatedMB;
this.activeNodes = clusterMetrics.getNumActiveNMs();
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
index 27e6a646d64..a6c1fcaac9f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/UserMetricsInfo.java
@@ -31,8 +31,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler
@XmlAccessorType(XmlAccessType.FIELD)
public class UserMetricsInfo {
- private static final long MB_IN_GB = 1024;
-
protected int appsSubmitted;
protected int runningContainers;
protected int pendingContainers;
@@ -60,9 +58,9 @@ public class UserMetricsInfo {
this.runningContainers = userMetrics.getAllocatedContainers();
this.pendingContainers = userMetrics.getPendingContainers();
this.reservedContainers = userMetrics.getReservedContainers();
- this.reservedMB = userMetrics.getReservedGB() * MB_IN_GB;
- this.pendingMB = userMetrics.getPendingGB() * MB_IN_GB;
- this.allocatedMB = userMetrics.getAllocatedGB() * MB_IN_GB;
+ this.reservedMB = userMetrics.getReservedMB();
+ this.pendingMB = userMetrics.getPendingMB();
+ this.allocatedMB = userMetrics.getAllocatedMB();
}
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
index 968ad90160b..71d5fcde681 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/resources/capacity-scheduler.xml
@@ -53,9 +53,9 @@
<property>
  <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
- <value>-1</value>
+ <value>100</value>
  <description>
- The maximum capacity of the default queue. A value of -1 disables this.
+ The maximum capacity of the default queue.
  </description>
</property>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
index 659bf55a5e0..fd41d91f6a6 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java
@@ -22,14 +22,14 @@ import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationMaster;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
-import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationStatus;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent;
@@ -41,7 +41,6 @@ import com.google.common.collect.Lists;
@InterfaceAudience.Private
public abstract class MockAsm extends MockApps {
- static final int DT = 1000000; // ms
public static class AppMasterBase implements ApplicationMaster {
@Override
@@ -232,9 +231,10 @@ public abstract class MockAsm extends MockApps {
final String user = newUserName();
final String name = newAppName();
final String queue = newQueue();
- final long start = System.currentTimeMillis() - (int)(Math.random()*DT);
- final long finish = Math.random() < 0.5 ? 0 :
- System.currentTimeMillis() + (int)(Math.random()*DT);
+ final long start = 123456 + i * 1000;
+ final long finish = 234567 + i * 1000;
+ RMAppState[] allStates = RMAppState.values();
+ final RMAppState state = allStates[i % allStates.length];
return new ApplicationBase() {
@Override
public ApplicationId getApplicationId() {
@@ -270,7 +270,7 @@ public abstract class MockAsm extends MockApps {
}
@Override
public RMAppState getState() {
- return RMAppState.RUNNING;
+ return state;
}
@Override
public StringBuilder getDiagnostics() {
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
index 0016b5efed3..3d6bd37e57e 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java
@@ -57,16 +57,16 @@ public class TestQueueMetrics {
metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
- checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
+ checkResources(queueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
metrics.incrAppsRunning(user);
checkApps(queueSource, 1, 0, 1, 0, 0, 0);
metrics.allocateResources(user, 3, Resources.createResource(2*GB));
- checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 0, 0);
+ checkResources(queueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 0, 0);
metrics.releaseResources(user, 1, Resources.createResource(2*GB));
- checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
+ checkResources(queueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
metrics.finishApp(app, RMAppAttemptState.FINISHED);
checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -92,20 +92,20 @@ public class TestQueueMetrics {
metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
- checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
- checkResources(userSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
+ checkResources(queueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
+ checkResources(userSource, 0, 0, 0, 0, 10*GB, 15*GB, 5, 0, 0);
metrics.incrAppsRunning(user);
checkApps(queueSource, 1, 0, 1, 0, 0, 0);
checkApps(userSource, 1, 0, 1, 0, 0, 0);
metrics.allocateResources(user, 3, Resources.createResource(2*GB));
- checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 0, 0);
- checkResources(userSource, 6, 3, 3, 0, 10, 9, 2, 0, 0);
+ checkResources(queueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(userSource, 6*GB, 3, 3, 0, 10*GB, 9*GB, 2, 0, 0);
metrics.releaseResources(user, 1, Resources.createResource(2*GB));
- checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
- checkResources(userSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
+ checkResources(queueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(userSource, 4*GB, 2, 3, 1, 10*GB, 9*GB, 2, 0, 0);
metrics.finishApp(app, RMAppAttemptState.FINISHED);
checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -141,10 +141,10 @@ public class TestQueueMetrics {
parentMetrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
metrics.setAvailableResourcesToUser(user, Resources.createResource(10*GB));
metrics.incrPendingResources(user, 5, Resources.createResource(15*GB));
- checkResources(queueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
- checkResources(parentQueueSource, 0, 0, 0, 0, 100, 15, 5, 0, 0);
- checkResources(userSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
- checkResources(parentUserSource, 0, 0, 0, 0, 10, 15, 5, 0, 0);
+ checkResources(queueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
+ checkResources(parentQueueSource, 0, 0, 0, 0, 100*GB, 15*GB, 5, 0, 0);
+ checkResources(userSource, 0, 0, 0, 0, 10*GB, 15*GB, 5, 0, 0);
+ checkResources(parentUserSource, 0, 0, 0, 0, 10*GB, 15*GB, 5, 0, 0);
metrics.incrAppsRunning(user);
checkApps(queueSource, 1, 0, 1, 0, 0, 0);
@@ -154,17 +154,17 @@ public class TestQueueMetrics {
metrics.reserveResource(user, Resources.createResource(3*GB));
// Available resources is set externally, as it depends on dynamic
// configurable cluster/queue resources
- checkResources(queueSource, 6, 3, 3, 0, 100, 9, 2, 3, 1);
- checkResources(parentQueueSource, 6, 3, 3, 0, 100, 9, 2, 3, 1);
- checkResources(userSource, 6, 3, 3, 0, 10, 9, 2, 3, 1);
- checkResources(parentUserSource, 6, 3, 3, 0, 10, 9, 2, 3, 1);
+ checkResources(queueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 3*GB, 1);
+ checkResources(parentQueueSource, 6*GB, 3, 3, 0, 100*GB, 9*GB, 2, 3*GB, 1);
+ checkResources(userSource, 6*GB, 3, 3, 0, 10*GB, 9*GB, 2, 3*GB, 1);
+ checkResources(parentUserSource, 6*GB, 3, 3, 0, 10*GB, 9*GB, 2, 3*GB, 1);
metrics.releaseResources(user, 1, Resources.createResource(2*GB));
metrics.unreserveResource(user, Resources.createResource(3*GB));
- checkResources(queueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
- checkResources(parentQueueSource, 4, 2, 3, 1, 100, 9, 2, 0, 0);
- checkResources(userSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
- checkResources(parentUserSource, 4, 2, 3, 1, 10, 9, 2, 0, 0);
+ checkResources(queueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(parentQueueSource, 4*GB, 2, 3, 1, 100*GB, 9*GB, 2, 0, 0);
+ checkResources(userSource, 4*GB, 2, 3, 1, 10*GB, 9*GB, 2, 0, 0);
+ checkResources(parentUserSource, 4*GB, 2, 3, 1, 10*GB, 9*GB, 2, 0, 0);
metrics.finishApp(app, RMAppAttemptState.FINISHED);
checkApps(queueSource, 1, 0, 0, 1, 0, 0);
@@ -184,18 +184,19 @@ public class TestQueueMetrics {
assertCounter("AppsKilled", killed, rb);
}
- public static void checkResources(MetricsSource source, int allocGB,
- int allocCtnrs, long aggreAllocCtnrs, long aggreReleasedCtnrs, int availGB, int pendingGB, int pendingCtnrs,
- int reservedGB, int reservedCtnrs) {
+ public static void checkResources(MetricsSource source, int allocatedMB,
+ int allocCtnrs, long aggreAllocCtnrs, long aggreReleasedCtnrs,
+ int availableMB, int pendingMB, int pendingCtnrs,
+ int reservedMB, int reservedCtnrs) {
MetricsRecordBuilder rb = getMetrics(source);
- assertGauge("AllocatedGB", allocGB, rb);
+ assertGauge("AllocatedMB", allocatedMB, rb);
assertGauge("AllocatedContainers", allocCtnrs, rb);
assertCounter("AggregateContainersAllocated", aggreAllocCtnrs, rb);
assertCounter("AggregateContainersReleased", aggreReleasedCtnrs, rb);
- assertGauge("AvailableGB", availGB, rb);
- assertGauge("PendingGB", pendingGB, rb);
+ assertGauge("AvailableMB", availableMB, rb);
+ assertGauge("PendingMB", pendingMB, rb);
assertGauge("PendingContainers", pendingCtnrs, rb);
- assertGauge("ReservedGB", reservedGB, rb);
+ assertGauge("ReservedMB", reservedMB, rb);
assertGauge("ReservedContainers", reservedCtnrs, rb);
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
index 5e4243cccb7..9db0288ad58 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java
@@ -251,7 +251,7 @@ public class TestLeafQueue {
// Only 1 container
a.assignContainers(clusterResource, node_0);
- assertEquals(7, a.getMetrics().getAvailableGB());
+ assertEquals(7*GB, a.getMetrics().getAvailableMB());
}
@@ -307,9 +307,9 @@ public class TestLeafQueue {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
- assertEquals(0, a.getMetrics().getAvailableGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
+ assertEquals(0*GB, a.getMetrics().getAvailableMB());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
@@ -317,16 +317,16 @@ public class TestLeafQueue {
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Can't allocate 3rd due to user-limit
a.assignContainers(clusterResource, node_0);
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Bump up user-limit-factor, now allocate should work
a.setUserLimitFactor(10);
@@ -334,16 +334,16 @@ public class TestLeafQueue {
assertEquals(3*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(3, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(3*GB, a.getMetrics().getAllocatedMB());
// One more should work, for app_1, due to user-limit-factor
a.assignContainers(clusterResource, node_0);
assertEquals(4*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(4, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(4*GB, a.getMetrics().getAllocatedMB());
// Test max-capacity
// Now - no more allocs since we are at max-cap
@@ -352,8 +352,8 @@ public class TestLeafQueue {
assertEquals(4*GB, a.getUsedResources().getMemory());
assertEquals(3*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(4, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(4*GB, a.getMetrics().getAllocatedMB());
// Release each container from app_0
for (RMContainer rmContainer : app_0.getLiveContainers()) {
@@ -363,8 +363,8 @@ public class TestLeafQueue {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
// Release each container from app_1
for (RMContainer rmContainer : app_1.getLiveContainers()) {
@@ -374,9 +374,9 @@ public class TestLeafQueue {
assertEquals(0*GB, a.getUsedResources().getMemory());
assertEquals(0*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(0, a.getMetrics().getAllocatedGB());
- assertEquals(1, a.getMetrics().getAvailableGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(0*GB, a.getMetrics().getAllocatedMB());
+ assertEquals(1*GB, a.getMetrics().getAvailableMB());
}
@Test
@@ -700,9 +700,9 @@ public class TestLeafQueue {
assertEquals(1*GB, a.getUsedResources().getMemory());
assertEquals(1*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
- assertEquals(0, a.getMetrics().getAvailableGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
+ assertEquals(0*GB, a.getMetrics().getAvailableMB());
// Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also
// you can get one container more than user-limit
@@ -710,8 +710,8 @@ public class TestLeafQueue {
assertEquals(2*GB, a.getUsedResources().getMemory());
assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Now, reservation should kick in for app_1
a.assignContainers(clusterResource, node_0);
@@ -720,8 +720,8 @@ public class TestLeafQueue {
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
assertEquals(2*GB, node_0.getUsedResource().getMemory());
- assertEquals(4, a.getMetrics().getReservedGB());
- assertEquals(2, a.getMetrics().getAllocatedGB());
+ assertEquals(4*GB, a.getMetrics().getReservedMB());
+ assertEquals(2*GB, a.getMetrics().getAllocatedMB());
// Now free 1 container from app_0 i.e. 1G
a.completedContainer(clusterResource, app_0, node_0,
@@ -732,8 +732,8 @@ public class TestLeafQueue {
assertEquals(0*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(4*GB, app_1.getCurrentReservation().getMemory());
assertEquals(1*GB, node_0.getUsedResource().getMemory());
- assertEquals(4, a.getMetrics().getReservedGB());
- assertEquals(1, a.getMetrics().getAllocatedGB());
+ assertEquals(4*GB, a.getMetrics().getReservedMB());
+ assertEquals(1*GB, a.getMetrics().getAllocatedMB());
// Now finish another container from app_0 and fulfill the reservation
a.completedContainer(clusterResource, app_0, node_0,
@@ -744,8 +744,8 @@ public class TestLeafQueue {
assertEquals(4*GB, app_1.getCurrentConsumption().getMemory());
assertEquals(0*GB, app_1.getCurrentReservation().getMemory());
assertEquals(4*GB, node_0.getUsedResource().getMemory());
- assertEquals(0, a.getMetrics().getReservedGB());
- assertEquals(4, a.getMetrics().getAllocatedGB());
+ assertEquals(0*GB, a.getMetrics().getReservedMB());
+ assertEquals(4*GB, a.getMetrics().getAllocatedMB());
}
@Test
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
index 5a492340d00..73554b0e595 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebApp.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.resourcemanager.applicationsmanager.MockAsm;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemStore;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
+import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
@@ -45,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.webapp.WebApps;
+import org.apache.hadoop.yarn.webapp.YarnWebParams;
import org.apache.hadoop.yarn.webapp.test.WebAppTests;
import org.junit.Test;
@@ -74,7 +76,7 @@ public class TestRMWebApp {
@Test public void testView() {
Injector injector = WebAppTests.createMockInjector(RMContext.class,
- mockRMContext(3, 1, 2, 8*GiB),
+ mockRMContext(15, 1, 2, 8*GiB),
new Module() {
@Override
public void configure(Binder binder) {
@@ -85,7 +87,9 @@ public class TestRMWebApp {
}
}
});
- injector.getInstance(RmView.class).render();
+ RmView rmViewInstance = injector.getInstance(RmView.class);
+ rmViewInstance.set(YarnWebParams.APP_STATE, RMAppState.RUNNING.toString());
+ rmViewInstance.render();
WebAppTests.flushOutput(injector);
}
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
index 746eec234e9..d09645a97dc 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java
@@ -398,19 +398,19 @@ public class TestRMWebServices extends JerseyTest {
ResourceScheduler rs = rm.getResourceScheduler();
QueueMetrics metrics = rs.getRootQueueMetrics();
ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();
- final long MB_IN_GB = 1024;
- long totalMBExpect = (metrics.getReservedGB() * MB_IN_GB)
- + (metrics.getAvailableGB() * MB_IN_GB)
- + (metrics.getAllocatedGB() * MB_IN_GB);
+ long totalMBExpect =
+ metrics.getReservedMB()+ metrics.getAvailableMB()
+ + metrics.getAllocatedMB();
- assertEquals("appsSubmitted doesn't match", metrics.getAppsSubmitted(), sub);
+ assertEquals("appsSubmitted doesn't match",
+ metrics.getAppsSubmitted(), sub);
assertEquals("reservedMB doesn't match",
- metrics.getReservedGB() * MB_IN_GB, reservedMB);
- assertEquals("availableMB doesn't match", metrics.getAvailableGB()
- * MB_IN_GB, availableMB);
- assertEquals("allocatedMB doesn't match", metrics.getAllocatedGB()
- * MB_IN_GB, allocMB);
+ metrics.getReservedMB(), reservedMB);
+ assertEquals("availableMB doesn't match",
+ metrics.getAvailableMB(), availableMB);
+ assertEquals("allocatedMB doesn't match",
+ metrics.getAllocatedMB(), allocMB);
assertEquals("containersAllocated doesn't match", 0, containersAlloc);
assertEquals("totalMB doesn't match", totalMBExpect, totalMB);
assertEquals(
diff --git a/hadoop-mapreduce-project/ivy/libraries.properties b/hadoop-mapreduce-project/ivy/libraries.properties
index 0d693345552..45e691310aa 100644
--- a/hadoop-mapreduce-project/ivy/libraries.properties
+++ b/hadoop-mapreduce-project/ivy/libraries.properties
@@ -81,6 +81,6 @@ wagon-http.version=1.0-beta-2
xmlenc.version=0.52
xerces.version=1.4.4
-jackson.version=1.8.2
+jackson.version=1.8.8
yarn.version=0.23.1-SNAPSHOT
hadoop-mapreduce.version=0.23.1-SNAPSHOT
diff --git a/hadoop-mapreduce-project/src/java/mapred-default.xml b/hadoop-mapreduce-project/src/java/mapred-default.xml
index 79605eb1699..0d0a91d7787 100644
--- a/hadoop-mapreduce-project/src/java/mapred-default.xml
+++ b/hadoop-mapreduce-project/src/java/mapred-default.xml
@@ -433,18 +433,6 @@
- <property>
-   <name>mapreduce.task.tmp.dir</name>
-   <value>./tmp</value>
-   <description> To set the value of tmp directory for map and reduce tasks.
-   If the value is an absolute path, it is directly assigned. Otherwise, it is
-   prepended with task's working directory. The java tasks are executed with
-   option -Djava.io.tmpdir='the absolute path of the tmp dir'. Pipes and
-   streaming are set with environment variable,
-   TMPDIR='the absolute path of the tmp dir'
-   </description>
- </property>
-
<property>
  <name>mapreduce.map.log.level</name>
  <value>INFO</value>
diff --git a/hadoop-minicluster/pom.xml b/hadoop-minicluster/pom.xml
new file mode 100644
index 00000000000..68cd4b67c9d
--- /dev/null
+++ b/hadoop-minicluster/pom.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>0.23.1-SNAPSHOT</version>
+    <relativePath>../hadoop-project</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-minicluster</artifactId>
+  <version>0.23.1-SNAPSHOT</version>
+  <packaging>jar</packaging>
+
+  <name>Apache Hadoop Mini-Cluster</name>
+  <description>Apache Hadoop Mini-Cluster</description>
+
+  <dependencies>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-server-tests</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>compile</scope>
+      <type>test-jar</type>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-common</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-app</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-yarn-api</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-core</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-jobclient</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-mapreduce-client-hs</artifactId>
+      <scope>compile</scope>
+    </dependency>
+
+  </dependencies>
+
+</project>
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index e3b89fc11c5..55763f8fdbd 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -524,7 +524,7 @@
<dependency>
  <groupId>org.codehaus.jackson</groupId>
  <artifactId>jackson-mapper-asl</artifactId>
- <version>1.7.1</version>
+ <version>1.8.8</version>
</dependency>
<dependency>
  <groupId>org.aspectj</groupId>
diff --git a/hadoop-tools/hadoop-streaming/pom.xml b/hadoop-tools/hadoop-streaming/pom.xml
index 123d9ecf1be..2981765c0be 100644
--- a/hadoop-tools/hadoop-streaming/pom.xml
+++ b/hadoop-tools/hadoop-streaming/pom.xml
@@ -29,7 +29,7 @@
<hadoop.log.dir>${project.build.directory}/log</hadoop.log.dir>
- <test.exclude.pattern>%regex[.*(TestStreamingBadRecords|TestStreamingStatus|TestUlimit).*]</test.exclude.pattern>
+ <test.exclude.pattern>%regex[.*(TestStreamingStatus).*]</test.exclude.pattern>
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java
index be10235dc68..7b7901faad1 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestStreamingBadRecords.java
@@ -154,6 +154,10 @@ public class TestStreamingBadRecords extends ClusterMapReduceTestCase
}
}
+ /*
+ * Disable test as skipping bad records not supported in 0.23
+ */
+ /*
public void testSkip() throws Exception {
JobConf clusterConf = createJobConf();
createInput();
@@ -195,7 +199,12 @@ public class TestStreamingBadRecords extends ClusterMapReduceTestCase
//validate that there is no skip directory as it has been set to "none"
assertTrue(SkipBadRecords.getSkipOutputPath(job.jobConf_)==null);
}
+ */
+ /*
+ * Disable test as skipping bad records not supported in 0.23
+ */
+ /*
public void testNarrowDown() throws Exception {
createInput();
JobConf clusterConf = createJobConf();
@@ -231,6 +240,11 @@ public class TestStreamingBadRecords extends ClusterMapReduceTestCase
validateOutput(job.running_, true);
assertTrue(SkipBadRecords.getSkipOutputPath(job.jobConf_)!=null);
}
+ */
+
+ public void testNoOp() {
+ // Added to avoid warnings when running this disabled test
+ }
static class App{
boolean isReducer;
diff --git a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java
index 9d35d7ae68f..89b4d49a231 100644
--- a/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java
+++ b/hadoop-tools/hadoop-streaming/src/test/java/org/apache/hadoop/streaming/TestUlimit.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.mapreduce.MapReduceTestUtil;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.StringUtils;
+import org.junit.Ignore;
import org.junit.Test;
import static org.junit.Assert.*;
@@ -52,7 +53,6 @@ public class TestUlimit {
private static String SET_MEMORY_LIMIT = "786432"; // 768MB
String[] genArgs(String memLimit) {
- String strJobtracker = JTConfig.JT_IPC_ADDRESS + "=localhost:" + mr.createJobConf().get(JTConfig.JT_IPC_ADDRESS);
String strNamenode = "fs.default.name=" + mr.createJobConf().get("fs.default.name");
return new String[] {
"-input", inputPath.toString(),
@@ -63,7 +63,6 @@ public class TestUlimit {
"-jobconf", MRJobConfig.NUM_MAPS + "=1",
"-jobconf", JobConf.MAPRED_MAP_TASK_ULIMIT + "=" + memLimit,
"-jobconf", strNamenode,
- "-jobconf", strJobtracker,
"-jobconf", "stream.tmpdir=" +
System.getProperty("test.build.data","/tmp"),
"-jobconf", "mapred.jar=" + TestStreaming.STREAMING_JAR,
@@ -79,6 +78,7 @@ public class TestUlimit {
* is expected to be a failure.
*/
@Test
+ @Ignore
public void testCommandLine() {
if (UtilTest.isCygwin()) {
return;
diff --git a/pom.xml b/pom.xml
index 7dc828a118d..3ecaf8832ba 100644
--- a/pom.xml
+++ b/pom.xml
@@ -79,6 +79,8 @@
<module>hadoop-mapreduce-project</module>
<module>hadoop-tools</module>
<module>hadoop-dist</module>
+ <module>hadoop-client</module>
+ <module>hadoop-minicluster</module>