Merge -c 1491989 from trunk to branch-2 to fix YARN-767. Initialize application metrics at RM bootup. Contributed by Jian He.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1491990 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 2013-06-11 22:16:03 +00:00
parent 443578ccf7
commit 9cc1150be1
5 changed files with 36 additions and 3 deletions

View File

@@ -468,6 +468,9 @@ Release 2.1.0-beta - UNRELEASED
YARN-760. NodeManager throws AvroRuntimeException on failed start.
(Niranjan Singh via jlowe)
YARN-767. Initialize application metrics at RM bootup. (Jian He via
acmurthy)
BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
YARN-158. Yarn creating package-info.java must not depend on sh.

View File

@@ -223,7 +223,10 @@ public class ResourceManager extends CompositeService implements Recoverable {
this.resourceTracker = createResourceTrackerService();
addService(resourceTracker);
DefaultMetricsSystem.initialize("ResourceManager");
JvmMetrics.initSingleton("ResourceManager", null);
try {
this.scheduler.reinitialize(conf, this.rmContext);
} catch (IOException ioe) {
@@ -598,8 +601,6 @@ public class ResourceManager extends CompositeService implements Recoverable {
}
startWepApp();
DefaultMetricsSystem.initialize("ResourceManager");
JvmMetrics.initSingleton("ResourceManager", null);
try {
rmDTSecretManager.startThreads();
} catch(IOException ie) {
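Note on the two ResourceManager hunks above: the patch moves DefaultMetricsSystem.initialize and JvmMetrics.initSingleton from the start-up path (after startWepApp) to the init path (just before scheduler.reinitialize). A minimal sketch of that ordering follows, using only the metrics2 calls already visible in the diff; the wrapper class and main method are illustrative, not part of the patch:

import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

public class RmMetricsBootOrderSketch {
  public static void main(String[] args) {
    // 1. With this patch, the metrics system is brought up while the RM initializes.
    DefaultMetricsSystem.initialize("ResourceManager");
    JvmMetrics.initSingleton("ResourceManager", null);
    // 2. scheduler.reinitialize(conf, rmContext) runs next in the RM, so the root
    //    QueueMetrics source registers against an already-initialized metrics
    //    system and queue metrics are visible (at zero) from RM bootup.
    DefaultMetricsSystem.shutdown();
  }
}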

View File

@@ -126,6 +126,18 @@ public class QueueMetrics implements MetricsSource {
enableUserMetrics, conf);
}
// this method is here because we want to make sure these metrics show up on
// queue registration.
private void initMetrics() {
appsSubmitted.incr(0);
appsRunning.incr(0);
appsPending.incr(0);
appsCompleted.incr(0);
appsKilled.incr(0);
appsFailed.incr(0);
reservedContainers.incr(0);
}
/**
* Helper method to clear cache - used only for unit tests.
*/
@@ -156,6 +168,7 @@ public class QueueMetrics implements MetricsSource {
ms.register(
sourceName(queueName).toString(),
"Metrics for queue: " + queueName, metrics);
metrics.initMetrics();
}
queueMetrics.put(queueName, metrics);
}
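The incr(0) calls in initMetrics look like no-ops, but in metrics2 they mark each counter/gauge as changed, so the queue's source can report them with value 0 as soon as it is registered rather than only after the first real update. A small stand-alone sketch of that behavior, assuming MutableCounterInt from org.apache.hadoop.metrics2.lib; the registry name, metric name, and class below are illustrative only:

import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;

public class IncrZeroSketch {
  public static void main(String[] args) {
    MetricsRegistry registry = new MetricsRegistry("QueueMetrics");
    MutableCounterInt appsSubmitted =
        registry.newCounter("AppsSubmitted", "# of apps submitted", 0);
    // incr(0) leaves the value at 0 but flags the metric as changed, so a
    // snapshot taken right after queue registration can include it instead of
    // skipping an untouched metric.
    appsSubmitted.incr(0);
    System.out.println("AppsSubmitted = " + appsSubmitted.value());
  }
}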

View File

@@ -32,10 +32,14 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
import org.apache.hadoop.test.MetricsAsserts;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Assert;
import org.junit.Before;
@@ -264,6 +268,16 @@ public class TestQueueMetrics {
}
}
@Test
public void testMetricsInitializedOnRMInit() {
YarnConfiguration conf = new YarnConfiguration();
conf.setClass(YarnConfiguration.RM_SCHEDULER,
FifoScheduler.class, ResourceScheduler.class);
MockRM rm = new MockRM(conf);
QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics();
checkApps(metrics, 0, 0, 0, 0, 0, 0);
MetricsAsserts.assertGauge("ReservedContainers", 0, metrics);
}
public static void checkApps(MetricsSource source, int submitted, int pending,
int running, int completed, int failed, int killed) {

View File

@@ -39,6 +39,7 @@ import javax.xml.parsers.ParserConfigurationException;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.MockApps;
@@ -133,6 +134,7 @@ public class TestFairScheduler {
scheduler = null;
resourceManager = null;
QueueMetrics.clearQueueMetrics();
DefaultMetricsSystem.shutdown();
}
private Configuration createConfiguration() {