HADOOP-6920. Metrics instrumentation to move to the new metrics2 framework. Contributed by Luke Lu.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1101682 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2011-05-10 23:56:54 +00:00
parent c0d70530b1
commit 8a2b40d072
26 changed files with 658 additions and 742 deletions

View File

@@ -26,6 +26,10 @@ Trunk (unreleased changes)
HADOOP-6919. New metrics2 framework. (Luke Lu via acmurthy)
HADOOP-6920. Metrics instrumentation to move to the new metrics2 framework.
(Luke Lu via suresh)
IMPROVEMENTS
HADOOP-7042. Updates to test-patch.sh to include failed test names and

View File

@@ -2,15 +2,26 @@
# See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
*.sink.file.class=org.apache.hadoop.metrics2.sink.FileSink
# default sampling period
*.period=10
# The namenode-metrics.out file will contain metrics from all contexts
#namenode.sink.file.filename=namenode-metrics.out
# Specifying a special sampling period for namenode:
#namenode.sink.*.period=8
#datanode.sink.file.filename=datanode-metrics.out
#jobtracker.sink.file.filename=jobtracker-metrics.out
# The following example splits metrics of different
# contexts into different sinks (in this case files)
#jobtracker.sink.file_jvm.context=jvm
#jobtracker.sink.file_jvm.filename=jobtracker-jvm-metrics.out
#jobtracker.sink.file_mapred.context=mapred
#jobtracker.sink.file_mapred.filename=jobtracker-mapred-metrics.out
#tasktracker.sink.file.filename=tasktracker-metrics.out
#maptask.sink.file.filename=maptask-metrics.out
#reducetask.sink.file.filename=reducetask-metrics.out

View File

@@ -117,7 +117,7 @@ log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
# Event Counter Appender
# Sends counts of logging messages at different severity levels to Hadoop Metrics.
#
log4j.appender.EventCounter=org.apache.hadoop.metrics.jvm.EventCounter
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
#
# Job Summary Appender

View File

@@ -148,7 +148,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
<version>r07</version>
<version>r09</version>
</dependency>
</dependencies>
</project>

View File

@@ -1045,7 +1045,7 @@ public Object run() throws SaslException {
}
doSaslReply(SaslStatus.ERROR, null, sendToClient.getClass().getName(),
sendToClient.getLocalizedMessage());
rpcMetrics.authenticationFailures.inc();
rpcMetrics.incrAuthenticationFailures();
String clientIP = this.toString();
// attempting user could be null
AUDITLOG.warn(AUTH_FAILED_FOR + clientIP + ":" + attemptingUser);
@@ -1069,7 +1069,7 @@ public Object run() throws SaslException {
if (LOG.isDebugEnabled()) {
LOG.debug("SASL server successfully authenticated client: " + user);
}
rpcMetrics.authenticationSuccesses.inc();
rpcMetrics.incrAuthenticationSuccesses();
AUDITLOG.info(AUTH_SUCCESSFULL_FOR + user);
saslContextEstablished = true;
}
@@ -1227,6 +1227,7 @@ private void processHeader(byte[] buf) throws IOException {
String protocolClassName = header.getProtocol();
if (protocolClassName != null) {
protocol = getProtocolClass(header.getProtocol(), conf);
rpcDetailedMetrics.init(protocol);
}
} catch (ClassNotFoundException cnfe) {
throw new IOException("Unknown protocol: " + header.getProtocol());
@@ -1349,9 +1350,9 @@ private boolean authorizeConnection() throws IOException {
if (LOG.isDebugEnabled()) {
LOG.debug("Successfully authorized " + header);
}
rpcMetrics.authorizationSuccesses.inc();
rpcMetrics.incrAuthorizationSuccesses();
} catch (AuthorizationException ae) {
rpcMetrics.authorizationFailures.inc();
rpcMetrics.incrAuthorizationFailures();
setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
ae.getClass().getName(), ae.getMessage());
responder.doRespond(authFailedCall);
@@ -1520,10 +1521,8 @@ protected Server(String bindAddress, int port,
// Start the listener here and let it bind to the port
listener = new Listener();
this.port = listener.getAddress().getPort();
this.rpcMetrics = new RpcMetrics(serverName,
Integer.toString(this.port), this);
this.rpcDetailedMetrics = new RpcDetailedMetrics(serverName,
Integer.toString(this.port));
this.rpcMetrics = RpcMetrics.create(this);
this.rpcDetailedMetrics = RpcDetailedMetrics.create(this.port);
this.tcpNoDelay = conf.getBoolean("ipc.server.tcpnodelay", false);
// Create the responder here
@@ -1759,7 +1758,7 @@ private int channelWrite(WritableByteChannel channel,
int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
channel.write(buffer) : channelIO(null, channel, buffer);
if (count > 0) {
rpcMetrics.sentBytes.inc(count);
rpcMetrics.incrSentBytes(count);
}
return count;
}
@@ -1779,7 +1778,7 @@ private int channelRead(ReadableByteChannel channel,
int count = (buffer.remaining() <= NIO_BUFFER_LIMIT) ?
channel.read(buffer) : channelIO(channel, null, buffer);
if (count > 0) {
rpcMetrics.receivedBytes.inc(count);
rpcMetrics.incrReceivedBytes(count);
}
return count;
}
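The changes above replace direct increments on public metric fields (rpcMetrics.sentBytes.inc(count)) with named mutator methods on the metrics class. A minimal sketch of the metrics2 source shape this relies on, assuming illustrative names (ExampleMetrics and its method are hypothetical, not part of this commit):

```java
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;

// Hypothetical source mirroring the RpcMetrics byte counters.
@Metrics(about="Example byte counters", context="example")
class ExampleMetrics {
  // The field is instantiated by the metrics system when the source is registered.
  @Metric("Number of received bytes") MutableCounterLong receivedBytes;

  static ExampleMetrics create() {
    // Registration builds the source from the annotations.
    return DefaultMetricsSystem.instance()
        .register("ExampleMetrics", null, new ExampleMetrics());
  }

  void incrReceivedBytes(int count) {
    receivedBytes.incr(count); // thread-safe counter increment
  }
}
```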

View File

@@ -41,7 +41,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
/** An RpcEngine implementation for Writable data. */
@InterfaceStability.Evolving
@@ -390,9 +389,8 @@ public Writable call(Class<?> protocol, Writable param, long receivedTime)
Invocation call = (Invocation)param;
if (verbose) log("Call: " + call);
Method method =
protocol.getMethod(call.getMethodName(),
call.getParameterClasses());
Method method = protocol.getMethod(call.getMethodName(),
call.getParameterClasses());
method.setAccessible(true);
// Verify rpc version
@@ -429,24 +427,10 @@ public Writable call(Class<?> protocol, Writable param, long receivedTime)
" queueTime= " + qTime +
" procesingTime= " + processingTime);
}
rpcMetrics.rpcQueueTime.inc(qTime);
rpcMetrics.rpcProcessingTime.inc(processingTime);
MetricsTimeVaryingRate m =
(MetricsTimeVaryingRate) rpcDetailedMetrics.registry.get(call.getMethodName());
if (m == null) {
try {
m = new MetricsTimeVaryingRate(call.getMethodName(),
rpcDetailedMetrics.registry);
} catch (IllegalArgumentException iae) {
// the metrics has been registered; re-fetch the handle
LOG.info("Error register " + call.getMethodName(), iae);
m = (MetricsTimeVaryingRate) rpcDetailedMetrics.registry.get(
call.getMethodName());
}
}
m.inc(processingTime);
rpcMetrics.addRpcQueueTime(qTime);
rpcMetrics.addRpcProcessingTime(processingTime);
rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
processingTime);
if (verbose) log("Return: "+value);
return new ObjectWritable(method.getReturnType(), value);

View File

@@ -1,79 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import javax.management.ObjectName;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
/**
*
* This is the JMX MBean for reporting the RPC layer Activity.
* The MBean is registered using the name
* "hadoop:service=<RpcServiceName>,name=RpcActivityForPort<port>"
*
* Many of the activity metrics are sampled and averaged on an interval
* which can be specified in the metrics config file.
* <p>
* For the metrics that are sampled and averaged, one must specify
* a metrics context that does periodic update calls. Most metrics contexts do.
* The default Null metrics context however does NOT. So if you aren't
* using any other metrics context then you can turn on the viewing and averaging
* of sampled metrics by specifying the following two lines
* in the hadoop-metrics.properties file:
* <pre>
* rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
* rpc.period=10
* </pre>
*<p>
* Note that the metrics are collected regardless of the context used.
* The context with the update thread is used to average the data periodically
*
*
*
* Impl details: We use a dynamic mbean that gets the list of the metrics
* from the metrics registry passed as an argument to the constructor
*/
@InterfaceAudience.Private
public class RpcActivityMBean extends MetricsDynamicMBeanBase {
private final ObjectName mbeanName;
/**
*
* @param mr - the metrics registry that has all the metrics
* @param serviceName - the service name for the rpc service
* @param port - the rpc port.
*/
public RpcActivityMBean(final MetricsRegistry mr, final String serviceName,
final String port) {
super(mr, "Rpc layer statistics");
mbeanName = MBeanUtil.registerMBean(serviceName,
"RpcActivityForPort" + port, this);
}
public void shutdown() {
if (mbeanName != null)
MBeanUtil.unregisterMBean(mbeanName);
}
}

View File

@@ -1,74 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import javax.management.ObjectName;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics.util.MBeanUtil;
import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
/**
*
* This is the JMX MBean for reporting the RPC layer Activity. The MBean is
* registered using the name
* "hadoop:service=<RpcServiceName>,name=RpcDetailedActivityForPort<port>"
*
* Many of the activity metrics are sampled and averaged on an interval which
* can be specified in the metrics config file.
* <p>
* For the metrics that are sampled and averaged, one must specify a metrics
* context that does periodic update calls. Most metrics contexts do. The
* default Null metrics context however does NOT. So if you aren't using any
* other metrics context then you can turn on the viewing and averaging of
* sampled metrics by specifying the following two lines in the
* hadoop-metrics.properties file:
*
* <pre>
* rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
* rpc.period=10
* </pre>
*<p>
* Note that the metrics are collected regardless of the context used. The
* context with the update thread is used to average the data periodically
*
* Impl details: We use a dynamic mbean that gets the list of the metrics from
* the metrics registry passed as an argument to the constructor
*/
@InterfaceAudience.Private
public class RpcDetailedActivityMBean extends MetricsDynamicMBeanBase {
private final ObjectName mbeanName;
/**
* @param mr - the metrics registry that has all the metrics
* @param serviceName - the service name for the rpc service
* @param port - the rpc port.
*/
public RpcDetailedActivityMBean(final MetricsRegistry mr,
final String serviceName, final String port) {
super(mr, "Rpc layer detailed statistics");
mbeanName = MBeanUtil.registerMBean(serviceName,
"RpcDetailedActivityForPort" + port, this);
}
public void shutdown() {
if (mbeanName != null)
MBeanUtil.unregisterMBean(mbeanName);
}
}

View File

@@ -20,65 +20,61 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableRates;
/**
*
* This class is for maintaining the various RPC method related statistics
* This class is for maintaining RPC method related statistics
* and publishing them through the metrics interfaces.
* This also registers the JMX MBean for RPC.
*/
@InterfaceAudience.Private
public class RpcDetailedMetrics implements Updater {
public final MetricsRegistry registry = new MetricsRegistry();
private final MetricsRecord metricsRecord;
private static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
RpcDetailedActivityMBean rpcMBean;
@Metrics(about="Per method RPC metrics", context="rpcdetailed")
public class RpcDetailedMetrics {
@Metric MutableRates rates;
static final Log LOG = LogFactory.getLog(RpcDetailedMetrics.class);
final MetricsRegistry registry;
final String name;
RpcDetailedMetrics(int port) {
name = "RpcDetailedActivityForPort"+ port;
registry = new MetricsRegistry("rpcdetailed")
.tag("port", "RPC port", String.valueOf(port));
LOG.debug(registry.info());
}
public String name() { return name; }
public static RpcDetailedMetrics create(int port) {
RpcDetailedMetrics m = new RpcDetailedMetrics(port);
return DefaultMetricsSystem.instance().register(m.name, null, m);
}
/**
* Statically added metrics to expose at least one metric, without
* which other dynamically added metrics are not exposed over JMX.
* Initialize the metrics for JMX with protocol methods
* @param protocol the protocol class
*/
final MetricsTimeVaryingRate getProtocolVersion =
new MetricsTimeVaryingRate("getProtocolVersion", registry);
public RpcDetailedMetrics(final String hostName, final String port) {
MetricsContext context = MetricsUtil.getContext("rpc");
metricsRecord = MetricsUtil.createRecord(context, "detailed-metrics");
metricsRecord.setTag("port", port);
LOG.info("Initializing RPC Metrics with hostName="
+ hostName + ", port=" + port);
context.registerUpdater(this);
// Need to clean up the interface to RpcMgt - don't need both metrics and server params
rpcMBean = new RpcDetailedActivityMBean(registry, hostName, port);
public void init(Class<?> protocol) {
rates.init(protocol);
}
/**
* Push the metrics to the monitoring subsystem on doUpdate() call.
* Add an RPC processing time sample
* @param name of the RPC call
* @param processingTime the processing time
*/
public void doUpdates(final MetricsContext context) {
synchronized (this) {
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
}
}
metricsRecord.update();
//@Override // some instrumentation interface
public void addProcessingTime(String name, int processingTime) {
rates.add(name, processingTime);
}
public void shutdown() {
if (rpcMBean != null)
rpcMBean.shutdown();
}
/**
* Shutdown the instrumentation for the process
*/
//@Override // some instrumentation interface
public void shutdown() {}
}
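A hedged sketch of how a server-side dispatcher would drive the class above; the method name "echo" and the caller class are illustrative, not part of this commit:

```java
import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;

// Illustrative caller showing the per-method rates flow.
class DetailedMetricsUsage {
  void dispatch(RpcDetailedMetrics metrics, Class<?> protocol) {
    metrics.init(protocol); // registers one rate per protocol method
    long start = System.currentTimeMillis();
    // ... invoke the RPC method here ...
    int elapsed = (int) (System.currentTimeMillis() - start);
    metrics.addProcessingTime("echo", elapsed); // sample under the method name
  }
}
```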

View File

@@ -19,136 +19,141 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsIntValue;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingLong;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MetricsRegistry;
import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
import org.apache.hadoop.metrics2.lib.MutableRate;
/**
*
* This class is for maintaining the various RPC statistics
* and publishing them through the metrics interfaces.
* This also registers the JMX MBean for RPC.
* <p>
* This class has a number of metrics variables that are publicly accessible;
* these variables (objects) have methods to update their values;
* for example:
* <p> {@link #rpcQueueTime}.inc(time)
*
*/
@InterfaceAudience.Private
public class RpcMetrics implements Updater {
private final MetricsRegistry registry = new MetricsRegistry();
private final MetricsRecord metricsRecord;
private final Server myServer;
private static final Log LOG = LogFactory.getLog(RpcMetrics.class);
RpcActivityMBean rpcMBean;
@Metrics(about="Aggregate RPC metrics", context="rpc")
public class RpcMetrics {
static final Log LOG = LogFactory.getLog(RpcMetrics.class);
final Server server;
final MetricsRegistry registry;
final String name;
public RpcMetrics(final String hostName, final String port,
final Server server) {
myServer = server;
MetricsContext context = MetricsUtil.getContext("rpc");
metricsRecord = MetricsUtil.createRecord(context, "metrics");
metricsRecord.setTag("port", port);
LOG.info("Initializing RPC Metrics with hostName="
+ hostName + ", port=" + port);
context.registerUpdater(this);
// Need to clean up the interface to RpcMgt - don't need both metrics and server params
rpcMBean = new RpcActivityMBean(registry, hostName, port);
RpcMetrics(Server server) {
String port = String.valueOf(server.getListenerAddress().getPort());
name = "RpcActivityForPort"+ port;
this.server = server;
registry = new MetricsRegistry("rpc").tag("port", "RPC port", port);
LOG.debug("Initialized "+ registry);
}
/**
* The metrics variables are public:
* - they can be set directly by calling their set/inc methods
* - they can also be read directly - e.g. JMX does this.
*/
public String name() { return name; }
public static RpcMetrics create(Server server) {
RpcMetrics m = new RpcMetrics(server);
return DefaultMetricsSystem.instance().register(m.name, null, m);
}
@Metric("Number of received bytes") MutableCounterLong receivedBytes;
@Metric("Number of sent bytes") MutableCounterLong sentBytes;
@Metric("Queue time") MutableRate rpcQueueTime;
@Metric("Processing time") MutableRate rpcProcessingTime;
@Metric("Number of authentication failures")
MutableCounterInt rpcAuthenticationFailures;
@Metric("Number of authentication successes")
MutableCounterInt rpcAuthenticationSuccesses;
@Metric("Number of authorization failures")
MutableCounterInt rpcAuthorizationFailures;
@Metric("Number of authorization successes")
MutableCounterInt rpcAuthorizationSuccesses;
@Metric("Number of open connections") public int numOpenConnections() {
return server.getNumOpenConnections();
}
@Metric("Length of the call queue") public int callQueueLength() {
return server.getCallQueueLen();
}
// Public instrumentation methods that could be extracted to an
// abstract class if we decide to do custom instrumentation classes a la
// JobTrackerInstrumentation. The methods with //@Override comments are
// candidates for abstract methods in an abstract instrumentation class.
/**
* metrics - number of bytes received
* One authentication failure event
*/
public final MetricsTimeVaryingLong receivedBytes =
new MetricsTimeVaryingLong("ReceivedBytes", registry);
/**
* metrics - number of bytes sent
*/
public final MetricsTimeVaryingLong sentBytes =
new MetricsTimeVaryingLong("SentBytes", registry);
/**
* metrics - rpc queue time
*/
public final MetricsTimeVaryingRate rpcQueueTime =
new MetricsTimeVaryingRate("RpcQueueTime", registry);
/**
* metrics - rpc processing time
*/
public final MetricsTimeVaryingRate rpcProcessingTime =
new MetricsTimeVaryingRate("RpcProcessingTime", registry);
/**
* metrics - number of open connections
*/
public final MetricsIntValue numOpenConnections =
new MetricsIntValue("NumOpenConnections", registry);
/**
* metrics - length of the queue
*/
public final MetricsIntValue callQueueLen =
new MetricsIntValue("callQueueLen", registry);
/**
* metrics - number of failed authentications
*/
public final MetricsTimeVaryingInt authenticationFailures =
new MetricsTimeVaryingInt("rpcAuthenticationFailures", registry);
/**
* metrics - number of successful authentications
*/
public final MetricsTimeVaryingInt authenticationSuccesses =
new MetricsTimeVaryingInt("rpcAuthenticationSuccesses", registry);
/**
* metrics - number of failed authorizations
*/
public final MetricsTimeVaryingInt authorizationFailures =
new MetricsTimeVaryingInt("rpcAuthorizationFailures", registry);
/**
* metrics - number of successful authorizations
*/
public final MetricsTimeVaryingInt authorizationSuccesses =
new MetricsTimeVaryingInt("rpcAuthorizationSuccesses", registry);
/**
* Push the metrics to the monitoring subsystem on doUpdate() call.
*/
public void doUpdates(final MetricsContext context) {
synchronized (this) {
// ToFix - fix server to use the following two metrics directly so
// the metrics do not have be copied here.
numOpenConnections.set(myServer.getNumOpenConnections());
callQueueLen.set(myServer.getCallQueueLen());
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
}
}
metricsRecord.update();
//@Override
public void incrAuthenticationFailures() {
rpcAuthenticationFailures.incr();
}
/**
* shutdown the metrics
* One authentication success event
*/
public void shutdown() {
if (rpcMBean != null)
rpcMBean.shutdown();
//@Override
public void incrAuthenticationSuccesses() {
rpcAuthenticationSuccesses.incr();
}
/**
* One authorization success event
*/
//@Override
public void incrAuthorizationSuccesses() {
rpcAuthorizationSuccesses.incr();
}
/**
* One authorization failure event
*/
//@Override
public void incrAuthorizationFailures() {
rpcAuthorizationFailures.incr();
}
/**
* Shutdown the instrumentation for the process
*/
//@Override
public void shutdown() {}
/**
* Increment sent bytes by count
* @param count to increment
*/
//@Override
public void incrSentBytes(int count) {
sentBytes.incr(count);
}
/**
* Increment received bytes by count
* @param count to increment
*/
//@Override
public void incrReceivedBytes(int count) {
receivedBytes.incr(count);
}
/**
* Add an RPC queue time sample
* @param qTime the queue time
*/
//@Override
public void addRpcQueueTime(int qTime) {
rpcQueueTime.add(qTime);
}
/**
* Add an RPC processing time sample
* @param processingTime the processing time
*/
//@Override
public void addRpcProcessingTime(int processingTime) {
rpcProcessingTime.add(processingTime);
}
}
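The numOpenConnections() and callQueueLength() methods above show the method form of @Metric: the annotated getter is sampled on each snapshot, which removes the copy-into-gauge step the old doUpdates() needed. A minimal hedged sketch of the same pattern, with an illustrative queue standing in for the server state:

```java
import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;

// Hypothetical source; the queue stands in for the server's call queue.
@Metrics(context="example")
class QueueGauges {
  private final Queue<Runnable> callQueue = new ConcurrentLinkedQueue<Runnable>();

  @Metric("Length of the call queue")
  public int callQueueLength() {
    return callQueue.size(); // read fresh on each metrics snapshot
  }
}
```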

View File

@@ -1,121 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import javax.management.ObjectName;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics.util.MBeanUtil;
/**
* This class implements the RpcMgt MBean
*
*/
@InterfaceAudience.Private
class RpcMgt implements RpcMgtMBean {
private RpcMetrics myMetrics;
private Server myServer;
private ObjectName mbeanName;
RpcMgt(final String serviceName, final String port,
final RpcMetrics metrics, Server server) {
myMetrics = metrics;
myServer = server;
mbeanName = MBeanUtil.registerMBean(serviceName,
"RpcStatisticsForPort" + port, this);
}
public void shutdown() {
if (mbeanName != null)
MBeanUtil.unregisterMBean(mbeanName);
}
/**
* @inheritDoc
*/
public long getRpcOpsAvgProcessingTime() {
return myMetrics.rpcProcessingTime.getPreviousIntervalAverageTime();
}
/**
* @inheritDoc
*/
public long getRpcOpsAvgProcessingTimeMax() {
return myMetrics.rpcProcessingTime.getMaxTime();
}
/**
* @inheritDoc
*/
public long getRpcOpsAvgProcessingTimeMin() {
return myMetrics.rpcProcessingTime.getMinTime();
}
/**
* @inheritDoc
*/
public long getRpcOpsAvgQueueTime() {
return myMetrics.rpcQueueTime.getPreviousIntervalAverageTime();
}
/**
* @inheritDoc
*/
public long getRpcOpsAvgQueueTimeMax() {
return myMetrics.rpcQueueTime.getMaxTime();
}
/**
* @inheritDoc
*/
public long getRpcOpsAvgQueueTimeMin() {
return myMetrics.rpcQueueTime.getMinTime();
}
/**
* @inheritDoc
*/
public int getRpcOpsNumber() {
return myMetrics.rpcProcessingTime.getPreviousIntervalNumOps() ;
}
/**
* @inheritDoc
*/
public int getNumOpenConnections() {
return myServer.getNumOpenConnections();
}
/**
* @inheritDoc
*/
public int getCallQueueLen() {
return myServer.getCallQueueLen();
}
/**
* @inheritDoc
*/
public void resetAllMinMax() {
myMetrics.rpcProcessingTime.resetMinMax();
myMetrics.rpcQueueTime.resetMinMax();
}
}

View File

@@ -1,108 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.ipc.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
/**
*
* This is the JMX management interface for the RPC layer.
* Many of the statistics are sampled and averaged on an interval
* which can be specified in the metrics config file.
* <p>
* For the statistics that are sampled and averaged, one must specify
* a metrics context that does periodic update calls. Most do.
* The default Null metrics context however does NOT. So if you aren't
* using any other metrics context then you can turn on the viewing and averaging
* of sampled metrics by specifying the following two lines
* in the hadoop-metrics.properties file:
* <pre>
* rpc.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
* rpc.period=10
* </pre>
*<p>
* Note that the metrics are collected regardless of the context used.
* The context with the update thread is used to average the data periodically
*
*/
@InterfaceAudience.Private
public interface RpcMgtMBean {
/**
* Number of RPC Operations in the last interval
* @return number of operations
*/
int getRpcOpsNumber();
/**
* Average time for RPC Operations in last interval
* @return time in msec
*/
long getRpcOpsAvgProcessingTime();
/**
* The Minimum RPC Operation Processing Time since reset was called
* @return time in msec
*/
long getRpcOpsAvgProcessingTimeMin();
/**
* The Maximum RPC Operation Processing Time since reset was called
* @return time in msec
*/
long getRpcOpsAvgProcessingTimeMax();
/**
* The Average RPC Operation Queued Time in the last interval
* @return time in msec
*/
long getRpcOpsAvgQueueTime();
/**
* The Minimum RPC Operation Queued Time since reset was called
* @return time in msec
*/
long getRpcOpsAvgQueueTimeMin();
/**
* The Maximum RPC Operation Queued Time since reset was called
* @return time in msec
*/
long getRpcOpsAvgQueueTimeMax();
/**
* Reset all min max times
*/
void resetAllMinMax();
/**
* The number of open RPC connections
* @return the number of open rpc connections
*/
public int getNumOpenConnections();
/**
* The number of rpc calls in the queue.
* @return The number of rpc calls in the queue.
*/
public int getCallQueueLen();
}

View File

@@ -15,6 +15,10 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* RPC related metrics.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
package org.apache.hadoop.ipc.metrics;

View File

@@ -0,0 +1,34 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log;
/**
* A log4J Appender that simply counts logging events in three levels:
* fatal, error and warn. The class name is used in log4j.properties
* @deprecated use {@link org.apache.hadoop.log.metrics.EventCounter} instead
*/
@Deprecated
public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter {
static {
// The logging system is not started yet.
System.err.println("WARNING: "+ EventCounter.class.getName() +
" is deprecated. Please use "+
org.apache.hadoop.log.metrics.EventCounter.class.getName() +
" in all the log4j.properties files.");
}
}

View File

@@ -0,0 +1,100 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.log.metrics;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
/**
* A log4J Appender that simply counts logging events in three levels:
* fatal, error and warn. The class name is used in log4j.properties
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
public class EventCounter extends AppenderSkeleton {
private static final int FATAL = 0;
private static final int ERROR = 1;
private static final int WARN = 2;
private static final int INFO = 3;
private static class EventCounts {
private final long[] counts = {0, 0, 0, 0};
private synchronized void incr(int i) {
++counts[i];
}
private synchronized long get(int i) {
return counts[i];
}
}
private static EventCounts counts = new EventCounts();
@InterfaceAudience.Private
public static long getFatal() {
return counts.get(FATAL);
}
@InterfaceAudience.Private
public static long getError() {
return counts.get(ERROR);
}
@InterfaceAudience.Private
public static long getWarn() {
return counts.get(WARN);
}
@InterfaceAudience.Private
public static long getInfo() {
return counts.get(INFO);
}
@Override
public void append(LoggingEvent event) {
Level level = event.getLevel();
// depends on the api, == might not work
// see HADOOP-7055 for details
if (level.equals(Level.INFO)) {
counts.incr(INFO);
}
else if (level.equals(Level.WARN)) {
counts.incr(WARN);
}
else if (level.equals(Level.ERROR)) {
counts.incr(ERROR);
}
else if (level.equals(Level.FATAL)) {
counts.incr(FATAL);
}
}
@Override
public void close() {
}
@Override
public boolean requiresLayout() {
return false;
}
}
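Besides the log4j.properties line shown earlier, the appender can also be attached programmatically. A hedged sketch against the log4j 1.2 API; the wrapper class is illustrative:

```java
import org.apache.hadoop.log.metrics.EventCounter;
import org.apache.log4j.Logger;

class WireEventCounter {
  public static void main(String[] args) {
    // Equivalent to log4j.appender.EventCounter=... in log4j.properties.
    Logger.getRootLogger().addAppender(new EventCounter());
    Logger.getRootLogger().error("boom");        // counted as one error event
    System.out.println(EventCounter.getError()); // reads back the count
  }
}
```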

View File

@@ -42,7 +42,7 @@
* myContextName.fileName=/tmp/metrics.log
* myContextName.period=5
* </pre>
* @deprecated use {@link org.apache.hadoop.metrics2.sink.FileSink} instead.
* @see org.apache.hadoop.metrics2.sink.FileSink for metrics 2.0.
*/
@InterfaceAudience.Public
@InterfaceStability.Evolving

View File

@@ -17,82 +17,17 @@
*/
package org.apache.hadoop.metrics.jvm;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.log4j.AppenderSkeleton;
import org.apache.log4j.Level;
import org.apache.log4j.spi.LoggingEvent;
/**
* A log4J Appender that simply counts logging events in three levels:
* fatal, error and warn.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class EventCounter extends AppenderSkeleton {
private static final int FATAL = 0;
private static final int ERROR = 1;
private static final int WARN = 2;
private static final int INFO = 3;
private static class EventCounts {
private final long[] counts = { 0, 0, 0, 0 };
private synchronized void incr(int i) {
++counts[i];
}
private synchronized long get(int i) {
return counts[i];
}
}
private static EventCounts counts = new EventCounts();
public static long getFatal() {
return counts.get(FATAL);
}
public static long getError() {
return counts.get(ERROR);
}
public static long getWarn() {
return counts.get(WARN);
}
public static long getInfo() {
return counts.get(INFO);
}
public void append(LoggingEvent event) {
Level level = event.getLevel();
if (level == Level.INFO) {
counts.incr(INFO);
}
else if (level == Level.WARN) {
counts.incr(WARN);
}
else if (level == Level.ERROR) {
counts.incr(ERROR);
}
else if (level == Level.FATAL) {
counts.incr(FATAL);
}
public class EventCounter extends org.apache.hadoop.log.metrics.EventCounter {
}
// Strange: these two methods are abstract in AppenderSkeleton, but not
// included in the javadoc (log4j 1.2.13).
public void close() {
}
public boolean requiresLayout() {
return false;
}
static {
// The logging system is not started yet.
System.err.println("WARNING: "+ EventCounter.class.getName() +
" is deprecated. Please use "+
org.apache.hadoop.log.metrics.EventCounter.class.getName() +
" in all the log4j.properties files.");
}
}

View File

@@ -121,8 +121,9 @@ synchronized String newSourceName(String name, boolean dupOK) {
if (sourceNames.map.containsKey(name)) {
if (dupOK) {
return name;
} else if (!miniClusterMode) {
throw new MetricsException("Metrics source "+ name +" already exists!");
}
throw new MetricsException("Metrics source "+ name +" already exists!");
}
return sourceNames.uniqueName(name);
}

View File

@@ -18,16 +18,150 @@
package org.apache.hadoop.metrics2.source;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryMXBean;
import java.lang.management.MemoryUsage;
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import static java.lang.Thread.State.*;
import java.lang.management.GarbageCollectorMXBean;
import java.util.Map;
import java.util.List;
import com.google.common.collect.Maps;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.log.metrics.EventCounter;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.Interns;
import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*;
import static org.apache.hadoop.metrics2.impl.MsInfo.*;
/**
* JVM related metrics. Mostly used by various servers as part of the metrics
* they export.
* JVM and logging related metrics.
* Mostly used by various servers as a part of the metrics they export.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class JvmMetrics {
// placeholder for javadoc to prevent broken links, until
// HADOOP-6920
public class JvmMetrics implements MetricsSource {
enum Singleton {
INSTANCE;
JvmMetrics impl;
synchronized JvmMetrics init(String processName, String sessionId) {
if (impl == null) {
impl = create(processName, sessionId, DefaultMetricsSystem.instance());
}
return impl;
}
}
static final float M = 1024*1024;
final MemoryMXBean memoryMXBean = ManagementFactory.getMemoryMXBean();
final List<GarbageCollectorMXBean> gcBeans =
ManagementFactory.getGarbageCollectorMXBeans();
final ThreadMXBean threadMXBean = ManagementFactory.getThreadMXBean();
final String processName, sessionId;
final Map<String, MetricsInfo[]> gcInfoCache = Maps.newHashMap();
JvmMetrics(String processName, String sessionId) {
this.processName = processName;
this.sessionId = sessionId;
}
public static JvmMetrics create(String processName, String sessionId,
MetricsSystem ms) {
return ms.register(JvmMetrics.name(), JvmMetrics.description(),
new JvmMetrics(processName, sessionId));
}
public static JvmMetrics initSingleton(String processName, String sessionId) {
return Singleton.INSTANCE.init(processName, sessionId);
}
@Override
public void getMetrics(MetricsCollector collector, boolean all) {
MetricsRecordBuilder rb = collector.addRecord(JvmMetrics)
.setContext("jvm").tag(ProcessName, processName)
.tag(SessionId, sessionId);
getMemoryUsage(rb);
getGcUsage(rb);
getThreadUsage(rb);
getEventCounters(rb);
}
private void getMemoryUsage(MetricsRecordBuilder rb) {
MemoryUsage memNonHeap = memoryMXBean.getNonHeapMemoryUsage();
MemoryUsage memHeap = memoryMXBean.getHeapMemoryUsage();
rb.addGauge(MemNonHeapUsedM, memNonHeap.getUsed() / M)
.addGauge(MemNonHeapCommittedM, memNonHeap.getCommitted() / M)
.addGauge(MemHeapUsedM, memHeap.getUsed() / M)
.addGauge(MemHeapCommittedM, memHeap.getCommitted() / M);
}
private void getGcUsage(MetricsRecordBuilder rb) {
long count = 0;
long timeMillis = 0;
for (GarbageCollectorMXBean gcBean : gcBeans) {
long c = gcBean.getCollectionCount();
long t = gcBean.getCollectionTime();
MetricsInfo[] gcInfo = getGcInfo(gcBean.getName());
rb.addCounter(gcInfo[0], c).addCounter(gcInfo[1], t);
count += c;
timeMillis += t;
}
rb.addCounter(GcCount, count)
.addCounter(GcTimeMillis, timeMillis);
}
private synchronized MetricsInfo[] getGcInfo(String gcName) {
MetricsInfo[] gcInfo = gcInfoCache.get(gcName);
if (gcInfo == null) {
gcInfo = new MetricsInfo[2];
gcInfo[0] = Interns.info("GcCount"+ gcName, "GC Count for "+ gcName);
gcInfo[1] = Interns.info("GcTimeMillis"+ gcName, "GC Time for "+ gcName);
gcInfoCache.put(gcName, gcInfo);
}
return gcInfo;
}
private void getThreadUsage(MetricsRecordBuilder rb) {
int threadsNew = 0;
int threadsRunnable = 0;
int threadsBlocked = 0;
int threadsWaiting = 0;
int threadsTimedWaiting = 0;
int threadsTerminated = 0;
long threadIds[] = threadMXBean.getAllThreadIds();
for (ThreadInfo threadInfo : threadMXBean.getThreadInfo(threadIds, 0)) {
if (threadInfo == null) continue; // race protection
switch (threadInfo.getThreadState()) {
case NEW: threadsNew++; break;
case RUNNABLE: threadsRunnable++; break;
case BLOCKED: threadsBlocked++; break;
case WAITING: threadsWaiting++; break;
case TIMED_WAITING: threadsTimedWaiting++; break;
case TERMINATED: threadsTerminated++; break;
}
}
rb.addGauge(ThreadsNew, threadsNew)
.addGauge(ThreadsRunnable, threadsRunnable)
.addGauge(ThreadsBlocked, threadsBlocked)
.addGauge(ThreadsWaiting, threadsWaiting)
.addGauge(ThreadsTimedWaiting, threadsTimedWaiting)
.addGauge(ThreadsTerminated, threadsTerminated);
}
private void getEventCounters(MetricsRecordBuilder rb) {
rb.addCounter(LogFatal, EventCounter.getFatal())
.addCounter(LogError, EventCounter.getError())
.addCounter(LogWarn, EventCounter.getWarn())
.addCounter(LogInfo, EventCounter.getInfo());
}
}
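A hedged sketch of how a daemon would publish these metrics; the process name below is illustrative, and initSingleton() is the variant for callers that cannot guarantee a single registration:

```java
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;

class JvmMetricsSetup {
  static void start(String sessionId) {
    // Registers the source; the metrics system then polls getMetrics()
    // on its configured period.
    JvmMetrics.create("ExampleDaemon", sessionId,
        DefaultMetricsSystem.instance());
  }
}
```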

View File

@@ -0,0 +1,61 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.source;
import com.google.common.base.Objects;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
/**
* JVM and logging related metrics info instances
*/
@InterfaceAudience.Private
public enum JvmMetricsInfo implements MetricsInfo {
JvmMetrics("JVM related metrics etc."), // record info
// metrics
MemNonHeapUsedM("Non-heap memory used in MB"),
MemNonHeapCommittedM("Non-heap memory committed in MB"),
MemHeapUsedM("Heap memory used in MB"),
MemHeapCommittedM("Heap memory committed in MB"),
GcCount("Total GC count"),
GcTimeMillis("Total GC time in milliseconds"),
ThreadsNew("Number of new threads"),
ThreadsRunnable("Number of runnable threads"),
ThreadsBlocked("Number of blocked threads"),
ThreadsWaiting("Number of waiting threads"),
ThreadsTimedWaiting("Number of timed waiting threads"),
ThreadsTerminated("Number of terminated threads"),
LogFatal("Total number of fatal log events"),
LogError("Total number of error log events"),
LogWarn("Total number of warning log events"),
LogInfo("Total number of info log events");
private final String desc;
JvmMetricsInfo(String desc) { this.desc = desc; }
@Override public String description() { return desc; }
@Override public String toString() {
return Objects.toStringHelper(this)
.add("name", name()).add("description", desc)
.toString();
}
}

View File

@@ -52,14 +52,11 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics.MetricsContext;
import org.apache.hadoop.metrics.MetricsRecord;
import org.apache.hadoop.metrics.MetricsUtil;
import org.apache.hadoop.metrics.Updater;
import org.apache.hadoop.metrics.util.MetricsBase;
import org.apache.hadoop.metrics.util.MetricsRegistry;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.lib.MutableRate;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Shell;
@@ -87,34 +84,15 @@ public class UserGroupInformation {
* UgiMetrics maintains UGI activity statistics
* and publishes them through the metrics interfaces.
*/
static class UgiMetrics implements Updater {
final MetricsTimeVaryingRate loginSuccess;
final MetricsTimeVaryingRate loginFailure;
private final MetricsRecord metricsRecord;
private final MetricsRegistry registry;
@Metrics(about="User and group related metrics", context="ugi")
static class UgiMetrics {
@Metric("Rate of successful kerberos logins and latency (milliseconds)")
MutableRate loginSuccess;
@Metric("Rate of failed kerberos logins and latency (milliseconds)")
MutableRate loginFailure;
UgiMetrics() {
registry = new MetricsRegistry();
loginSuccess = new MetricsTimeVaryingRate("loginSuccess", registry,
"Rate of successful kerberos logins and time taken in milliseconds");
loginFailure = new MetricsTimeVaryingRate("loginFailure", registry,
"Rate of failed kerberos logins and time taken in milliseconds");
final MetricsContext metricsContext = MetricsUtil.getContext("ugi");
metricsRecord = MetricsUtil.createRecord(metricsContext, "ugi");
metricsContext.registerUpdater(this);
}
/**
* Push the metrics to the monitoring subsystem on doUpdate() call.
*/
@Override
public void doUpdates(final MetricsContext context) {
synchronized (this) {
for (MetricsBase m : registry.getMetricsList()) {
m.pushMetric(metricsRecord);
}
}
metricsRecord.update();
static UgiMetrics create() {
return DefaultMetricsSystem.instance().register(new UgiMetrics());
}
}
@@ -180,7 +158,7 @@ public boolean logout() throws LoginException {
}
/** Metrics to track UGI activity */
static UgiMetrics metrics = new UgiMetrics();
static UgiMetrics metrics = UgiMetrics.create();
/** Are the static variables that depend on configuration initialized? */
private static boolean isInitialized = false;
/** Should we use Kerberos configuration? */
@@ -630,13 +608,13 @@ static void loginUserFromKeytab(String user,
new LoginContext(HadoopConfiguration.KEYTAB_KERBEROS_CONFIG_NAME, subject);
start = System.currentTimeMillis();
login.login();
metrics.loginSuccess.inc(System.currentTimeMillis() - start);
metrics.loginSuccess.add(System.currentTimeMillis() - start);
loginUser = new UserGroupInformation(subject);
loginUser.setLogin(login);
loginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
} catch (LoginException le) {
if (start > 0) {
metrics.loginFailure.inc(System.currentTimeMillis() - start);
metrics.loginFailure.add(System.currentTimeMillis() - start);
}
throw new IOException("Login failure for " + user + " from keytab " +
path, le);
@@ -694,12 +672,12 @@ public synchronized void reloginFromKeytab()
LOG.info("Initiating re-login for " + keytabPrincipal);
start = System.currentTimeMillis();
login.login();
metrics.loginSuccess.inc(System.currentTimeMillis() - start);
metrics.loginSuccess.add(System.currentTimeMillis() - start);
setLogin(login);
}
} catch (LoginException le) {
if (start > 0) {
metrics.loginFailure.inc(System.currentTimeMillis() - start);
metrics.loginFailure.add(System.currentTimeMillis() - start);
}
throw new IOException("Login failure for " + keytabPrincipal +
" from keytab " + keytabFile, le);
@@ -779,7 +757,7 @@ static UserGroupInformation loginUserFromKeytabAndReturnUGI(String user,
start = System.currentTimeMillis();
login.login();
metrics.loginSuccess.inc(System.currentTimeMillis() - start);
metrics.loginSuccess.add(System.currentTimeMillis() - start);
UserGroupInformation newLoginUser = new UserGroupInformation(subject);
newLoginUser.setLogin(login);
newLoginUser.setAuthenticationMethod(AuthenticationMethod.KERBEROS);
@@ -787,7 +765,7 @@ static UserGroupInformation loginUserFromKeytabAndReturnUGI(String user,
return newLoginUser;
} catch (LoginException le) {
if (start > 0) {
metrics.loginFailure.inc(System.currentTimeMillis() - start);
metrics.loginFailure.add(System.currentTimeMillis() - start);
}
throw new IOException("Login failure for " + user + " from keytab " +
path, le);
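The login paths above all share one timing idiom: MutableRate.add(elapsed) replaces the old MetricsTimeVaryingRate.inc(elapsed). A hedged, self-contained sketch with illustrative names; note that a field named loginSuccess is what yields the LoginSuccessNumOps/LoginSuccessAvgTime metrics asserted in the tests further down:

```java
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.MutableRate;

// Hypothetical class, not part of this commit.
@Metrics(context="example")
class LoginTiming {
  @Metric("Rate of successful logins") MutableRate loginSuccess;

  void timedLogin(Runnable login) {
    long start = System.currentTimeMillis();
    login.run();
    // Each sample feeds both the NumOps counter and the AvgTime gauge.
    loginSuccess.add(System.currentTimeMillis() - start);
  }
}
```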

View File

@@ -18,6 +18,8 @@
package org.apache.hadoop.util;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopVersionAnnotation;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -29,6 +31,8 @@
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class VersionInfo {
private static final Log LOG = LogFactory.getLog(VersionInfo.class);
private static Package myPackage;
private static HadoopVersionAnnotation version;
@@ -112,6 +116,7 @@ public static String getBuildVersion(){
}
public static void main(String[] args) {
LOG.debug("version: "+ version);
System.out.println("Hadoop " + getVersion());
System.out.println("Subversion " + getUrl() + " -r " + getRevision());
System.out.println("Compiled by " + getUser() + " on " + getDate());

View File

@@ -25,25 +25,22 @@
import java.lang.management.ThreadInfo;
import java.lang.management.ThreadMXBean;
import java.lang.reflect.Method;
import java.util.Arrays;
import junit.framework.TestCase;
import java.util.Arrays;
import org.apache.commons.logging.*;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.UTF8;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.metrics.spi.NullContext;
import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;
import org.apache.hadoop.security.AccessControlException;
import static org.apache.hadoop.test.MetricsAsserts.*;
import static org.mockito.Mockito.*;
@@ -288,22 +285,17 @@ public void testCalls(Configuration conf) throws Exception {
assertEquals(stringResult, null);
// Check rpcMetrics
server.rpcMetrics.doUpdates(new NullContext());
assertEquals(3, server.rpcMetrics.rpcProcessingTime.getPreviousIntervalNumOps());
assertTrue(server.rpcMetrics.sentBytes.getPreviousIntervalValue() > 0);
assertTrue(server.rpcMetrics.receivedBytes.getPreviousIntervalValue() > 0);
MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name());
assertCounter("RpcProcessingTimeNumOps", 3L, rb);
assertCounterGt("SentBytes", 0L, rb);
assertCounterGt("ReceivedBytes", 0L, rb);
// Number of calls to echo method should be 2
server.rpcDetailedMetrics.doUpdates(new NullContext());
MetricsTimeVaryingRate metrics =
(MetricsTimeVaryingRate)server.rpcDetailedMetrics.registry.get("echo");
assertEquals(2, metrics.getPreviousIntervalNumOps());
rb = getMetrics(server.rpcDetailedMetrics.name());
assertCounter("EchoNumOps", 2L, rb);
// Number of calls to ping method should be 1
metrics =
(MetricsTimeVaryingRate)server.rpcDetailedMetrics.registry.get("ping");
assertEquals(1, metrics.getPreviousIntervalNumOps());
assertCounter("PingNumOps", 1L, rb);
String[] stringResults = proxy.echo(new String[]{"foo","bar"});
assertTrue(Arrays.equals(stringResults, new String[]{"foo","bar"}));
@@ -426,23 +418,16 @@ private void doRPCs(Configuration conf, boolean expectFailure) throws Exception
if (proxy != null) {
RPC.stopProxy(proxy);
}
MetricsRecordBuilder rb = getMetrics(server.rpcMetrics.name());
if (expectFailure) {
assertEquals("Wrong number of authorizationFailures ", 1,
server.getRpcMetrics().authorizationFailures
.getCurrentIntervalValue());
assertCounter("RpcAuthorizationFailures", 1, rb);
} else {
assertEquals("Wrong number of authorizationSuccesses ", 1,
server.getRpcMetrics().authorizationSuccesses
.getCurrentIntervalValue());
assertCounter("RpcAuthorizationSuccesses", 1, rb);
}
//since we don't have authentication turned ON, we should see
// 0 for the authentication successes and 0 for failure
assertEquals("Wrong number of authenticationFailures ", 0,
server.getRpcMetrics().authenticationFailures
.getCurrentIntervalValue());
assertEquals("Wrong number of authenticationSuccesses ", 0,
server.getRpcMetrics().authenticationSuccesses
.getCurrentIntervalValue());
assertCounter("RpcAuthenticationFailures", 0, rb);
assertCounter("RpcAuthenticationSuccesses", 0, rb);
}
}
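The rewritten assertions rely on the MetricsAsserts helpers: getMetrics() snapshots a source (or a registered source name) into a mocked MetricsRecordBuilder, and assertCounter/assertGauge* verify individual values by name. A hedged sketch reusing the hypothetical ExampleMetrics from the Server section above:

```java
import static org.apache.hadoop.test.MetricsAsserts.*;

import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.junit.Test;

public class TestExampleMetrics {
  @Test public void testReceivedBytes() {
    ExampleMetrics m = ExampleMetrics.create(); // hypothetical source
    m.incrReceivedBytes(42);
    MetricsRecordBuilder rb = getMetrics("ExampleMetrics");
    assertCounter("ReceivedBytes", 42L, rb);    // name derived from the field
  }
}
```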

View File

@@ -0,0 +1,50 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.metrics2.source;
import org.junit.Test;
import static org.mockito.Mockito.*;
import static org.apache.hadoop.test.MetricsAsserts.*;
import org.apache.hadoop.metrics2.MetricsCollector;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import static org.apache.hadoop.metrics2.source.JvmMetricsInfo.*;
import static org.apache.hadoop.metrics2.impl.MsInfo.*;
public class TestJvmMetrics {
@Test public void testPresence() {
MetricsRecordBuilder rb = getMetrics(new JvmMetrics("test", "test"));
MetricsCollector mc = rb.parent();
verify(mc).addRecord(JvmMetrics);
verify(rb).tag(ProcessName, "test");
verify(rb).tag(SessionId, "test");
for (JvmMetricsInfo info : JvmMetricsInfo.values()) {
if (info.name().startsWith("Mem"))
verify(rb).addGauge(eq(info), anyFloat());
else if (info.name().startsWith("Gc"))
verify(rb).addCounter(eq(info), anyLong());
else if (info.name().startsWith("Threads"))
verify(rb).addGauge(eq(info), anyInt());
else if (info.name().startsWith("Log"))
verify(rb).addCounter(eq(info), anyLong());
}
}
}

View File

@@ -33,12 +33,14 @@
import javax.security.auth.login.LoginContext;
import junit.framework.Assert;
import org.junit.Test;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.junit.Test;
import static org.apache.hadoop.test.MetricsAsserts.*;
public class TestUserGroupInformation {
final private static String USER_NAME = "user1@HADOOP.APACHE.ORG";
@@ -335,18 +337,17 @@ public void testLoginModuleCommit() throws Exception {
Assert.assertTrue(user1 == user2);
}
public static void verifyLoginMetrics(int success, int failure)
public static void verifyLoginMetrics(long success, int failure)
throws IOException {
// Ensure metrics related to kerberos login is updated.
UserGroupInformation.UgiMetrics metrics = UserGroupInformation.metrics;
metrics.doUpdates(null);
MetricsRecordBuilder rb = getMetrics("UgiMetrics");
if (success > 0) {
assertEquals(success, metrics.loginSuccess.getPreviousIntervalNumOps());
assertTrue(metrics.loginSuccess.getPreviousIntervalAverageTime() > 0);
assertCounter("LoginSuccessNumOps", success, rb);
assertGaugeGt("LoginSuccessAvgTime", 0, rb);
}
if (failure > 0) {
assertEquals(failure, metrics.loginFailure.getPreviousIntervalNumOps());
assertTrue(metrics.loginFailure.getPreviousIntervalAverageTime() > 0);
assertCounter("LoginFailureNumOps", failure, rb);
assertGaugeGt("LoginFailureAvgTime", 0, rb);
}
}

View File

@@ -259,4 +259,15 @@
The switch condition fall through is intentional and for performance
purposes.
-->
<Match>
<Class name="org.apache.hadoop.log.EventCounter"/>
<!-- backward compatibility -->
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
</Match>
<Match>
<Class name="org.apache.hadoop.metrics.jvm.EventCounter"/>
<!-- backward compatibility -->
<Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
</Match>
</FindBugsFilter>