HDFS-7223. Tracing span description of IPC client is too long (iwasakims via cmccabe)
(cherry picked from commit 5b56ac4c72)
parent c99d89f2c6
commit caa658a015
@@ -212,9 +212,7 @@ public class ProtobufRpcEngine implements RpcEngine {
       // guard it in the if statement to make sure there isn't
       // any extra string manipulation.
       if (Trace.isTracing()) {
-        traceScope = Trace.startSpan(
-            method.getDeclaringClass().getCanonicalName() +
-            "." + method.getName());
+        traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
       }

       RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);
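The engine now delegates span naming to RpcClientUtil.methodToTraceString, shrinking the span description from the fully-qualified declaring-class name to a short Outer#method form. A minimal standalone Java sketch of the before/after naming, using java.util.List.size() purely as a stand-in method (SpanNameDemo and everything in it are illustrative, not part of the patch; for a top-level interface the new form reduces to getSimpleName(), while the real helper also walks nested classes, as shown in the sketch after the RpcClientUtil hunk below):

import java.lang.reflect.Method;
import java.util.List;

public class SpanNameDemo {
  public static void main(String[] args) throws Exception {
    Method method = List.class.getMethod("size");

    // Old scheme removed by this patch: fully-qualified class name + "." + method name.
    String oldName =
        method.getDeclaringClass().getCanonicalName() + "." + method.getName();

    // New scheme: short class name + "#" + method name.
    String newName =
        method.getDeclaringClass().getSimpleName() + "#" + method.getName();

    System.out.println(oldName);  // java.util.List.size
    System.out.println(newName);  // List#size
  }
}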
@@ -189,4 +189,25 @@ public class RpcClientUtil {
         .getProtocolMetaInfoProxy(inv.getConnectionId(), conf,
             NetUtils.getDefaultSocketFactory(conf)).getProxy();
   }
+
+  /**
+   * Convert an RPC method to a string.
+   * The format we want is 'MethodOuterClassShortName#methodName'.
+   *
+   * For example, if the method is:
+   *   org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.
+   *   ClientNamenodeProtocol.BlockingInterface.getServerDefaults
+   *
+   * the format we want is:
+   *   ClientNamenodeProtocol#getServerDefaults
+   */
+  public static String methodToTraceString(Method method) {
+    Class<?> clazz = method.getDeclaringClass();
+    while (true) {
+      Class<?> next = clazz.getEnclosingClass();
+      if (next == null || next.getEnclosingClass() == null) break;
+      clazz = next;
+    }
+    return clazz.getSimpleName() + "#" + method.getName();
+  }
 }
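The enclosing-class walk above is easiest to see with a protoc-style nesting of OuterProtos.Service.BlockingInterface. A self-contained sketch follows; FakeProtos, FakeProtocol, BlockingInterface and getServerDefaults are made-up stand-ins mirroring the javadoc example, while the helper body is copied from the patch:

import java.lang.reflect.Method;

// Top-level stand-in mimicking a protoc-generated outer class with a nested
// service wrapper and blocking stub interface.
class FakeProtos {
  interface FakeProtocol {
    interface BlockingInterface {
      void getServerDefaults();
    }
  }
}

public class MethodToTraceStringDemo {
  // Same walk as RpcClientUtil.methodToTraceString: climb the enclosing-class
  // chain and stop one level below the outermost class.
  static String methodToTraceString(Method method) {
    Class<?> clazz = method.getDeclaringClass();
    while (true) {
      Class<?> next = clazz.getEnclosingClass();
      if (next == null || next.getEnclosingClass() == null) break;
      clazz = next;
    }
    return clazz.getSimpleName() + "#" + method.getName();
  }

  public static void main(String[] args) throws Exception {
    Method m = FakeProtos.FakeProtocol.BlockingInterface.class
        .getMethod("getServerDefaults");
    // Declaring class is BlockingInterface; the walk stops at FakeProtocol,
    // one level below the outermost FakeProtos.
    System.out.println(methodToTraceString(m));  // FakeProtocol#getServerDefaults
  }
}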
@@ -235,9 +235,7 @@ public class WritableRpcEngine implements RpcEngine {
       }
       TraceScope traceScope = null;
       if (Trace.isTracing()) {
-        traceScope = Trace.startSpan(
-            method.getDeclaringClass().getCanonicalName() +
-            "." + method.getName());
+        traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
       }
       ObjectWritable value;
       try {
@@ -44,6 +44,9 @@ Release 2.7.0 - UNRELEASED
     HDFS-7257. Add the time of last HA state transition to NN's /jmx page.
     (Charles Lamb via wheat9)

+    HDFS-7223. Tracing span description of IPC client is too long (iwasakims
+    via cmccabe)
+
   OPTIMIZATIONS

   BUG FIXES
@@ -83,15 +83,15 @@ public class TestTracing {
     String[] expectedSpanNames = {
       "testWriteTraceHooks",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.create",
+      "ClientNamenodeProtocol#create",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.fsync",
+      "ClientNamenodeProtocol#fsync",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.complete",
+      "ClientNamenodeProtocol#complete",
       "DFSOutputStream",
       "OpWriteBlockProto",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.addBlock"
+      "ClientNamenodeProtocol#addBlock"
     };
     assertSpanNamesFound(expectedSpanNames);

@@ -162,7 +162,7 @@ public class TestTracing {
     String[] expectedSpanNames = {
       "testReadTraceHooks",
       "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
-      "org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ClientNamenodeProtocol.BlockingInterface.getBlockLocations",
+      "ClientNamenodeProtocol#getBlockLocations",
       "OpReadBlockProto"
     };
     assertSpanNamesFound(expectedSpanNames);
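The expected-name arrays above are verified by TestTracing's assertSpanNamesFound helper, which checks the spans captured by the test's span receiver. The rough sketch below only illustrates the shape of that check against a pre-collected set of span descriptions; SpanNameAssertions and its contents are hypothetical, not the actual test code:

import java.util.Arrays;
import java.util.Set;
import java.util.TreeSet;

public class SpanNameAssertions {
  // Fail if any expected span description is missing from the received set.
  static void assertSpanNamesFound(Set<String> receivedSpanNames,
                                   String[] expectedSpanNames) {
    for (String expected : expectedSpanNames) {
      if (!receivedSpanNames.contains(expected)) {
        throw new AssertionError("span " + expected + " not found");
      }
    }
  }

  public static void main(String[] args) {
    // Pretend these descriptions were collected from a span receiver.
    Set<String> received = new TreeSet<>(Arrays.asList(
        "testReadTraceHooks",
        "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
        "ClientNamenodeProtocol#getBlockLocations",
        "OpReadBlockProto"));
    // Passes with the new short span names introduced by this patch.
    assertSpanNamesFound(received,
        new String[] { "ClientNamenodeProtocol#getBlockLocations" });
  }
}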