HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)

(cherry picked from commit a78d5074fb)
Colin Patrick Mccabe 2015-07-01 17:57:11 -07:00
parent d61dd10b50
commit 490bef0856
4 changed files with 38 additions and 10 deletions

CHANGES.txt

@@ -170,6 +170,8 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-12124. Add HTrace support for FsShell (cmccabe)
 
+    HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

RpcClientUtil.java

@@ -210,4 +210,28 @@ public static String methodToTraceString(Method method) {
     }
     return clazz.getSimpleName() + "#" + method.getName();
   }
+
+  /**
+   * Convert a fully qualified RPC method name to a short trace span name.
+   * The format we want is 'ClassShortName#methodName'.
+   *
+   * For example, if the full name is:
+   *   org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
+   *
+   * the format we want is:
+   *   ClientProtocol#getBlockLocations
+   */
+  public static String toTraceName(String fullName) {
+    int lastPeriod = fullName.lastIndexOf('.');
+    if (lastPeriod < 0) {
+      return fullName;
+    }
+    int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
+    if (secondLastPeriod < 0) {
+      return fullName;
+    }
+    return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
+        fullName.substring(lastPeriod + 1);
+  }
 }
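A minimal standalone sketch of the new helper (the ToTraceNameDemo class name and the sample inputs are hypothetical, not part of the patch). It copies the toTraceName() logic from the hunk above to show what the method returns, including the fall-through behavior for names with fewer than two '.' separators:

    public class ToTraceNameDemo {
      // Same logic as the RpcClientUtil#toTraceName hunk above.
      static String toTraceName(String fullName) {
        int lastPeriod = fullName.lastIndexOf('.');
        if (lastPeriod < 0) {
          return fullName;   // no '.' at all: return the name unchanged
        }
        int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
        if (secondLastPeriod < 0) {
          return fullName;   // only one '.': return the name unchanged
        }
        // Keep the last two components, joined with '#'.
        return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
            fullName.substring(lastPeriod + 1);
      }

      public static void main(String[] args) {
        System.out.println(toTraceName(
            "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations"));
        // Prints: ClientProtocol#getBlockLocations
        System.out.println(toTraceName("getBlockLocations")); // getBlockLocations
        System.out.println(toTraceName("Foo.bar"));           // Foo.bar
      }
    }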

Server.java

@@ -1878,7 +1878,9 @@ private void processRpcRequest(RpcRequestHeaderProto header,
       // If the incoming RPC included tracing info, always continue the trace
       TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
           header.getTraceInfo().getParentId());
-      traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+      traceSpan = Trace.startSpan(
+          RpcClientUtil.toTraceName(rpcRequest.toString()),
+          parentSpan).detach();
     }
     Call call = new Call(header.getCallId(), header.getRetryCount(),
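The hunk above is where the shortening takes effect: the server previously named each span with rpcRequest.toString(), which for these protobuf calls is the fully qualified 'package.Interface.method' string, as the old expected span names in the test diff below show. A short usage sketch (reusing the hypothetical ToTraceNameDemo helper from the sketch above; the sample string is illustrative, not captured from a live RPC):

    // Old span name vs. the shortened name the server now passes to Trace.startSpan():
    String longName = "org.apache.hadoop.hdfs.protocol.ClientProtocol.create";
    System.out.println(longName);                              // old: full package path
    System.out.println(ToTraceNameDemo.toTraceName(longName)); // new: ClientProtocol#create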

TestTracing.java

@@ -67,18 +67,18 @@ public void writeWithTracing() throws Exception {
     String[] expectedSpanNames = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
       "DFSOutputStream#close",
       "dataStreamer",
       "OpWriteBlockProto",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
+      "ClientProtocol#addBlock",
       "ClientNamenodeProtocol#addBlock"
     };
     SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
@@ -95,11 +95,11 @@ public void writeWithTracing() throws Exception {
     // and children of them are exception.
     String[] spansInTopTrace = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
@@ -113,7 +113,7 @@ public void writeWithTracing() throws Exception {
     // test for timeline annotation added by HADOOP-11242
     Assert.assertEquals("called",
-        map.get("org.apache.hadoop.hdfs.protocol.ClientProtocol.create")
+        map.get("ClientProtocol#create")
         .get(0).getTimelineAnnotations()
         .get(0).getMessage());
@@ -131,7 +131,7 @@ public void readWithTracing() throws Exception {
     String[] expectedSpanNames = {
       "testReadTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
+      "ClientProtocol#getBlockLocations",
       "ClientNamenodeProtocol#getBlockLocations",
       "OpReadBlockProto"
     };