HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)

Colin Patrick Mccabe 2015-07-01 17:57:11 -07:00
parent 0e4b06690f
commit a78d5074fb
4 changed files with 38 additions and 10 deletions

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -673,6 +673,8 @@ Release 2.8.0 - UNRELEASED
     HADOOP-12124. Add HTrace support for FsShell (cmccabe)
 
+    HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java

@@ -210,4 +210,28 @@ public class RpcClientUtil {
     }
     return clazz.getSimpleName() + "#" + method.getName();
   }
+
+  /**
+   * Convert the full name of an RPC class method to a tracing span name.
+   * The format we want is
+   * 'ShortClassName#methodName'.
+   *
+   * For example, if the full name is:
+   *   org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
+   *
+   * the name we want is:
+   *   ClientProtocol#getBlockLocations
+   */
+  public static String toTraceName(String fullName) {
+    int lastPeriod = fullName.lastIndexOf('.');
+    if (lastPeriod < 0) {
+      return fullName;
+    }
+    int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
+    if (secondLastPeriod < 0) {
+      return fullName;
+    }
+    return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
+        fullName.substring(lastPeriod + 1);
+  }
 }
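To make the conversion concrete, here is a short standalone sketch (not part of the patch; the demo class and its main method are hypothetical) that exercises the new helper, including the defensive cases where the input contains fewer than two periods:

// Hypothetical demo; the real helper is org.apache.hadoop.ipc.RpcClientUtil.toTraceName().
public class ToTraceNameDemo {
  public static void main(String[] args) {
    // Typical server-side RPC name: only the last two dotted components are kept.
    System.out.println(org.apache.hadoop.ipc.RpcClientUtil.toTraceName(
        "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations"));
    // prints: ClientProtocol#getBlockLocations

    // Inputs with fewer than two periods are returned unchanged.
    System.out.println(org.apache.hadoop.ipc.RpcClientUtil.toTraceName(
        "ClientProtocol.getBlockLocations"));
    // prints: ClientProtocol.getBlockLocations
    System.out.println(org.apache.hadoop.ipc.RpcClientUtil.toTraceName("create"));
    // prints: create
  }
}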

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1963,7 +1963,9 @@ public abstract class Server {
       // If the incoming RPC included tracing info, always continue the trace
       TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
           header.getTraceInfo().getParentId());
-      traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+      traceSpan = Trace.startSpan(
+          RpcClientUtil.toTraceName(rpcRequest.toString()),
+          parentSpan).detach();
     }
     Call call = new Call(header.getCallId(), header.getRetryCount(),
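For context, a minimal sketch of what this change does to the server-side span, assuming the org.apache.htrace 3.x Trace/TraceInfo/Span classes and the RpcClientUtil helper already used in the hunk above; the ServerSpanSketch class, its method, and the traceId/parentId parameters are hypothetical stand-ins for the values read from the RPC request header:

import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;

class ServerSpanSketch {
  // Starts a detached server-side span that continues the caller's trace.
  static Span startDetachedServerSpan(String rpcName, long traceId, long parentId) {
    TraceInfo parentSpan = new TraceInfo(traceId, parentId);
    // With this patch the span name is shortened first, e.g.
    // "org.apache.hadoop.hdfs.protocol.ClientProtocol.create" -> "ClientProtocol#create".
    return Trace.startSpan(RpcClientUtil.toTraceName(rpcName), parentSpan)
        .detach();
  }
}

In the server itself, the detached span presumably travels with the Call constructed on the following line until a handler picks the request up; only the span's name changes here.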

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java

@@ -67,18 +67,18 @@ public class TestTracing {
     String[] expectedSpanNames = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
       "DFSOutputStream#close",
       "dataStreamer",
       "OpWriteBlockProto",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
+      "ClientProtocol#addBlock",
       "ClientNamenodeProtocol#addBlock"
     };
     SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
@@ -95,11 +95,11 @@ public class TestTracing {
     // and children of them are exception.
     String[] spansInTopTrace = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
@@ -113,7 +113,7 @@ public class TestTracing {
     // test for timeline annotation added by HADOOP-11242
     Assert.assertEquals("called",
-        map.get("org.apache.hadoop.hdfs.protocol.ClientProtocol.create")
+        map.get("ClientProtocol#create")
             .get(0).getTimelineAnnotations()
             .get(0).getMessage());
@@ -131,7 +131,7 @@ public class TestTracing {
     String[] expectedSpanNames = {
       "testReadTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
+      "ClientProtocol#getBlockLocations",
       "ClientNamenodeProtocol#getBlockLocations",
       "OpReadBlockProto"
     };