HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)

(cherry picked from commit a78d5074fb)
Colin Patrick Mccabe 2015-07-01 17:57:11 -07:00
parent d61dd10b50
commit 490bef0856
4 changed files with 38 additions and 10 deletions

hadoop-common-project/hadoop-common/CHANGES.txt

@@ -170,6 +170,8 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-12124. Add HTrace support for FsShell (cmccabe)
 
+    HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java

@@ -210,4 +210,28 @@ public static String methodToTraceString(Method method) {
     }
     return clazz.getSimpleName() + "#" + method.getName();
   }
+
+  /**
+   * Convert the full name of an RPC method to a string.
+   * The format we want is 'ClassShortName#methodName': the short
+   * name of the declaring class, a '#', and the method name.
+   *
+   * For example, if the full method name is:
+   *   org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
+   *
+   * the format we want is:
+   *   ClientProtocol#getBlockLocations
+   */
+  public static String toTraceName(String fullName) {
+    int lastPeriod = fullName.lastIndexOf('.');
+    if (lastPeriod < 0) {
+      return fullName;
+    }
+    int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
+    if (secondLastPeriod < 0) {
+      return fullName;
+    }
+    return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
+        fullName.substring(lastPeriod + 1);
+  }
 }
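To make the truncation rules concrete, here is a small standalone sketch of how the new helper behaves (the demo class is ours, not part of the commit; the behavior follows directly from the code above):

import org.apache.hadoop.ipc.RpcClientUtil;

public class TraceNameDemo {
  public static void main(String[] args) {
    // Two or more periods: keep only the last two dot-separated components.
    System.out.println(RpcClientUtil.toTraceName(
        "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations"));
    // prints: ClientProtocol#getBlockLocations

    // Exactly one period: the helper needs two, so the name is
    // returned unchanged.
    System.out.println(RpcClientUtil.toTraceName("ClientProtocol.fsync"));
    // prints: ClientProtocol.fsync

    // No period at all: returned unchanged.
    System.out.println(RpcClientUtil.toTraceName("fsync"));
    // prints: fsync
  }
}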

hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -1878,7 +1878,9 @@ private void processRpcRequest(RpcRequestHeaderProto header,
       // If the incoming RPC included tracing info, always continue the trace
       TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
         header.getTraceInfo().getParentId());
-      traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+      traceSpan = Trace.startSpan(
+          RpcClientUtil.toTraceName(rpcRequest.toString()),
+          parentSpan).detach();
     }
     Call call = new Call(header.getCallId(), header.getRetryCount(),
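For protobuf-based RPCs, rpcRequest.toString() is the fully-qualified 'declaringClassProtocolName.methodName' string, which is what made the server-side span names overly long. Below is a minimal sketch of the change's effect, assuming the htrace 3.x API that this version of Server.java uses; the request string and parent span ids are placeholders, not values from the commit:

import org.apache.hadoop.ipc.RpcClientUtil;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceInfo;

public class ServerSpanNameSketch {
  public static void main(String[] args) {
    // What rpcRequest.toString() would return for a protobuf RPC.
    String requestString =
        "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations";

    // In Server, the trace id and parent span id come from the RPC
    // request header; these are placeholder values.
    TraceInfo parentSpan = new TraceInfo(1L, 2L);

    // Before the change the span was named with the full request string;
    // after it, with the shortened form.
    Span traceSpan = Trace.startSpan(
        RpcClientUtil.toTraceName(requestString), parentSpan).detach();
    System.out.println(traceSpan.getDescription());
    // prints: ClientProtocol#getBlockLocations
  }
}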

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java

@@ -67,18 +67,18 @@ public void writeWithTracing() throws Exception {
     String[] expectedSpanNames = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
       "DFSOutputStream#close",
       "dataStreamer",
       "OpWriteBlockProto",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
+      "ClientProtocol#addBlock",
       "ClientNamenodeProtocol#addBlock"
     };
     SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
@@ -95,11 +95,11 @@ public void writeWithTracing() throws Exception {
     // and children of them are exception.
     String[] spansInTopTrace = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
@@ -113,7 +113,7 @@ public void writeWithTracing() throws Exception {
     // test for timeline annotation added by HADOOP-11242
     Assert.assertEquals("called",
-        map.get("org.apache.hadoop.hdfs.protocol.ClientProtocol.create")
+        map.get("ClientProtocol#create")
         .get(0).getTimelineAnnotations()
         .get(0).getMessage());
@@ -131,7 +131,7 @@ public void readWithTracing() throws Exception {
     String[] expectedSpanNames = {
       "testReadTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
+      "ClientProtocol#getBlockLocations",
       "ClientNamenodeProtocol#getBlockLocations",
       "OpReadBlockProto"
     };