HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
(cherry picked from commit a78d5074fb)
This commit is contained in:
parent
d61dd10b50
commit
490bef0856
|
@ -170,6 +170,8 @@ Release 2.8.0 - UNRELEASED
|
||||||
|
|
||||||
HADOOP-12124. Add HTrace support for FsShell (cmccabe)
|
HADOOP-12124. Add HTrace support for FsShell (cmccabe)
|
||||||
|
|
||||||
|
HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
|
||||||
|
|
||||||
OPTIMIZATIONS
|
OPTIMIZATIONS
|
||||||
|
|
||||||
HADOOP-11785. Reduce the number of listStatus operation in distcp
|
HADOOP-11785. Reduce the number of listStatus operation in distcp
|
||||||
|
|
|
@ -210,4 +210,28 @@ public class RpcClientUtil {
|
||||||
}
|
}
|
||||||
return clazz.getSimpleName() + "#" + method.getName();
|
return clazz.getSimpleName() + "#" + method.getName();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Convert an RPC class method to a string.
|
||||||
|
* The format we want is
|
||||||
|
* 'SecondOutermostClassShortName#OutermostClassShortName'.
|
||||||
|
*
|
||||||
|
* For example, if the full class name is:
|
||||||
|
* org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
|
||||||
|
*
|
||||||
|
* the format we want is:
|
||||||
|
* ClientProtocol#getBlockLocations
|
||||||
|
*/
|
||||||
|
public static String toTraceName(String fullName) {
|
||||||
|
int lastPeriod = fullName.lastIndexOf('.');
|
||||||
|
if (lastPeriod < 0) {
|
||||||
|
return fullName;
|
||||||
|
}
|
||||||
|
int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
|
||||||
|
if (secondLastPeriod < 0) {
|
||||||
|
return fullName;
|
||||||
|
}
|
||||||
|
return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
|
||||||
|
fullName.substring(lastPeriod + 1);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -1878,7 +1878,9 @@ public abstract class Server {
|
||||||
// If the incoming RPC included tracing info, always continue the trace
|
// If the incoming RPC included tracing info, always continue the trace
|
||||||
TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
|
TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
|
||||||
header.getTraceInfo().getParentId());
|
header.getTraceInfo().getParentId());
|
||||||
traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
|
traceSpan = Trace.startSpan(
|
||||||
|
RpcClientUtil.toTraceName(rpcRequest.toString()),
|
||||||
|
parentSpan).detach();
|
||||||
}
|
}
|
||||||
|
|
||||||
Call call = new Call(header.getCallId(), header.getRetryCount(),
|
Call call = new Call(header.getCallId(), header.getRetryCount(),
|
||||||
|
|
|
@ -67,18 +67,18 @@ public class TestTracing {
|
||||||
|
|
||||||
String[] expectedSpanNames = {
|
String[] expectedSpanNames = {
|
||||||
"testWriteTraceHooks",
|
"testWriteTraceHooks",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
|
"ClientProtocol#create",
|
||||||
"ClientNamenodeProtocol#create",
|
"ClientNamenodeProtocol#create",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
|
"ClientProtocol#fsync",
|
||||||
"ClientNamenodeProtocol#fsync",
|
"ClientNamenodeProtocol#fsync",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
|
"ClientProtocol#complete",
|
||||||
"ClientNamenodeProtocol#complete",
|
"ClientNamenodeProtocol#complete",
|
||||||
"newStreamForCreate",
|
"newStreamForCreate",
|
||||||
"DFSOutputStream#write",
|
"DFSOutputStream#write",
|
||||||
"DFSOutputStream#close",
|
"DFSOutputStream#close",
|
||||||
"dataStreamer",
|
"dataStreamer",
|
||||||
"OpWriteBlockProto",
|
"OpWriteBlockProto",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
|
"ClientProtocol#addBlock",
|
||||||
"ClientNamenodeProtocol#addBlock"
|
"ClientNamenodeProtocol#addBlock"
|
||||||
};
|
};
|
||||||
SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
|
SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
|
||||||
|
@ -95,11 +95,11 @@ public class TestTracing {
|
||||||
// and children of them are exception.
|
// and children of them are exception.
|
||||||
String[] spansInTopTrace = {
|
String[] spansInTopTrace = {
|
||||||
"testWriteTraceHooks",
|
"testWriteTraceHooks",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
|
"ClientProtocol#create",
|
||||||
"ClientNamenodeProtocol#create",
|
"ClientNamenodeProtocol#create",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
|
"ClientProtocol#fsync",
|
||||||
"ClientNamenodeProtocol#fsync",
|
"ClientNamenodeProtocol#fsync",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
|
"ClientProtocol#complete",
|
||||||
"ClientNamenodeProtocol#complete",
|
"ClientNamenodeProtocol#complete",
|
||||||
"newStreamForCreate",
|
"newStreamForCreate",
|
||||||
"DFSOutputStream#write",
|
"DFSOutputStream#write",
|
||||||
|
@ -113,7 +113,7 @@ public class TestTracing {
|
||||||
|
|
||||||
// test for timeline annotation added by HADOOP-11242
|
// test for timeline annotation added by HADOOP-11242
|
||||||
Assert.assertEquals("called",
|
Assert.assertEquals("called",
|
||||||
map.get("org.apache.hadoop.hdfs.protocol.ClientProtocol.create")
|
map.get("ClientProtocol#create")
|
||||||
.get(0).getTimelineAnnotations()
|
.get(0).getTimelineAnnotations()
|
||||||
.get(0).getMessage());
|
.get(0).getMessage());
|
||||||
|
|
||||||
|
@ -131,7 +131,7 @@ public class TestTracing {
|
||||||
|
|
||||||
String[] expectedSpanNames = {
|
String[] expectedSpanNames = {
|
||||||
"testReadTraceHooks",
|
"testReadTraceHooks",
|
||||||
"org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
|
"ClientProtocol#getBlockLocations",
|
||||||
"ClientNamenodeProtocol#getBlockLocations",
|
"ClientNamenodeProtocol#getBlockLocations",
|
||||||
"OpReadBlockProto"
|
"OpReadBlockProto"
|
||||||
};
|
};
|
||||||
|
|
Loading…
Reference in New Issue