diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 3b11a94c7d8..9d7398a16be 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -170,6 +170,8 @@ Release 2.8.0 - UNRELEASED
 
     HADOOP-12124. Add HTrace support for FsShell (cmccabe)
 
+    HADOOP-12171. Shorten overly-long htrace span names for server (cmccabe)
+
   OPTIMIZATIONS
 
     HADOOP-11785. Reduce the number of listStatus operation in distcp
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
index d9bd71b1f04..da1e69965a9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
@@ -210,4 +210,28 @@ public class RpcClientUtil {
     }
     return clazz.getSimpleName() + "#" + method.getName();
   }
+
+  /**
+   * Convert an RPC class method to a string.
+   * The format we want is
+   * 'ShortClassName#methodName'.
+   *
+   * For example, if the full class name is:
+   * org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations
+   *
+   * the format we want is:
+   * ClientProtocol#getBlockLocations
+   */
+  public static String toTraceName(String fullName) {
+    int lastPeriod = fullName.lastIndexOf('.');
+    if (lastPeriod < 0) {
+      return fullName;
+    }
+    int secondLastPeriod = fullName.lastIndexOf('.', lastPeriod - 1);
+    if (secondLastPeriod < 0) {
+      return fullName;
+    }
+    return fullName.substring(secondLastPeriod + 1, lastPeriod) + "#" +
+        fullName.substring(lastPeriod + 1);
+  }
 }
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 72703343f4a..baad622b13f 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -1878,7 +1878,9 @@ public abstract class Server {
       // If the incoming RPC included tracing info, always continue the trace
       TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
           header.getTraceInfo().getParentId());
-      traceSpan = Trace.startSpan(rpcRequest.toString(), parentSpan).detach();
+      traceSpan = Trace.startSpan(
+          RpcClientUtil.toTraceName(rpcRequest.toString()),
+          parentSpan).detach();
     }
 
     Call call = new Call(header.getCallId(), header.getRetryCount(),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
index 58b3659606d..c3d2c73bde7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTracing.java
@@ -67,18 +67,18 @@ public class TestTracing {
 
     String[] expectedSpanNames = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
       "DFSOutputStream#close",
       "dataStreamer",
       "OpWriteBlockProto",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.addBlock",
+      "ClientProtocol#addBlock",
       "ClientNamenodeProtocol#addBlock"
     };
     SetSpanReceiver.assertSpanNamesFound(expectedSpanNames);
@@ -95,11 +95,11 @@ public class TestTracing {
     // and children of them are exception.
     String[] spansInTopTrace = {
       "testWriteTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.create",
+      "ClientProtocol#create",
       "ClientNamenodeProtocol#create",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.fsync",
+      "ClientProtocol#fsync",
       "ClientNamenodeProtocol#fsync",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.complete",
+      "ClientProtocol#complete",
       "ClientNamenodeProtocol#complete",
       "newStreamForCreate",
       "DFSOutputStream#write",
@@ -113,7 +113,7 @@ public class TestTracing {
 
     // test for timeline annotation added by HADOOP-11242
     Assert.assertEquals("called",
-        map.get("org.apache.hadoop.hdfs.protocol.ClientProtocol.create")
+        map.get("ClientProtocol#create")
         .get(0).getTimelineAnnotations()
         .get(0).getMessage());
 
@@ -131,7 +131,7 @@ public class TestTracing {
     String[] expectedSpanNames = {
       "testReadTraceHooks",
-      "org.apache.hadoop.hdfs.protocol.ClientProtocol.getBlockLocations",
+      "ClientProtocol#getBlockLocations",
       "ClientNamenodeProtocol#getBlockLocations",
       "OpReadBlockProto"
     };