diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2c0f70854e1..0689313dea1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -593,6 +593,8 @@ Release 2.8.0 - UNRELEASED
HADOOP-12440. TestRPC#testRPCServerShutdown did not produce the desired
thread states before shutting down. (Xiao Chen via mingma)
+ HADOOP-12447. Clean up some htrace integration issues (cmccabe)
+
OPTIMIZATIONS
HADOOP-12051. ProtobufRpcEngine.invoke() should use Exception.toString()
diff --git a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
index 78978550659..4cc6a0777fc 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/Tracing.md
@@ -85,8 +85,8 @@ You need to run the command against all servers if you want to update the config
  You need to specify the class name of the span receiver as the argument of the `-class` option.
  You can specify the configuration associated with the span receiver via `-Ckey=value` options.
- $ hadoop trace -add -class LocalFileSpanReceiver -Cdfs.htrace.local-file-span-receiver.path=/tmp/htrace.out -host 192.168.56.2:9000
- Added trace span receiver 2 with configuration dfs.htrace.local-file-span-receiver.path = /tmp/htrace.out
+ $ hadoop trace -add -class org.apache.htrace.core.LocalFileSpanReceiver -Chadoop.htrace.local.file.span.receiver.path=/tmp/htrace.out -host 192.168.56.2:9000
+ Added trace span receiver 2 with configuration hadoop.htrace.local.file.span.receiver.path = /tmp/htrace.out
$ hadoop trace -list -host 192.168.56.2:9000
ID CLASS
@@ -137,8 +137,7 @@ which start tracing span before invoking HDFS shell command.
FsShell shell = new FsShell();
conf.setQuietMode(false);
shell.setConf(conf);
- Tracer tracer = new Tracer.Builder().
- name("TracingFsShell).
+ Tracer tracer = new Tracer.Builder("TracingFsShell").
conf(TraceUtils.wrapHadoopConf("tracing.fs.shell.htrace.", conf)).
build();
int res = 0;
@@ -177,15 +176,15 @@ ProbabilitySampler.
  ```xml
    <property>
-     <name>dfs.client.htrace.spanreceiver.classes</name>
+     <name>hadoop.htrace.span.receiver.classes</name>
      <value>LocalFileSpanReceiver</value>
    </property>
    <property>
-     <name>dfs.client.htrace.sampler</name>
+     <name>fs.client.htrace.sampler.classes</name>
      <value>ProbabilitySampler</value>
    </property>
    <property>
-     <name>dfs.client.htrace.sampler.fraction</name>
-     <value>0.5</value>
+     <name>fs.client.htrace.sampler.fraction</name>
+     <value>0.01</value>
    </property>
  ```
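For readers adapting this outside the FsShell example, a minimal sketch of the prefix mechanism, assuming a Hadoop/HTrace classpath (the class name and `main` scaffolding are hypothetical): `TraceUtils.wrapHadoopConf("fs.client.htrace.", conf)` exposes the prefixed Hadoop keys to HTrace, so the tracer reads `fs.client.htrace.sampler.classes` as `sampler.classes`.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.Tracer;

public class SamplerConfSketch {  // hypothetical example class
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // same keys as the XML snippet above
    conf.set("fs.client.htrace.sampler.classes", "ProbabilitySampler");
    conf.set("fs.client.htrace.sampler.fraction", "0.01");
    // wrapHadoopConf resolves "sampler.classes" and "sampler.fraction"
    // against the "fs.client.htrace." prefix, so ~1% of requests get sampled
    Tracer tracer = new Tracer.Builder("SamplerConfSketch").
        conf(TraceUtils.wrapHadoopConf("fs.client.htrace.", conf)).
        build();
    tracer.close();
  }
}
```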
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 1a905559552..13cd782cc53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -153,8 +153,6 @@ public interface HdfsClientConfigKeys {
"dfs.client.test.drop.namenode.response.number";
int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
- // HDFS client HTrace configuration.
- String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 416b9e45c33..a9097177e10 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -64,9 +64,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
- public static final String DFS_CLIENT_HTRACE_SAMPLER_CLASSES =
- "dfs.client.htrace.sampler.classes";
-
// HA related configuration
public static final String DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
public static final long DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT = 50;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 19df7d6f5f0..a19fdd0bf4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -414,7 +414,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
UnresolvedPathException.class);
clientRpcServer.setTracer(nn.tracer);
if (serviceRpcServer != null) {
- clientRpcServer.setTracer(nn.tracer);
+ serviceRpcServer.setTracer(nn.tracer);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 3fe3d49e83b..1286d52d9a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -80,6 +80,7 @@ import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.Time;
import org.apache.htrace.core.Tracer;
@@ -201,7 +202,9 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
this.staleInterval =
conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
- this.tracer = new Tracer.Builder("NamenodeFsck").build();
+ this.tracer = new Tracer.Builder("NamenodeFsck").
+ conf(TraceUtils.wrapHadoopConf("namenode.fsck.htrace.", conf)).
+ build();
     for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
String key = it.next();
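As a hedged illustration of the new `namenode.fsck.htrace.` prefix (the receiver class and the `local.file.span.receiver.path` key are standard HTrace names; the output path and example class are hypothetical), fsck spans could be sent to a local file like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.Tracer;

public class FsckTracerSketch {  // hypothetical example class
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.set("namenode.fsck.htrace.span.receiver.classes",
        "org.apache.htrace.core.LocalFileSpanReceiver");
    conf.set("namenode.fsck.htrace.local.file.span.receiver.path",
        "/tmp/fsck-htrace.out");  // hypothetical path
    // mirrors the NamenodeFsck constructor change above
    Tracer tracer = new Tracer.Builder("NamenodeFsck").
        conf(TraceUtils.wrapHadoopConf("namenode.fsck.htrace.", conf)).
        build();
    tracer.close();
  }
}
```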
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index c5d2ed43cca..88ab54f7aff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2439,14 +2439,6 @@
-<property>
-  <name>dfs.client.htrace.sampler.classes</name>
-  <value></value>
-  <description>
-    The class names of the HTrace Samplers to use for the HDFS client.
-  </description>
-</property>
-
 <property>
   <name>dfs.ha.zkfc.nn.http.timeout.ms</name>
   <value>20000</value>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
index 50d548a8b4e..e39ab3e00d0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockTokenWithDFS.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -161,6 +162,7 @@ public class TestBlockTokenWithDFS {
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
+ setTracer(FsTracer.get(conf)).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
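A minimal sketch of the helper this test now uses, assuming only what the patch shows plus a Hadoop/HTrace classpath (the class and span name are hypothetical): `FsTracer.get(conf)` returns the per-process FileSystem client tracer, configured under the `fs.client.htrace.` prefix, which is why `BlockReaderFactory` can be handed the same tracer the client uses. The identical change in TestDataNodeVolumeFailure below follows the same pattern.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class FsTracerSketch {  // hypothetical example class
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Tracer tracer = FsTracer.get(conf);  // shared client tracer for this JVM
    try (TraceScope scope = tracer.newScope("readBlock")) {  // hypothetical span name
      // work done here is traced under the client's sampler/receiver settings
    }
  }
}
```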
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index cb50edc9549..2c4fcc5454a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.ReconfigurationException;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.BlockReader;
import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -515,6 +516,7 @@ public class TestDataNodeVolumeFailure {
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
setClientCacheContext(ClientContext.getFromConf(conf)).
setConfiguration(conf).
+ setTracer(FsTracer.get(conf)).
setRemotePeerFactory(new RemotePeerFactory() {
@Override
public Peer newConnectedPeer(InetSocketAddress addr,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
index b08866b5c8d..7e10d90b9e5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tracing/TestTraceAdmin.java
@@ -73,10 +73,10 @@ public class TestTraceAdmin {
Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
- "configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
+ "configuration hadoop.htrace.local.file.span.receiver.path = " + tracePath + NEWLINE,
runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
"-class", "org.apache.htrace.core.LocalFileSpanReceiver",
- "-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
+ "-Chadoop.htrace.local.file.span.receiver.path=" + tracePath));
String list =
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
Assert.assertTrue(list.startsWith("ret:0"));
@@ -87,10 +87,10 @@ public class TestTraceAdmin {
Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
Assert.assertEquals("ret:0, Added trace span receiver 2 with " +
- "configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
+ "configuration hadoop.htrace.local.file.span.receiver.path = " + tracePath + NEWLINE,
runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
"-class", "LocalFileSpanReceiver",
- "-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
+ "-Chadoop.htrace.local.file.span.receiver.path=" + tracePath));
Assert.assertEquals("ret:0, Removed trace span receiver 2" + NEWLINE,
runTraceCommand(trace, "-remove", "2", "-host",
getHostPortForNN(cluster)));
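For context on what these assertions exercise: the `-add` command is assembled client-side with `SpanReceiverInfoBuilder` and then sent to the NameNode over `TraceAdminProtocol`. A hedged sketch of just the construction step (the RPC plumbing is omitted, and the example class is hypothetical):

```java
import org.apache.hadoop.tracing.SpanReceiverInfo;
import org.apache.hadoop.tracing.SpanReceiverInfoBuilder;

public class AddReceiverSketch {  // hypothetical example class
  public static void main(String[] args) {
    SpanReceiverInfoBuilder builder = new SpanReceiverInfoBuilder(
        "org.apache.htrace.core.LocalFileSpanReceiver");
    // each -Ckey=value option becomes one configuration pair,
    // passed through to the server verbatim
    builder.addConfigurationPair(
        "hadoop.htrace.local.file.span.receiver.path", "/tmp/htrace.out");
    SpanReceiverInfo info = builder.build();
    System.out.println("would add receiver class " + info.getClassName());
  }
}
```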