HADOOP-12447. Clean up some htrace integration issues (cmccabe)

Colin Patrick Mccabe 2015-09-29 09:25:11 -07:00
parent a0b5a0a419
commit 850d679acb
9 changed files with 20 additions and 27 deletions

Tracing.md

@@ -85,8 +85,8 @@ You need to run the command against all servers if you want to update the config
 You need to specify the class name of span receiver as argument of `-class` option.
 You can specify the configuration associated with span receiver by `-Ckey=value` options.
 
-      $ hadoop trace -add -class LocalFileSpanReceiver -Cdfs.htrace.local-file-span-receiver.path=/tmp/htrace.out -host 192.168.56.2:9000
-      Added trace span receiver 2 with configuration dfs.htrace.local-file-span-receiver.path = /tmp/htrace.out
+      $ hadoop trace -add -class org.apache.htrace.core.LocalFileSpanReceiver -Chadoop.htrace.local.file.span.receiver.path=/tmp/htrace.out -host 192.168.56.2:9000
+      Added trace span receiver 2 with configuration hadoop.htrace.local.file.span.receiver.path = /tmp/htrace.out
 
       $ hadoop trace -list -host 192.168.56.2:9000
       ID  CLASS
@@ -137,8 +137,7 @@ which start tracing span before invoking HDFS shell command.
     FsShell shell = new FsShell();
     conf.setQuietMode(false);
    shell.setConf(conf);
-    Tracer tracer = new Tracer.Builder().
-        name("TracingFsShell).
+    Tracer tracer = new Tracer.Builder("TracingFsShell").
         conf(TraceUtils.wrapHadoopConf("tracing.fs.shell.htrace.", conf)).
         build();
     int res = 0;
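
Note: this hunk fixes two problems at once. HTrace 4's `Tracer.Builder` takes the tracer name as a constructor argument rather than through a chained `name()` call, and the old snippet was also missing the closing quote in `"TracingFsShell`. A minimal reconstruction of the documented example with the fix applied (the span handling is a sketch against the HTrace 4 core API):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class TracingFsShell {
  public static void main(String[] argv) throws Exception {
    Configuration conf = new Configuration();
    FsShell shell = new FsShell();
    conf.setQuietMode(false);
    shell.setConf(conf);
    // HTrace 4 takes the tracer name in the Builder constructor.
    Tracer tracer = new Tracer.Builder("TracingFsShell").
        conf(TraceUtils.wrapHadoopConf("tracing.fs.shell.htrace.", conf)).
        build();
    int res = 0;
    // Open a top-level span so spans from the shell command nest under it.
    TraceScope scope = tracer.newScope("FsShell");
    try {
      res = ToolRunner.run(shell, argv);
    } finally {
      scope.close();
    }
    tracer.close();
    System.exit(res);
  }
}
```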
@@ -177,15 +176,15 @@ ProbabilitySampler.
 ```xml
   <property>
-    <name>dfs.client.htrace.spanreceiver.classes</name>
+    <name>hadoop.htrace.span.receiver.classes</name>
     <value>LocalFileSpanReceiver</value>
   </property>
   <property>
-    <name>dfs.client.htrace.sampler</name>
+    <name>fs.client.htrace.sampler.classes</name>
     <value>ProbabilitySampler</value>
   </property>
   <property>
-    <name>dfs.client.htrace.sampler.fraction</name>
-    <value>0.5</value>
+    <name>fs.client.htrace.sampler.fraction</name>
+    <value>0.01</value>
   </property>
 ```
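
These renamed keys line up with what the client actually reads at runtime: `FsTracer.get(conf)`, which the test hunks below start using, builds the filesystem client tracer from `fs.client.htrace.`-prefixed configuration. A hedged sketch of the programmatic equivalent of the XML above (key names copied from the updated example):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.Tracer;

public class ClientTracingConf {
  public static Tracer clientTracer() {
    Configuration conf = new Configuration();
    // Key names copied from the updated doc example above.
    conf.set("hadoop.htrace.span.receiver.classes", "LocalFileSpanReceiver");
    conf.set("fs.client.htrace.sampler.classes", "ProbabilitySampler");
    conf.setDouble("fs.client.htrace.sampler.fraction", 0.01);
    // FsTracer.get() builds (or reuses) the shared FS client tracer.
    return FsTracer.get(conf);
  }
}
```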

HdfsClientConfigKeys.java

@@ -149,8 +149,6 @@ public interface HdfsClientConfigKeys {
       "dfs.client.test.drop.namenode.response.number";
   int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0;
   String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces";
-  // HDFS client HTrace configuration.
-  String DFS_CLIENT_HTRACE_PREFIX = "dfs.client.htrace.";
   String DFS_USER_HOME_DIR_PREFIX_KEY = "dfs.user.home.dir.prefix";
   String DFS_USER_HOME_DIR_PREFIX_DEFAULT = "/user";

DFSConfigKeys.java

@@ -64,9 +64,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
-  public static final String DFS_CLIENT_HTRACE_SAMPLER_CLASSES =
-      "dfs.client.htrace.sampler.classes";
-
   // HA related configuration
   public static final String DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
   public static final long DFS_DATANODE_RESTART_REPLICA_EXPIRY_DEFAULT = 50;

NameNodeRpcServer.java

@@ -419,7 +419,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
         UnresolvedPathException.class);
     clientRpcServer.setTracer(nn.tracer);
     if (serviceRpcServer != null) {
-      clientRpcServer.setTracer(nn.tracer);
+      serviceRpcServer.setTracer(nn.tracer);
     }
   }
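
This one-line change fixes a copy-paste bug: the old branch set the client RPC server's tracer a second time, so the service RPC server never got one and its handlers produced no spans. The intended wiring is simply:

```java
// Both RPC servers share the NameNode's tracer; the service RPC
// server is optional, so it is guarded by a null check.
clientRpcServer.setTracer(nn.tracer);
if (serviceRpcServer != null) {
  serviceRpcServer.setTracer(nn.tracer);
}
```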

NamenodeFsck.java

@@ -80,6 +80,7 @@ import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.htrace.core.Tracer;
@@ -202,7 +203,9 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     this.staleInterval =
         conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
           DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
-    this.tracer = new Tracer.Builder("NamenodeFsck").build();
+    this.tracer = new Tracer.Builder("NamenodeFsck").
+        conf(TraceUtils.wrapHadoopConf("namenode.fsck.htrace.", conf)).
+        build();
 
     for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
       String key = it.next();
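
With the builder now routed through `TraceUtils.wrapHadoopConf`, the fsck tracer picks up settings under the `namenode.fsck.htrace.` prefix instead of always running with HTrace defaults. A sketch of how the prefix resolves, assuming `wrapHadoopConf` strips the prefix before handing keys to HTrace (the HTrace-side key name `span.receiver.classes` is an assumption here):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.HTraceConfiguration;

public class FsckTracePrefix {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hadoop-side key: prefix + HTrace property name (assumed).
    conf.set("namenode.fsck.htrace.span.receiver.classes",
        "LocalFileSpanReceiver");
    HTraceConfiguration wrapped =
        TraceUtils.wrapHadoopConf("namenode.fsck.htrace.", conf);
    // HTrace sees the key with the prefix stripped.
    System.out.println(wrapped.get("span.receiver.classes"));
  }
}
```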

hdfs-default.xml

@@ -2427,14 +2427,6 @@
   </description>
 </property>
 
-<property>
-  <name>dfs.client.htrace.sampler.classes</name>
-  <value></value>
-  <description>
-    The class names of the HTrace Samplers to use for the HDFS client.
-  </description>
-</property>
-
 <property>
   <name>dfs.ha.zkfc.nn.http.timeout.ms</name>
   <value>20000</value>

TestBlockTokenWithDFS.java

@@ -33,6 +33,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -161,6 +162,7 @@ public class TestBlockTokenWithDFS {
         setCachingStrategy(CachingStrategy.newDefaultStrategy()).
         setClientCacheContext(ClientContext.getFromConf(conf)).
         setConfiguration(conf).
+        setTracer(FsTracer.get(conf)).
         setRemotePeerFactory(new RemotePeerFactory() {
           @Override
           public Peer newConnectedPeer(InetSocketAddress addr,

TestDataNodeVolumeFailure.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationException;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.BlockReaderFactory;
@@ -515,6 +516,7 @@ public class TestDataNodeVolumeFailure {
         setCachingStrategy(CachingStrategy.newDefaultStrategy()).
         setClientCacheContext(ClientContext.getFromConf(conf)).
         setConfiguration(conf).
+        setTracer(FsTracer.get(conf)).
         setRemotePeerFactory(new RemotePeerFactory() {
           @Override
           public Peer newConnectedPeer(InetSocketAddress addr,
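
Both test fixes are the same shape: tests that assemble a `BlockReader` through `BlockReaderFactory` by hand must now pass a tracer explicitly, and `FsTracer.get(conf)` is the stock way to obtain one, as both hunks show. A trimmed sketch of the only new ingredient (everything else in the builder chain is unchanged):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.Tracer;

public class BlockReaderTracer {
  // FsTracer.get() returns the shared filesystem-client tracer for
  // this configuration; the tests feed it to setTracer(...) above.
  public static Tracer tracerFor(Configuration conf) {
    return FsTracer.get(conf);
  }
}
```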

TestTraceAdmin.java

@@ -73,10 +73,10 @@ public class TestTraceAdmin {
     Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
         runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
     Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
-        "configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
+        "configuration hadoop.htrace.local.file.span.receiver.path = " + tracePath + NEWLINE,
         runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
             "-class", "org.apache.htrace.core.LocalFileSpanReceiver",
-            "-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
+            "-Chadoop.htrace.local.file.span.receiver.path=" + tracePath));
     String list =
         runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
     Assert.assertTrue(list.startsWith("ret:0"));
@@ -87,10 +87,10 @@ public class TestTraceAdmin {
     Assert.assertEquals("ret:0, [no span receivers found]" + NEWLINE,
         runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster)));
     Assert.assertEquals("ret:0, Added trace span receiver 2 with " +
-        "configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
+        "configuration hadoop.htrace.local.file.span.receiver.path = " + tracePath + NEWLINE,
         runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
             "-class", "LocalFileSpanReceiver",
-            "-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
+            "-Chadoop.htrace.local.file.span.receiver.path=" + tracePath));
     Assert.assertEquals("ret:0, Removed trace span receiver 2" + NEWLINE,
         runTraceCommand(trace, "-remove", "2", "-host",
             getHostPortForNN(cluster)));
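
The expected strings change because dynamically added span receivers are now configured under the daemon-wide `hadoop.htrace.` prefix rather than the old `dfs.htrace.` one. The test's `runTraceCommand` helper drives the same path you would hit from the command line; a hedged sketch of the programmatic equivalent, assuming `TraceAdmin` is the `Tool` behind `hadoop trace`:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceAdmin;
import org.apache.hadoop.util.ToolRunner;

public class AddSpanReceiver {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Mirrors the invocation the test asserts on.
    int ret = ToolRunner.run(conf, new TraceAdmin(), new String[] {
        "-add", "-host", "192.168.56.2:9000",
        "-class", "org.apache.htrace.core.LocalFileSpanReceiver",
        "-Chadoop.htrace.local.file.span.receiver.path=/tmp/htrace.out"
    });
    System.exit(ret);
  }
}
```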