HDFS-9080. Update htrace version to 4.0.1 (cmccabe)
(cherry picked from commit 892ade689f)
Conflicts:
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/BlockStorageLocationUtil.java
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
parent 03e370e306
commit 3b9d32627f
@@ -246,7 +246,7 @@
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.zookeeper</groupId>

@@ -295,4 +295,7 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY = "nfs.exports.allowed.hosts";
   public static final String NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw";
+
+  // HDFS client HTrace configuration.
+  public static final String FS_CLIENT_HTRACE_PREFIX = "fs.client.htrace.";
 }
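For orientation, a minimal sketch (not part of the commit; key values are illustrative) of how the new `FS_CLIENT_HTRACE_PREFIX` is meant to be used: HTrace settings for filesystem clients are read from Hadoop keys under `fs.client.htrace.`, which map onto HTrace 4 configuration names such as `sampler.classes` and `span.receiver.classes`.

```java
import org.apache.hadoop.conf.Configuration;

// Illustrative only: configure client-side tracing through the new prefix.
public class FsClientTraceConfig {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "fs.client.htrace." + HTrace key name, per the constant added above.
    conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");
    conf.set("fs.client.htrace.span.receiver.classes",
        "org.apache.htrace.core.LocalFileSpanReceiver");
  }
}
```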
@@ -21,8 +21,8 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import java.io.IOException;
 import java.io.OutputStream;

@@ -43,6 +43,8 @@ abstract public class FSOutputSummer extends OutputStream {
   private byte checksum[];
   // The number of valid bytes in the buffer.
   private int count;
+  // The HTrace tracer to use
+  private Tracer tracer;

   // We want this value to be a multiple of 3 because the native code checksums
   // 3 chunks simultaneously. The chosen value of 9 strikes a balance between

@@ -197,7 +199,7 @@ abstract public class FSOutputSummer extends OutputStream {
   }

   protected TraceScope createWriteTraceScope() {
-    return NullScope.INSTANCE;
+    return null;
   }

   /** Generate checksums for the given data chunks and output chunks & checksums

@@ -215,7 +217,9 @@ abstract public class FSOutputSummer extends OutputStream {
             getChecksumSize());
       }
     } finally {
-      scope.close();
+      if (scope != null) {
+        scope.close();
+      }
     }
   }
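The `createWriteTraceScope()` change above is the recurring htrace 3 to 4 pattern in this commit: `org.apache.htrace.core` has no `NullScope.INSTANCE`, so a method that used to return a no-op scope now returns `null` and every caller guards `close()`. A self-contained sketch (class and method names invented for illustration):

```java
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class WriteTraceSketch {
  private final Tracer tracer;   // may legitimately be absent

  WriteTraceSketch(Tracer tracer) {
    this.tracer = tracer;
  }

  // htrace 4 has no NullScope; return null when nothing should be traced.
  TraceScope createWriteTraceScope() {
    return (tracer == null) ? null : tracer.newScope("write");
  }

  void write(Runnable work) {
    TraceScope scope = createWriteTraceScope();
    try {
      work.run();
    } finally {
      if (scope != null) {   // mirror the null guard added in the hunk above
        scope.close();
      }
    }
  }
}
```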
@@ -61,6 +61,7 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ShutdownHookManager;

 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.Tracer;

 /**
  * The FileContext class provides an interface to the application writer for

@@ -232,12 +233,14 @@ public class FileContext {
   private final Configuration conf;
   private final UserGroupInformation ugi;
   final boolean resolveSymlinks;
+  private final Tracer tracer;

   private FileContext(final AbstractFileSystem defFs,
       final FsPermission theUmask, final Configuration aConf) {
     defaultFS = defFs;
     umask = FsPermission.getUMask(aConf);
     conf = aConf;
+    tracer = FsTracer.get(aConf);
     try {
       ugi = UserGroupInformation.getCurrentUser();
     } catch (IOException e) {

@@ -2731,4 +2734,8 @@ public class FileContext {
       throws IOException {
     return defaultFS.getAllStoragePolicies();
   }
+
+  Tracer getTracer() {
+    return tracer;
+  }
 }

@@ -67,9 +67,8 @@ import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.htrace.core.TraceScope;

 import com.google.common.annotations.VisibleForTesting;

@@ -129,6 +128,13 @@ public abstract class FileSystem extends Configured implements Closeable {
   private Set<Path> deleteOnExit = new TreeSet<Path>();

   boolean resolveSymlinks;
+
+  private Tracer tracer;
+
+  protected final Tracer getTracer() {
+    return tracer;
+  }
+
   /**
    * This method adds a file system for testing so that we can find it later. It
    * is only for testing.

@@ -2692,14 +2698,13 @@ public abstract class FileSystem extends Configured implements Closeable {

   private static FileSystem createFileSystem(URI uri, Configuration conf
       ) throws IOException {
-    TraceScope scope = Trace.startSpan("FileSystem#createFileSystem");
-    Span span = scope.getSpan();
-    if (span != null) {
-      span.addKVAnnotation("scheme", uri.getScheme());
-    }
+    Tracer tracer = FsTracer.get(conf);
+    TraceScope scope = tracer.newScope("FileSystem#createFileSystem");
+    scope.addKVAnnotation("scheme", uri.getScheme());
     try {
       Class<?> clazz = getFileSystemClass(uri.getScheme(), conf);
       FileSystem fs = (FileSystem)ReflectionUtils.newInstance(clazz, conf);
+      fs.tracer = tracer;
       fs.initialize(uri, conf);
       return fs;
     } finally {
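In htrace 4, `Tracer#newScope` always hands back a usable `TraceScope` (it is simply non-sampled when tracing is off), which is why the `if (span != null)` guard around `addKVAnnotation` disappears above. A sketch of the resulting pattern, assuming only APIs that appear in this commit:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class CreateFileSystemTraceSketch {
  static void run(Configuration conf, String scheme) {
    Tracer tracer = FsTracer.get(conf);
    TraceScope scope = tracer.newScope("FileSystem#createFileSystem");
    scope.addKVAnnotation("scheme", scheme);   // no-op when not sampled
    try {
      // ... load the FileSystem class and initialize it ...
    } finally {
      scope.close();
    }
  }
}
```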
@@ -32,16 +32,13 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
-import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.SamplerBuilder;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 /** Provide command line access to a FileSystem. */
 @InterfaceAudience.Private

@@ -54,13 +51,12 @@ public class FsShell extends Configured implements Tool {
   private FileSystem fs;
   private Trash trash;
   protected CommandFactory commandFactory;
-  private Sampler traceSampler;

   private final String usagePrefix =
     "Usage: hadoop fs [generic options]";

-  private SpanReceiverHost spanReceiverHost;
-  static final String SEHLL_HTRACE_PREFIX = "dfs.shell.htrace.";
+  private Tracer tracer;
+  static final String SHELL_HTRACE_PREFIX = "fs.shell.htrace.";

   /**
    * Default ctor with no configuration. Be sure to invoke

@@ -102,8 +98,9 @@ public class FsShell extends Configured implements Tool {
       commandFactory.addObject(new Usage(), "-usage");
       registerCommands(commandFactory);
     }
-    this.spanReceiverHost =
-        SpanReceiverHost.get(getConf(), SEHLL_HTRACE_PREFIX);
+    this.tracer = new Tracer.Builder("FsShell").
+        conf(TraceUtils.wrapHadoopConf(SHELL_HTRACE_PREFIX, getConf())).
+        build();
   }

   protected void registerCommands(CommandFactory factory) {

@@ -285,8 +282,6 @@ public class FsShell extends Configured implements Tool {
   public int run(String argv[]) throws Exception {
     // initialize FsShell
     init();
-    traceSampler = new SamplerBuilder(TraceUtils.
-        wrapHadoopConf(SEHLL_HTRACE_PREFIX, getConf())).build();
     int exitCode = -1;
     if (argv.length < 1) {
       printUsage(System.err);

@@ -298,7 +293,7 @@ public class FsShell extends Configured implements Tool {
       if (instance == null) {
         throw new UnknownCommandException();
       }
-      TraceScope scope = Trace.startSpan(instance.getCommandName(), traceSampler);
+      TraceScope scope = tracer.newScope(instance.getCommandName());
       if (scope.getSpan() != null) {
         String args = StringUtils.join(" ", argv);
         if (args.length() > 2048) {

@@ -324,6 +319,7 @@ public class FsShell extends Configured implements Tool {
         e.printStackTrace(System.err);
       }
     }
+    tracer.close();
     return exitCode;
   }
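Taken together, the FsShell hunks replace the `SpanReceiverHost`/`SamplerBuilder` pair with a single per-process `Tracer` lifecycle. A hedged sketch of that lifecycle (names illustrative; only APIs used in the hunks above):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class ShellTracingSketch {
  static int runCommand(Configuration conf, Runnable command) {
    // One Tracer per process, built from "fs.shell.htrace."-prefixed keys.
    Tracer tracer = new Tracer.Builder("FsShell").
        conf(TraceUtils.wrapHadoopConf("fs.shell.htrace.", conf)).
        build();
    int exitCode = 0;
    TraceScope scope = tracer.newScope("command");  // one scope per command
    try {
      command.run();
    } finally {
      scope.close();
    }
    tracer.close();   // flush span receivers before exiting
    return exitCode;
  }
}
```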
@@ -350,9 +346,6 @@ public class FsShell extends Configured implements Tool {
       fs.close();
       fs = null;
     }
-    if (this.spanReceiverHost != null) {
-      this.spanReceiverHost.closeReceivers();
-    }
   }

   /**
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.tracing.TraceUtils;
+import org.apache.htrace.core.Tracer;
+
+/**
+ * Holds the HTrace Tracer used for FileSystem operations.
+ *
+ * Ideally, this would be owned by the DFSClient, rather than global.  However,
+ * the FileContext API may create a new DFSClient for each operation in some
+ * cases.  Because of this, we cannot store this Tracer inside DFSClient.  See
+ * HADOOP-6356 for details.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public final class FsTracer {
+  private static Tracer instance;
+
+  public static synchronized Tracer get(Configuration conf) {
+    if (instance == null) {
+      instance = new Tracer.Builder("FSClient").
+          conf(TraceUtils.wrapHadoopConf(CommonConfigurationKeys.
+              FS_CLIENT_HTRACE_PREFIX, conf)).
+          build();
+    }
+    return instance;
+  }
+
+  @VisibleForTesting
+  public static synchronized void clear() {
+    if (instance == null) {
+      return;
+    }
+    try {
+      instance.close();
+    } finally {
+      instance = null;
+    }
+  }
+
+  private FsTracer() {
+  }
+}
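The new `FsTracer` is the single point other hunks in this commit (FileContext, FileSystem) use to obtain the client tracer. A minimal usage sketch:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class FsTracerUsageSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    Tracer tracer = FsTracer.get(conf);       // lazily built, then cached
    TraceScope scope = tracer.newScope("example");
    try {
      // ... filesystem work ...
    } finally {
      scope.close();
    }
  }
}
```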
@@ -28,9 +28,8 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 @InterfaceAudience.Private
 @InterfaceStability.Unstable

@@ -41,12 +40,14 @@ class Globber {
   private final FileContext fc;
   private final Path pathPattern;
   private final PathFilter filter;
+  private final Tracer tracer;

   public Globber(FileSystem fs, Path pathPattern, PathFilter filter) {
     this.fs = fs;
     this.fc = null;
     this.pathPattern = pathPattern;
     this.filter = filter;
+    this.tracer = fs.getTracer();
   }

   public Globber(FileContext fc, Path pathPattern, PathFilter filter) {

@@ -54,6 +55,7 @@ class Globber {
     this.fc = fc;
     this.pathPattern = pathPattern;
     this.filter = filter;
+    this.tracer = fc.getTracer();
   }

   private FileStatus getFileStatus(Path path) throws IOException {

@@ -140,11 +142,8 @@ class Globber {
   }

   public FileStatus[] glob() throws IOException {
-    TraceScope scope = Trace.startSpan("Globber#glob");
-    Span span = scope.getSpan();
-    if (span != null) {
-      span.addKVAnnotation("pattern", pathPattern.toUri().getPath());
-    }
+    TraceScope scope = tracer.newScope("Globber#glob");
+    scope.addKVAnnotation("pattern", pathPattern.toUri().getPath());
     try {
       return doGlob();
     } finally {

@@ -92,7 +92,8 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;

@@ -722,8 +723,9 @@ public class Client {
       if (LOG.isDebugEnabled()) {
         LOG.debug("Connecting to "+server);
       }
-      if (Trace.isTracing()) {
-        Trace.addTimelineAnnotation("IPC client connecting to " + server);
+      Span span = Tracer.getCurrentSpan();
+      if (span != null) {
+        span.addTimelineAnnotation("IPC client connecting to " + server);
       }
       short numRetries = 0;
       Random rand = null;

@@ -796,8 +798,9 @@ public class Client {
       // update last activity time
       touch();

-      if (Trace.isTracing()) {
-        Trace.addTimelineAnnotation("IPC client connected to " + server);
+      span = Tracer.getCurrentSpan();
+      if (span != null) {
+        span.addTimelineAnnotation("IPC client connected to " + server);
       }

       // start the receiver thread after the socket connection has been set
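The Client.java hunks show the htrace 4 replacement for the old static `Trace.isTracing()` / `Trace.addTimelineAnnotation()` pair: fetch the current span (null when no trace is active on this thread) and annotate it directly. Extracted as a sketch:

```java
import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;

class TimelineAnnotationSketch {
  static void annotate(String message) {
    Span span = Tracer.getCurrentSpan();
    if (span != null) {                 // null means "not currently tracing"
      span.addTimelineAnnotation(message);
    }
  }
}
```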
@@ -49,8 +49,8 @@ import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.BlockingService;

@@ -206,12 +206,13 @@ public class ProtobufRpcEngine implements RpcEngine {
             + method.getName() + "]");
       }

-      TraceScope traceScope = null;
       // if Tracing is on then start a new span for this rpc.
       // guard it in the if statement to make sure there isn't
       // any extra string manipulation.
-      if (Trace.isTracing()) {
-        traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
+      Tracer tracer = Tracer.curThreadTracer();
+      TraceScope traceScope = null;
+      if (tracer != null) {
+        traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
       }

       RequestHeaderProto rpcRequestHeader = constructRpcRequestHeader(method);

@@ -236,9 +237,9 @@ public class ProtobufRpcEngine implements RpcEngine {
             remoteId + ": " + method.getName() +
             " {" + e + "}");
       }
-      if (Trace.isTracing()) {
-        traceScope.getSpan().addTimelineAnnotation(
-            "Call got exception: " + e.toString());
+      if (traceScope != null) {
+        traceScope.addTimelineAnnotation("Call got exception: " +
+            e.toString());
       }
       throw new ServiceException(e);
     } finally {
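Sketch of the client-side RPC tracing pattern adopted above: htrace 4 has no global `Trace.isTracing()`; instead the calling thread's `Tracer` (if any) decides whether a scope is created, and the nullable scope is threaded through to the error path.

```java
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class RpcClientTraceSketch {
  static void invoke(String traceName, Runnable rpcCall) {
    Tracer tracer = Tracer.curThreadTracer();  // null if caller isn't tracing
    TraceScope traceScope = null;
    if (tracer != null) {
      traceScope = tracer.newScope(traceName);
    }
    try {
      rpcCall.run();
    } finally {
      if (traceScope != null) {
        traceScope.close();
      }
    }
  }
}
```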
@@ -116,10 +116,9 @@ import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceInfo;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.protobuf.ByteString;

@@ -138,6 +137,7 @@ public abstract class Server {
   private List<AuthMethod> enabledAuthMethods;
   private RpcSaslProto negotiateResponse;
   private ExceptionsHandler exceptionsHandler = new ExceptionsHandler();
+  private Tracer tracer;

   public void addTerseExceptions(Class<?>... exceptionClass) {
     exceptionsHandler.addTerseExceptions(exceptionClass);

@@ -578,7 +578,7 @@ public abstract class Server {
     private ByteBuffer rpcResponse;       // the response for this call
     private final RPC.RpcKind rpcKind;
     private final byte[] clientId;
-    private final Span traceSpan; // the tracing span on the server side
+    private final TraceScope traceScope; // the HTrace scope on the server side

     public Call(int id, int retryCount, Writable param,
         Connection connection) {

@@ -592,7 +592,7 @@ public abstract class Server {
     }

     public Call(int id, int retryCount, Writable param, Connection connection,
-        RPC.RpcKind kind, byte[] clientId, Span span) {
+        RPC.RpcKind kind, byte[] clientId, TraceScope traceScope) {
       this.callId = id;
       this.retryCount = retryCount;
       this.rpcRequest = param;

@@ -601,7 +601,7 @@ public abstract class Server {
       this.rpcResponse = null;
       this.rpcKind = kind;
       this.clientId = clientId;
-      this.traceSpan = span;
+      this.traceScope = traceScope;
     }

     @Override

@@ -1929,19 +1929,24 @@
             RpcErrorCodeProto.FATAL_DESERIALIZING_REQUEST, err);
       }

-      Span traceSpan = null;
+      TraceScope traceScope = null;
       if (header.hasTraceInfo()) {
-        // If the incoming RPC included tracing info, always continue the trace
-        TraceInfo parentSpan = new TraceInfo(header.getTraceInfo().getTraceId(),
-            header.getTraceInfo().getParentId());
-        traceSpan = Trace.startSpan(
-            RpcClientUtil.toTraceName(rpcRequest.toString()),
-            parentSpan).detach();
+        if (tracer != null) {
+          // If the incoming RPC included tracing info, always continue the
+          // trace
+          SpanId parentSpanId = new SpanId(
+              header.getTraceInfo().getTraceId(),
+              header.getTraceInfo().getParentId());
+          traceScope = tracer.newScope(
+              RpcClientUtil.toTraceName(rpcRequest.toString()),
+              parentSpanId);
+          traceScope.detach();
+        }
       }

       Call call = new Call(header.getCallId(), header.getRetryCount(),
           rpcRequest, this, ProtoUtil.convert(header.getRpcKind()),
-          header.getClientId().toByteArray(), traceSpan);
+          header.getClientId().toByteArray(), traceScope);

       if (callQueue.isClientBackoffEnabled()) {
         // if RPC queue is full, we will ask the RPC client to back off by
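The server-side handoff above relies on htrace 4's detach/reattach: the reader thread creates a scope from the caller's `SpanId` and detaches it, and the handler thread that later processes the call reattaches the same scope. A sketch of that two-thread protocol (method names invented for illustration):

```java
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class ServerTraceHandoffSketch {
  static TraceScope startOnReader(Tracer tracer, String name,
      long traceIdHigh, long traceIdLow) {
    TraceScope scope = tracer.newScope(name,
        new SpanId(traceIdHigh, traceIdLow));  // continue the caller's trace
    scope.detach();             // scope will be finished on another thread
    return scope;
  }

  static void finishOnHandler(TraceScope scope) {
    scope.reattach();           // re-associate with the current thread
    try {
      // ... process the call ...
    } finally {
      scope.close();
    }
  }
}
```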
@@ -2123,8 +2128,9 @@
       Writable value = null;

       CurCall.set(call);
-      if (call.traceSpan != null) {
-        traceScope = Trace.continueSpan(call.traceSpan);
+      if (call.traceScope != null) {
+        call.traceScope.reattach();
+        traceScope = call.traceScope;
+        traceScope.getSpan().addTimelineAnnotation("called");
       }

@@ -2201,21 +2207,18 @@
       } catch (InterruptedException e) {
         if (running) {                          // unexpected -- log it
           LOG.info(Thread.currentThread().getName() + " unexpectedly interrupted", e);
-          if (Trace.isTracing()) {
+          if (traceScope != null) {
             traceScope.getSpan().addTimelineAnnotation("unexpectedly interrupted: " +
                 StringUtils.stringifyException(e));
           }
         }
       } catch (Exception e) {
         LOG.info(Thread.currentThread().getName() + " caught an exception", e);
-        if (Trace.isTracing()) {
+        if (traceScope != null) {
           traceScope.getSpan().addTimelineAnnotation("Exception: " +
               StringUtils.stringifyException(e));
         }
       } finally {
-        if (traceScope != null) {
-          traceScope.close();
-        }
+        IOUtils.cleanup(LOG, traceScope);
       }
     }

@@ -2529,6 +2532,10 @@
   /** Sets the socket buffer size used for responding to RPCs */
   public void setSocketSendBufSize(int size) { this.socketSendBufferSize = size; }

+  public void setTracer(Tracer t) {
+    this.tracer = t;
+  }
+
   /** Starts the service.  Must be called before any calls will be handled. */
   public synchronized void start() {
     responder.start();
@@ -42,8 +42,8 @@ import org.apache.hadoop.util.Time;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 /** An RpcEngine implementation for Writable data. */
 @InterfaceStability.Evolving

@@ -233,9 +233,14 @@ public class WritableRpcEngine implements RpcEngine {
       if (LOG.isDebugEnabled()) {
         startTime = Time.now();
       }

+      // if Tracing is on then start a new span for this rpc.
+      // guard it in the if statement to make sure there isn't
+      // any extra string manipulation.
+      Tracer tracer = Tracer.curThreadTracer();
       TraceScope traceScope = null;
-      if (Trace.isTracing()) {
-        traceScope = Trace.startSpan(RpcClientUtil.methodToTraceString(method));
+      if (tracer != null) {
+        traceScope = tracer.newScope(RpcClientUtil.methodToTraceString(method));
       }
       ObjectWritable value;
       try {
@@ -1,208 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.tracing;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
-import org.apache.hadoop.util.ShutdownHookManager;
-import org.apache.htrace.SpanReceiver;
-import org.apache.htrace.SpanReceiverBuilder;
-import org.apache.htrace.Trace;
-import org.apache.htrace.impl.LocalFileSpanReceiver;
-
-/**
- * This class provides functions for reading the names of SpanReceivers from
- * the Hadoop configuration, adding those SpanReceivers to the Tracer,
- * and closing those SpanReceivers when appropriate.
- * This class does nothing If no SpanReceiver is configured.
- */
-@InterfaceAudience.Private
-public class SpanReceiverHost implements TraceAdminProtocol {
-  public static final String SPAN_RECEIVERS_CONF_SUFFIX =
-      "spanreceiver.classes";
-  private static final Log LOG = LogFactory.getLog(SpanReceiverHost.class);
-  private static final HashMap<String, SpanReceiverHost> hosts =
-      new HashMap<String, SpanReceiverHost>(1);
-  private final TreeMap<Long, SpanReceiver> receivers =
-      new TreeMap<Long, SpanReceiver>();
-  private final String confPrefix;
-  private Configuration config;
-  private boolean closed = false;
-  private long highestId = 1;
-
-  private final static String LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX =
-      "local-file-span-receiver.path";
-
-  public static SpanReceiverHost get(Configuration conf, String confPrefix) {
-    synchronized (SpanReceiverHost.class) {
-      SpanReceiverHost host = hosts.get(confPrefix);
-      if (host != null) {
-        return host;
-      }
-      final SpanReceiverHost newHost = new SpanReceiverHost(confPrefix);
-      newHost.loadSpanReceivers(conf);
-      ShutdownHookManager.get().addShutdownHook(new Runnable() {
-          public void run() {
-            newHost.closeReceivers();
-          }
-        }, 0);
-      hosts.put(confPrefix, newHost);
-      return newHost;
-    }
-  }
-
-  private static List<ConfigurationPair> EMPTY = Collections.emptyList();
-
-  private SpanReceiverHost(String confPrefix) {
-    this.confPrefix = confPrefix;
-  }
-
-  /**
-   * Reads the names of classes specified in the
-   * "hadoop.htrace.spanreceiver.classes" property and instantiates and registers
-   * them with the Tracer as SpanReceiver's.
-   *
-   * The nullary constructor is called during construction, but if the classes
-   * specified implement the Configurable interface, setConfiguration() will be
-   * called on them. This allows SpanReceivers to use values from the Hadoop
-   * configuration.
-   */
-  public synchronized void loadSpanReceivers(Configuration conf) {
-    config = new Configuration(conf);
-    String receiverKey = confPrefix + SPAN_RECEIVERS_CONF_SUFFIX;
-    String[] receiverNames = config.getTrimmedStrings(receiverKey);
-    if (receiverNames == null || receiverNames.length == 0) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("No span receiver names found in " + receiverKey + ".");
-      }
-      return;
-    }
-    // It's convenient to have each daemon log to a random trace file when
-    // testing.
-    String pathKey = confPrefix + LOCAL_FILE_SPAN_RECEIVER_PATH_SUFFIX;
-    if (config.get(pathKey) == null) {
-      String uniqueFile = LocalFileSpanReceiver.getUniqueLocalTraceFileName();
-      config.set(pathKey, uniqueFile);
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Set " + pathKey + " to " + uniqueFile);
-      }
-    }
-    for (String className : receiverNames) {
-      try {
-        SpanReceiver rcvr = loadInstance(className, EMPTY);
-        Trace.addReceiver(rcvr);
-        receivers.put(highestId++, rcvr);
-        LOG.info("Loaded SpanReceiver " + className + " successfully.");
-      } catch (IOException e) {
-        LOG.error("Failed to load SpanReceiver", e);
-      }
-    }
-  }
-
-  private synchronized SpanReceiver loadInstance(String className,
-      List<ConfigurationPair> extraConfig) throws IOException {
-    SpanReceiverBuilder builder =
-        new SpanReceiverBuilder(TraceUtils.
-            wrapHadoopConf(confPrefix, config, extraConfig));
-    SpanReceiver rcvr = builder.spanReceiverClass(className.trim()).build();
-    if (rcvr == null) {
-      throw new IOException("Failed to load SpanReceiver " + className);
-    }
-    return rcvr;
-  }
-
-  /**
-   * Calls close() on all SpanReceivers created by this SpanReceiverHost.
-   */
-  public synchronized void closeReceivers() {
-    if (closed) return;
-    closed = true;
-    for (SpanReceiver rcvr : receivers.values()) {
-      try {
-        rcvr.close();
-      } catch (IOException e) {
-        LOG.warn("Unable to close SpanReceiver correctly: " + e.getMessage(), e);
-      }
-    }
-    receivers.clear();
-  }
-
-  public synchronized SpanReceiverInfo[] listSpanReceivers()
-      throws IOException {
-    SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.size()];
-    int i = 0;
-
-    for(Map.Entry<Long, SpanReceiver> entry : receivers.entrySet()) {
-      info[i] = new SpanReceiverInfo(entry.getKey(),
-          entry.getValue().getClass().getName());
-      i++;
-    }
-    return info;
-  }
-
-  public synchronized long addSpanReceiver(SpanReceiverInfo info)
-      throws IOException {
-    StringBuilder configStringBuilder = new StringBuilder();
-    String prefix = "";
-    for (ConfigurationPair pair : info.configPairs) {
-      configStringBuilder.append(prefix).append(pair.getKey()).
-          append(" = ").append(pair.getValue());
-      prefix = ", ";
-    }
-    SpanReceiver rcvr = null;
-    try {
-      rcvr = loadInstance(info.getClassName(), info.configPairs);
-    } catch (IOException e) {
-      LOG.info("Failed to add SpanReceiver " + info.getClassName() +
-          " with configuration " + configStringBuilder.toString(), e);
-      throw e;
-    } catch (RuntimeException e) {
-      LOG.info("Failed to add SpanReceiver " + info.getClassName() +
-          " with configuration " + configStringBuilder.toString(), e);
-      throw e;
-    }
-    Trace.addReceiver(rcvr);
-    long newId = highestId++;
-    receivers.put(newId, rcvr);
-    LOG.info("Successfully added SpanReceiver " + info.getClassName() +
-        " with configuration " + configStringBuilder.toString());
-    return newId;
-  }
-
-  public synchronized void removeSpanReceiver(long spanReceiverId)
-      throws IOException {
-    SpanReceiver rcvr = receivers.remove(spanReceiverId);
-    if (rcvr == null) {
-      throw new IOException("There is no span receiver with id " + spanReceiverId);
-    }
-    Trace.removeReceiver(rcvr);
-    rcvr.close();
-    LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
-        " with class " + rcvr.getClass().getName());
-  }
-}
@@ -24,7 +24,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
-import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.core.HTraceConfiguration;

 /**
  * This class provides utility functions for tracing.

@@ -32,6 +32,7 @@ import org.apache.htrace.HTraceConfiguration;
 @InterfaceAudience.Private
 public class TraceUtils {
   private static List<ConfigurationPair> EMPTY = Collections.emptyList();
+  static final String DEFAULT_HADOOP_PREFIX = "hadoop.htrace.";

   public static HTraceConfiguration wrapHadoopConf(final String prefix,
       final Configuration conf) {

@@ -47,16 +48,27 @@ public class TraceUtils {
     return new HTraceConfiguration() {
       @Override
       public String get(String key) {
-        return get(key, "");
+        String ret = getInternal(prefix + key);
+        if (ret != null) {
+          return ret;
+        }
+        return getInternal(DEFAULT_HADOOP_PREFIX + key);
       }

       @Override
       public String get(String key, String defaultValue) {
-        String prefixedKey = prefix + key;
-        if (extraMap.containsKey(prefixedKey)) {
-          return extraMap.get(prefixedKey);
+        String ret = get(key);
+        if (ret != null) {
+          return ret;
         }
-        return conf.get(prefixedKey, defaultValue);
+        return defaultValue;
       }
+
+      private String getInternal(String key) {
+        if (extraMap.containsKey(key)) {
+          return extraMap.get(key);
+        }
+        return conf.get(key);
+      }
     };
   }
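Behavior sketch of the `wrapHadoopConf` change above: a component-specific prefix now shadows the shared `hadoop.htrace.` namespace, which in turn shadows the caller-supplied default. A small demo under those semantics (prefix values illustrative):

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.HTraceConfiguration;

class WrapHadoopConfDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.set("hadoop.htrace.sampler.classes", "NeverSampler");
    conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");
    conf.set("hadoop.htrace.span.receiver.classes", "LocalFileSpanReceiver");

    HTraceConfiguration htconf =
        TraceUtils.wrapHadoopConf("fs.client.htrace.", conf);
    // The prefixed key shadows the hadoop.htrace. fallback:
    System.out.println(htconf.get("sampler.classes"));       // AlwaysSampler
    // Unprefixed keys fall back to the shared namespace:
    System.out.println(htconf.get("span.receiver.classes")); // LocalFileSpanReceiver
    // Fully unset keys fall through to the supplied default:
    System.out.println(htconf.get("probability", "0.01"));   // 0.01
  }
}
```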
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.tracing;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.TracerPool;
+
+/**
+ * This class provides functions for managing the tracer configuration at
+ * runtime via an RPC protocol.
+ */
+@InterfaceAudience.Private
+public class TracerConfigurationManager implements TraceAdminProtocol {
+  private static final Log LOG =
+      LogFactory.getLog(TracerConfigurationManager.class);
+
+  private final String confPrefix;
+  private final Configuration conf;
+
+  public TracerConfigurationManager(String confPrefix, Configuration conf) {
+    this.confPrefix = confPrefix;
+    this.conf = conf;
+  }
+
+  public synchronized SpanReceiverInfo[] listSpanReceivers()
+      throws IOException {
+    TracerPool pool = TracerPool.getGlobalTracerPool();
+    SpanReceiver[] receivers = pool.getReceivers();
+    SpanReceiverInfo[] info = new SpanReceiverInfo[receivers.length];
+    for (int i = 0; i < receivers.length; i++) {
+      SpanReceiver receiver = receivers[i];
+      info[i] = new SpanReceiverInfo(receiver.getId(),
+          receiver.getClass().getName());
+    }
+    return info;
+  }
+
+  public synchronized long addSpanReceiver(SpanReceiverInfo info)
+      throws IOException {
+    StringBuilder configStringBuilder = new StringBuilder();
+    String prefix = "";
+    for (ConfigurationPair pair : info.configPairs) {
+      configStringBuilder.append(prefix).append(pair.getKey()).
+          append(" = ").append(pair.getValue());
+      prefix = ", ";
+    }
+    SpanReceiver rcvr = null;
+    try {
+      rcvr = new SpanReceiver.Builder(TraceUtils.wrapHadoopConf(
+            confPrefix, conf, info.configPairs)).
+          className(info.getClassName().trim()).
+          build();
+    } catch (RuntimeException e) {
+      LOG.info("Failed to add SpanReceiver " + info.getClassName() +
+          " with configuration " + configStringBuilder.toString(), e);
+      throw e;
+    }
+    TracerPool.getGlobalTracerPool().addReceiver(rcvr);
+    LOG.info("Successfully added SpanReceiver " + info.getClassName() +
+        " with configuration " + configStringBuilder.toString());
+    return rcvr.getId();
+  }
+
+  public synchronized void removeSpanReceiver(long spanReceiverId)
+      throws IOException {
+    SpanReceiver[] receivers =
+        TracerPool.getGlobalTracerPool().getReceivers();
+    for (SpanReceiver receiver : receivers) {
+      if (receiver.getId() == spanReceiverId) {
+        TracerPool.getGlobalTracerPool().removeAndCloseReceiver(receiver);
+        LOG.info("Successfully removed SpanReceiver " + spanReceiverId +
+            " with class " + receiver.getClass().getName());
+        return;
+      }
+    }
+    throw new IOException("There is no span receiver with id " + spanReceiverId);
+  }
+}
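A hedged usage sketch for the new class: a daemon wires `TracerConfigurationManager` to its `TraceAdminProtocol` endpoint so span receivers can be listed, added, and removed at runtime; receiver IDs now come from htrace's global `TracerPool` rather than a counter in the deleted `SpanReceiverHost`. The prefix below and the `SpanReceiverInfo` accessors are assumptions drawn from how the class is used elsewhere in Hadoop.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.SpanReceiverInfo;
import org.apache.hadoop.tracing.TracerConfigurationManager;

class TraceAdminSketch {
  static void list(Configuration conf) throws IOException {
    TracerConfigurationManager mgr =
        new TracerConfigurationManager("dfs.htrace.", conf);  // illustrative prefix
    for (SpanReceiverInfo info : mgr.listSpanReceivers()) {
      System.out.println(info.getId() + "\t" + info.getClassName());
    }
  }
}
```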
@@ -27,8 +27,8 @@ import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformation
 import org.apache.hadoop.ipc.protobuf.RpcHeaderProtos.*;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;

 import com.google.protobuf.ByteString;

@@ -169,11 +169,12 @@ public abstract class ProtoUtil {
         .setRetryCount(retryCount).setClientId(ByteString.copyFrom(uuid));

     // Add tracing info if we are currently tracing.
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
       result.setTraceInfo(RPCTraceInfoProto.newBuilder()
-          .setParentId(s.getSpanId())
-          .setTraceId(s.getTraceId()).build());
+          .setTraceId(span.getSpanId().getHigh())
+          .setParentId(span.getSpanId().getLow())
+          .build());
     }

     return result.build();
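This hunk, together with the `.proto` comments just below, reflects the move to 128-bit span IDs: an htrace 4 `SpanId` is carried over the wire as two `int64` fields and reassembled on the server (see the Server.java hunk earlier). A sketch of the round trip:

```java
import org.apache.htrace.core.SpanId;

class SpanIdWireSketch {
  public static void main(String[] args) {
    SpanId id = new SpanId(0x1234L, 0x5678L);
    long traceId = id.getHigh();    // stored in RPCTraceInfoProto.traceId
    long parentId = id.getLow();    // stored in RPCTraceInfoProto.parentId
    SpanId roundTripped = new SpanId(traceId, parentId);
    System.out.println(id.equals(roundTripped));  // true
  }
}
```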
@@ -61,8 +61,9 @@ enum RpcKindProto {
  * what span caused the new span we will create when this message is received.
  */
 message RPCTraceInfoProto {
-  optional int64 traceId = 1;
-  optional int64 parentId = 2;
+  optional int64 traceId = 1; // parentIdHigh
+  optional int64 parentId = 2; // parentIdLow

 }

 message RpcRequestHeaderProto { // the header for the RpcRequest

@@ -2000,4 +2000,19 @@ for ldap providers in the same way as above does.
     the limit is 0 or the -safely is not specified in -rm command.
   </description>
 </property>
+
+<property>
+  <name>fs.client.htrace.sampler.classes</name>
+  <value></value>
+  <description>The class names of the HTrace Samplers to use for Hadoop
+    filesystem clients.
+  </description>
+</property>
+
+<property>
+  <name>hadoop.htrace.span.receiver.classes</name>
+  <value></value>
+  <description>The class names of the Span Receivers to use for Hadoop.
+  </description>
+</property>
 </configuration>
@@ -49,37 +49,14 @@ interface bundled with HTrace or implementing it by yourself.
 * HTracedRESTReceiver
 * ZipkinSpanReceiver

-In order to set up SpanReceivers for HDFS servers,
-configure what SpanReceivers you'd like to use
-by putting a comma separated list of the fully-qualified class name of classes implementing SpanReceiver
-in `hdfs-site.xml` property: `dfs.htrace.spanreceiver.classes`.
-
-```xml
-<property>
-  <name>dfs.htrace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
-</property>
-<property>
-  <name>dfs.htrace.local-file-span-receiver.path</name>
-  <value>/var/log/hadoop/htrace.out</value>
-</property>
-```
-
-You can omit package name prefix if you use span receiver bundled with HTrace.
-
-```xml
-<property>
-  <name>dfs.htrace.spanreceiver.classes</name>
-  <value>LocalFileSpanReceiver</value>
-</property>
-```
-
-You also need to add the jar bundling SpanReceiver to the classpath of Hadoop
-on each node. (LocalFileSpanReceiver in the example above is included in the
-jar of htrace-core which is bundled with Hadoop.)
+See core-default.xml for a description of HTrace configuration keys.  In some
+cases, you will also need to add the jar containing the SpanReceiver that you
+are using to the classpath of Hadoop on each node. (In the example above,
+LocalFileSpanReceiver is included in the htrace-core4 jar which is bundled
+with Hadoop.)

 ```
-$ cp htrace-htraced/target/htrace-htraced-3.2.0-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
+$ cp htrace-htraced/target/htrace-htraced-4.0.1-incubating.jar $HADOOP_HOME/share/hadoop/common/lib/
 ```

 ### Dynamic update of tracing configuration

@@ -92,11 +69,11 @@ You need to run the command against all servers if you want to update the config

     $ hadoop trace -list -host 192.168.56.2:9000
     ID  CLASS
-    1   org.apache.htrace.impl.LocalFileSpanReceiver
+    1   org.apache.htrace.core.LocalFileSpanReceiver

     $ hadoop trace -list -host 192.168.56.2:50020
     ID  CLASS
-    1   org.apache.htrace.impl.LocalFileSpanReceiver
+    1   org.apache.htrace.core.LocalFileSpanReceiver

 `hadoop trace -remove` removes span receiver from server.
 `-remove` options takes id of span receiver as argument.

@@ -113,7 +90,7 @@ You can specify the configuration associated with span receiver by `-Ckey=value`

     $ hadoop trace -list -host 192.168.56.2:9000
     ID  CLASS
-    2   org.apache.htrace.impl.LocalFileSpanReceiver
+    2   org.apache.htrace.core.LocalFileSpanReceiver

 ### Starting tracing spans by HTrace API

@@ -121,26 +98,21 @@ In order to trace, you will need to wrap the traced logic with **tracing span**
 When there is running tracing spans,
 the tracing information is propagated to servers along with RPC requests.

-In addition, you need to initialize `SpanReceiverHost` once per process.
-
 ```java
-    import org.apache.hadoop.hdfs.HdfsConfiguration;
-    import org.apache.hadoop.tracing.SpanReceiverHost;
-    import org.apache.htrace.Sampler;
-    import org.apache.htrace.Trace;
-    import org.apache.htrace.TraceScope;
+    import org.apache.htrace.core.Tracer;
+    import org.apache.htrace.core.TraceScope;

     ...

-    SpanReceiverHost.getInstance(new HdfsConfiguration());
-
-    ...
-
-    TraceScope ts = Trace.startSpan("Gets", Sampler.ALWAYS);
+    TraceScope ts = tracer.newScope("Gets");
     try {
       ... // traced logic
     } finally {
-      if (ts != null) ts.close();
+      ts.close();
     }
 ```

@@ -154,11 +126,10 @@ which start tracing span before invoking HDFS shell command.
     import org.apache.hadoop.fs.FsShell;
     import org.apache.hadoop.hdfs.DFSConfigKeys;
     import org.apache.hadoop.hdfs.HdfsConfiguration;
-    import org.apache.hadoop.tracing.SpanReceiverHost;
+    import org.apache.hadoop.tracing.TraceUtils;
     import org.apache.hadoop.util.ToolRunner;
-    import org.apache.htrace.Sampler;
-    import org.apache.htrace.Trace;
-    import org.apache.htrace.TraceScope;
+    import org.apache.htrace.core.Tracer;
+    import org.apache.htrace.core.TraceScope;

     public class TracingFsShell {
       public static void main(String argv[]) throws Exception {

@@ -166,13 +137,19 @@ which start tracing span before invoking HDFS shell command.
         FsShell shell = new FsShell();
         conf.setQuietMode(false);
         shell.setConf(conf);
-        SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
+        Tracer tracer = new Tracer.Builder().
+            name("TracingFsShell").
+            conf(TraceUtils.wrapHadoopConf("tracing.fs.shell.htrace.", conf)).
+            build();
         int res = 0;
-        try (TraceScope ts = Trace.startSpan("FsShell", Sampler.ALWAYS)) {
+        TraceScope scope = tracer.newScope("FsShell");
+        try {
           res = ToolRunner.run(shell, argv);
+        } finally {
+          scope.close();
           shell.close();
         }
+        tracer.close();
         System.exit(res);
       }
     }

@@ -189,16 +166,15 @@ The DFSClient can enable tracing internally. This allows you to use HTrace with
 your client without modifying the client source code.

 Configure the span receivers and samplers in `hdfs-site.xml`
-by properties `dfs.client.htrace.sampler` and `dfs.client.htrace.sampler`.
-The value of `dfs.client.htrace.sampler` can be NeverSampler, AlwaysSampler or ProbabilitySampler.
+by properties `fs.client.htrace.sampler.classes` and
+`fs.client.htrace.spanreceiver.classes`. The value of
+`fs.client.htrace.sampler.classes` can be NeverSampler, AlwaysSampler or
+ProbabilitySampler.

 * NeverSampler: HTrace is OFF for all requests to namenodes and datanodes;
 * AlwaysSampler: HTrace is ON for all requests to namenodes and datanodes;
 * ProbabilitySampler: HTrace is ON for some percentage% of requests to namenodes and datanodes

 You do not need to enable this if your client program has been modified
 to use HTrace.

 ```xml
 <property>
   <name>dfs.client.htrace.spanreceiver.classes</name>
@@ -24,10 +24,9 @@ import junit.framework.AssertionFailedError;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.tracing.SetSpanReceiver;
-import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.SamplerBuilder;
-import org.apache.htrace.impl.AlwaysSampler;
+import org.apache.htrace.core.AlwaysSampler;
+import org.apache.htrace.core.Tracer;
 import org.junit.Assert;
 import org.junit.Test;

@@ -53,10 +52,10 @@ public class TestFsShell {
   @Test
   public void testTracing() throws Throwable {
     Configuration conf = new Configuration();
-    String prefix = FsShell.SEHLL_HTRACE_PREFIX;
-    conf.set(prefix + SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
+    String prefix = "fs.shell.htrace.";
+    conf.set(prefix + Tracer.SPAN_RECEIVER_CLASSES_KEY,
         SetSpanReceiver.class.getName());
-    conf.set(prefix + SamplerBuilder.SAMPLER_CONF_KEY,
+    conf.set(prefix + Tracer.SAMPLER_CLASSES_KEY,
         AlwaysSampler.class.getName());
     conf.setQuietMode(false);
     FsShell shell = new FsShell(conf);

@@ -19,9 +19,10 @@ package org.apache.hadoop.tracing;

 import com.google.common.base.Supplier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.htrace.Span;
-import org.apache.htrace.SpanReceiver;
-import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.HTraceConfiguration;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.LinkedList;

@@ -39,7 +40,7 @@ import org.junit.Assert;
  * push all the metrics to a static place, and would make testing
  * SpanReceiverHost harder.
  */
-public class SetSpanReceiver implements SpanReceiver {
+public class SetSpanReceiver extends SpanReceiver {

   public SetSpanReceiver(HTraceConfiguration conf) {
   }

@@ -68,8 +69,8 @@ public class SetSpanReceiver implements SpanReceiver {
   }

   public static class SetHolder {
-    public static ConcurrentHashMap<Long, Span> spans =
-        new ConcurrentHashMap<Long, Span>();
+    public static ConcurrentHashMap<SpanId, Span> spans =
+        new ConcurrentHashMap<SpanId, Span>();

     public static Map<String, List<Span>> getMap() {
       Map<String, List<Span>> map = new HashMap<String, List<Span>>();

@@ -21,7 +21,7 @@ import static org.junit.Assert.assertEquals;
 import java.util.LinkedList;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.tracing.SpanReceiverInfo.ConfigurationPair;
-import org.apache.htrace.HTraceConfiguration;
+import org.apache.htrace.core.HTraceConfiguration;
 import org.junit.Test;

 public class TestTraceUtils {
@@ -66,6 +66,7 @@ import org.apache.hadoop.util.Time;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.Tracer;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -178,6 +179,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    */
   private Configuration configuration;

+  /**
+   * The HTrace tracer to use.
+   */
+  private Tracer tracer;
+
   /**
    * Information about the domain socket path we should use to connect to the
    * local peer-- or null if we haven't examined the local domain socket.

@@ -282,6 +288,11 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     return this;
   }

+  public BlockReaderFactory setTracer(Tracer tracer) {
+    this.tracer = tracer;
+    return this;
+  }
+
   @VisibleForTesting
   public static void setFailureInjectorForTesting(FailureInjector injector) {
     failureInjector = injector;

@@ -435,7 +446,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
     try {
       return BlockReaderLocalLegacy.newBlockReader(conf,
           userGroupInformation, configuration, fileName, block, token,
-          datanode, startOffset, length, storageType);
+          datanode, startOffset, length, storageType, tracer);
     } catch (RemoteException remoteException) {
       ioe = remoteException.unwrapRemoteException(
           InvalidToken.class, AccessControlException.class);

@@ -496,6 +507,7 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
         setVerifyChecksum(verifyChecksum).
         setCachingStrategy(cachingStrategy).
         setStorageType(storageType).
+        setTracer(tracer).
         build();
   }

@@ -865,12 +877,12 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
       return RemoteBlockReader.newBlockReader(fileName,
           block, token, startOffset, length, conf.getIoBufferSize(),
           verifyChecksum, clientName, peer, datanode,
-          clientContext.getPeerCache(), cachingStrategy);
+          clientContext.getPeerCache(), cachingStrategy, tracer);
     } else {
       return RemoteBlockReader2.newBlockReader(
           fileName, block, token, startOffset, length,
           verifyChecksum, clientName, peer, datanode,
-          clientContext.getPeerCache(), cachingStrategy);
+          clientContext.getPeerCache(), cachingStrategy, tracer);
     }
   }

@@ -34,9 +34,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;

@@ -75,6 +74,7 @@ class BlockReaderLocal implements BlockReader {
     private long dataPos;
     private ExtendedBlock block;
     private StorageType storageType;
+    private Tracer tracer;

     public Builder(ShortCircuitConf conf) {
       this.maxReadahead = Integer.MAX_VALUE;

@@ -120,6 +120,11 @@ class BlockReaderLocal implements BlockReader {
       return this;
     }

+    public Builder setTracer(Tracer tracer) {
+      this.tracer = tracer;
+      return this;
+    }
+
     public BlockReaderLocal build() {
       Preconditions.checkNotNull(replica);
       return new BlockReaderLocal(this);

@@ -228,6 +233,11 @@ class BlockReaderLocal implements BlockReader {
    */
   private StorageType storageType;

+  /**
+   * The Tracer to use.
+   */
+  private final Tracer tracer;
+
   private BlockReaderLocal(Builder builder) {
     this.replica = builder.replica;
     this.dataIn = replica.getDataStream().getChannel();

@@ -257,6 +267,7 @@ class BlockReaderLocal implements BlockReader {
     }
     this.maxReadaheadLength = maxReadaheadChunks * bytesPerChecksum;
     this.storageType = builder.storageType;
+    this.tracer = builder.tracer;
   }

   private synchronized void createDataBufIfNeeded() {

@@ -324,8 +335,8 @@ class BlockReaderLocal implements BlockReader {
    */
   private synchronized int fillBuffer(ByteBuffer buf, boolean canSkipChecksum)
       throws IOException {
-    TraceScope scope = Trace.startSpan("BlockReaderLocal#fillBuffer(" +
-        block.getBlockId() + ")", Sampler.NEVER);
+    TraceScope scope = tracer.newScope(
+        "BlockReaderLocal#fillBuffer(" + block.getBlockId() + ")");
     try {
       int total = 0;
       long startDataPos = dataPos;
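A note on the `fillBuffer` change above, plus a sketch: htrace 3 passed `Sampler.NEVER` so the span existed only when a parent trace was active; in htrace 4 that is effectively what `newScope()` does by default (it only starts a real span when a parent scope is active or the tracer's configured samplers fire), so the per-call sampler argument simply disappears.

```java
import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.ReadableByteChannel;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class FillBufferTraceSketch {
  static int fill(Tracer tracer, long blockId, ByteBuffer buf,
      ReadableByteChannel channel) throws IOException {
    TraceScope scope = tracer.newScope(
        "BlockReaderLocal#fillBuffer(" + blockId + ")");  // no sampler arg
    try {
      return channel.read(buf);
    } finally {
      scope.close();
    }
  }
}
```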
@@ -50,10 +50,8 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DirectBufferPool;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -182,7 +180,8 @@ class BlockReaderLocalLegacy implements BlockReader {
   private long startOffset;
   private final String filename;
   private long blockId;
+  private final Tracer tracer;

   /**
    * The only way this object can be instantiated.
    */
@@ -190,8 +189,8 @@ class BlockReaderLocalLegacy implements BlockReader {
       UserGroupInformation userGroupInformation,
       Configuration configuration, String file, ExtendedBlock blk,
       Token<BlockTokenIdentifier> token, DatanodeInfo node,
-      long startOffset, long length, StorageType storageType)
-      throws IOException {
+      long startOffset, long length, StorageType storageType,
+      Tracer tracer) throws IOException {
     final ShortCircuitConf scConf = conf.getShortCircuitConf();
     LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node
         .getIpcPort());
@@ -239,10 +238,10 @@ class BlockReaderLocalLegacy implements BlockReader {
             - (startOffset % checksum.getBytesPerChecksum());
         localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
             startOffset, length, pathinfo, checksum, true, dataIn,
-            firstChunkOffset, checksumIn);
+            firstChunkOffset, checksumIn, tracer);
       } else {
         localBlockReader = new BlockReaderLocalLegacy(scConf, file, blk, token,
-            startOffset, length, pathinfo, dataIn);
+            startOffset, length, pathinfo, dataIn, tracer);
       }
     } catch (IOException e) {
       // remove from cache
@@ -321,18 +320,18 @@ class BlockReaderLocalLegacy implements BlockReader {

   private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
       ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
-      long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn)
-      throws IOException {
+      long length, BlockLocalPathInfo pathinfo, FileInputStream dataIn,
+      Tracer tracer) throws IOException {
     this(conf, hdfsfile, block, token, startOffset, length, pathinfo,
         DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 4), false,
-        dataIn, startOffset, null);
+        dataIn, startOffset, null, tracer);
   }

   private BlockReaderLocalLegacy(ShortCircuitConf conf, String hdfsfile,
       ExtendedBlock block, Token<BlockTokenIdentifier> token, long startOffset,
       long length, BlockLocalPathInfo pathinfo, DataChecksum checksum,
       boolean verifyChecksum, FileInputStream dataIn, long firstChunkOffset,
-      FileInputStream checksumIn) throws IOException {
+      FileInputStream checksumIn, Tracer tracer) throws IOException {
     this.filename = hdfsfile;
     this.checksum = checksum;
     this.verifyChecksum = verifyChecksum;
@@ -368,6 +367,7 @@ class BlockReaderLocalLegacy implements BlockReader {
         bufferPool.returnBuffer(checksumBuff);
       }
     }
+    this.tracer = tracer;
   }

   /**
@@ -375,8 +375,8 @@ class BlockReaderLocalLegacy implements BlockReader {
    */
   private int fillBuffer(FileInputStream stream, ByteBuffer buf)
       throws IOException {
-    TraceScope scope = Trace.startSpan("BlockReaderLocalLegacy#fillBuffer(" +
-        blockId + ")", Sampler.NEVER);
+    TraceScope scope = tracer.
+        newScope("BlockReaderLocalLegacy#fillBuffer(" + blockId + ")");
     try {
       int bytesRead = stream.getChannel().read(buf);
       if (bytesRead < 0) {
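The constructor changes above all follow one shape: a Tracer is plumbed through every constructor instead of being reached via static Trace calls. A condensed sketch of that injection pattern, with hypothetical class and method names (not HDFS API):

    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    // The reader keeps the Tracer it was handed when built, rather than
    // calling the removed static Trace.startSpan().
    class LocalBlockReader {
      private final Tracer tracer;
      private final long blockId;

      LocalBlockReader(Tracer tracer, long blockId) {
        this.tracer = tracer;
        this.blockId = blockId;
      }

      void fillBuffer() {
        TraceScope scope = tracer.newScope("fillBuffer(" + blockId + ")");
        try {
          // read from the local replica here
        } finally {
          scope.close();
        }
      }
    }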
@@ -46,9 +46,9 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.security.token.Token;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;

@@ -74,7 +74,8 @@ class BlockStorageLocationUtil {
    */
   private static List<VolumeBlockLocationCallable> createVolumeBlockLocationCallables(
       Configuration conf, Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks,
-      int timeout, boolean connectToDnViaHostname, Span parent) {
+      int timeout, boolean connectToDnViaHostname,
+      Tracer tracer, SpanId parentSpanId) {

     if (datanodeBlocks.isEmpty()) {
       return Lists.newArrayList();
@@ -114,7 +115,7 @@ class BlockStorageLocationUtil {
       }
       VolumeBlockLocationCallable callable = new VolumeBlockLocationCallable(
           conf, datanode, poolId, blockIds, dnTokens, timeout,
-          connectToDnViaHostname, parent);
+          connectToDnViaHostname, tracer, parentSpanId);
       callables.add(callable);
     }
     return callables;
@@ -133,12 +134,12 @@ class BlockStorageLocationUtil {
    */
   static Map<DatanodeInfo, HdfsBlocksMetadata> queryDatanodesForHdfsBlocksMetadata(
       Configuration conf, Map<DatanodeInfo, List<LocatedBlock>> datanodeBlocks,
-      int poolsize, int timeoutMs, boolean connectToDnViaHostname)
-      throws InvalidBlockTokenException {
+      int poolsize, int timeoutMs, boolean connectToDnViaHostname,
+      Tracer tracer, SpanId parentSpanId) throws InvalidBlockTokenException {

     List<VolumeBlockLocationCallable> callables =
         createVolumeBlockLocationCallables(conf, datanodeBlocks, timeoutMs,
-            connectToDnViaHostname, Trace.currentSpan());
+            connectToDnViaHostname, tracer, parentSpanId);

     // Use a thread pool to execute the Callables in parallel
     List<Future<HdfsBlocksMetadata>> futures =
@@ -322,12 +323,14 @@ class BlockStorageLocationUtil {
     private final long[] blockIds;
     private final List<Token<BlockTokenIdentifier>> dnTokens;
     private final boolean connectToDnViaHostname;
-    private final Span parentSpan;
+    private final Tracer tracer;
+    private final SpanId parentSpanId;

     VolumeBlockLocationCallable(Configuration configuration,
         DatanodeInfo datanode, String poolId, long []blockIds,
         List<Token<BlockTokenIdentifier>> dnTokens, int timeout,
-        boolean connectToDnViaHostname, Span parentSpan) {
+        boolean connectToDnViaHostname,
+        Tracer tracer, SpanId parentSpanId) {
       this.configuration = configuration;
       this.timeout = timeout;
       this.datanode = datanode;
@@ -335,7 +338,8 @@ class BlockStorageLocationUtil {
       this.blockIds = blockIds;
       this.dnTokens = dnTokens;
       this.connectToDnViaHostname = connectToDnViaHostname;
-      this.parentSpan = parentSpan;
+      this.tracer = tracer;
+      this.parentSpanId = parentSpanId;
     }

     public DatanodeInfo getDatanodeInfo() {
@@ -348,7 +352,7 @@ class BlockStorageLocationUtil {
       // Create the RPC proxy and make the RPC
       ClientDatanodeProtocol cdp = null;
       TraceScope scope =
-          Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
+          tracer.newScope("getHdfsBlocksMetadata", parentSpanId);
       try {
         cdp = DFSUtilClient.createClientDatanodeProtocolProxy(
             datanode, configuration,
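The key API shift in this file: htrace 4 passes a SpanId value across threads instead of a live Span object, and the worker re-parents itself with the two-argument tracer.newScope(name, parentId). A minimal sketch mirroring the callable above; the class name and the placeholder body are illustrative:

    import java.util.concurrent.Callable;
    import org.apache.htrace.core.SpanId;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    // The submitting thread captures a SpanId; the worker thread opens a
    // child scope under it, so the cross-thread RPC stays in the same trace.
    class TracedWork implements Callable<Void> {
      private final Tracer tracer;
      private final SpanId parentSpanId;

      TracedWork(Tracer tracer, SpanId parentSpanId) {
        this.tracer = tracer;
        this.parentSpanId = parentSpanId;
      }

      @Override
      public Void call() {
        TraceScope scope = tracer.newScope("getHdfsBlocksMetadata", parentSpanId);
        try {
          // per-datanode RPC happens here
          return null;
        } finally {
          scope.close();
        }
      }
    }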
@@ -77,6 +77,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.FsStatus;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.HdfsBlockLocation;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
@@ -172,24 +173,19 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
-import org.apache.hadoop.tracing.SpanReceiverHost;
-import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.SamplerBuilder;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import com.google.common.net.InetAddresses;
+import org.apache.htrace.core.Tracer;

 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -212,6 +208,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour

   private final Configuration conf;
+  private final Tracer tracer;
   private final DfsClientConf dfsClientConf;
   final ClientProtocol namenode;
   /* The service used for delegation tokens */
@@ -238,7 +235,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
       new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
-  private final Sampler<?> traceSampler;
   private final int smallBufferSize;

   public DfsClientConf getConf() {
@@ -302,11 +298,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode,
       Configuration conf, FileSystem.Statistics stats)
       throws IOException {
-    SpanReceiverHost.get(conf, HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX);
-    traceSampler = new SamplerBuilder(TraceUtils.
-        wrapHadoopConf(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX, conf))
-        .build();
     // Copy only the required DFSClient configuration
+    this.tracer = FsTracer.get(conf);
     this.dfsClientConf = new DfsClientConf(conf);
     this.conf = conf;
     this.stats = stats;
@@ -628,7 +621,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getPreferredBlockSize(String)
    */
   public long getBlockSize(String f) throws IOException {
-    TraceScope scope = getPathTraceScope("getBlockSize", f);
+    TraceScope scope = newPathTraceScope("getBlockSize", f);
     try {
       return namenode.getPreferredBlockSize(f);
     } catch (IOException ie) {
@@ -670,7 +663,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
       throws IOException {
     assert dtService != null;
-    TraceScope scope = Trace.startSpan("getDelegationToken", traceSampler);
+    TraceScope scope = tracer.newScope("getDelegationToken");
     try {
       Token<DelegationTokenIdentifier> token =
           namenode.getDelegationToken(renewer);
@@ -826,7 +819,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   @VisibleForTesting
   public LocatedBlocks getLocatedBlocks(String src, long start, long length)
       throws IOException {
-    TraceScope scope = getPathTraceScope("getBlockLocations", src);
+    TraceScope scope = newPathTraceScope("getBlockLocations", src);
     try {
       return callGetBlockLocations(namenode, src, start, length);
     } finally {
@@ -858,7 +851,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   boolean recoverLease(String src) throws IOException {
     checkOpen();

-    TraceScope scope = getPathTraceScope("recoverLease", src);
+    TraceScope scope = newPathTraceScope("recoverLease", src);
     try {
       return namenode.recoverLease(src, clientName);
     } catch (RemoteException re) {
@@ -884,7 +877,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public BlockLocation[] getBlockLocations(String src, long start,
       long length) throws IOException, UnresolvedLinkException {
-    TraceScope scope = getPathTraceScope("getBlockLocations", src);
+    TraceScope scope = newPathTraceScope("getBlockLocations", src);
     try {
       LocatedBlocks blocks = getLocatedBlocks(src, start, length);
       BlockLocation[] locations = DFSUtilClient.locatedBlocks2Locations(blocks);
@@ -950,14 +943,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

     // Make RPCs to the datanodes to get volume locations for its replicas
     TraceScope scope =
-        Trace.startSpan("getBlockStorageLocations", traceSampler);
+        tracer.newScope("getBlockStorageLocations");
     Map<DatanodeInfo, HdfsBlocksMetadata> metadatas;
     try {
       metadatas = BlockStorageLocationUtil.
           queryDatanodesForHdfsBlocksMetadata(conf, datanodeBlocks,
               getConf().getFileBlockStorageLocationsNumThreads(),
               getConf().getFileBlockStorageLocationsTimeoutMs(),
-              getConf().isConnectToDnViaHostname());
+              getConf().isConnectToDnViaHostname(), tracer, scope.getSpanId());
       if (LOG.isTraceEnabled()) {
         LOG.trace("metadata returned: "
             + Joiner.on("\n").withKeyValueSeparator("=").join(metadatas));
@@ -983,7 +976,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   private KeyVersion decryptEncryptedDataEncryptionKey(FileEncryptionInfo
       feInfo) throws IOException {
-    TraceScope scope = Trace.startSpan("decryptEDEK", traceSampler);
+    TraceScope scope = tracer.newScope("decryptEDEK");
     try {
       KeyProvider provider = getKeyProvider();
       if (provider == null) {
@@ -1139,7 +1132,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       throws IOException, UnresolvedLinkException {
     checkOpen();
     // Get block info from namenode
-    TraceScope scope = getPathTraceScope("newDFSInputStream", src);
+    TraceScope scope = newPathTraceScope("newDFSInputStream", src);
     try {
       return new DFSInputStream(this, src, verifyChecksum, null);
     } finally {
@@ -1384,7 +1377,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void createSymlink(String target, String link, boolean createParent)
       throws IOException {
-    TraceScope scope = getPathTraceScope("createSymlink", target);
+    TraceScope scope = newPathTraceScope("createSymlink", target);
     try {
       final FsPermission dirPerm = applyUMask(null);
       namenode.createSymlink(target, link, dirPerm, createParent);
@@ -1410,7 +1403,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public String getLinkTarget(String path) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getLinkTarget", path);
+    TraceScope scope = newPathTraceScope("getLinkTarget", path);
     try {
       return namenode.getLinkTarget(path);
     } catch (RemoteException re) {
@@ -1506,7 +1499,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean setReplication(String src, short replication)
       throws IOException {
-    TraceScope scope = getPathTraceScope("setReplication", src);
+    TraceScope scope = newPathTraceScope("setReplication", src);
     try {
       return namenode.setReplication(src, replication);
     } catch(RemoteException re) {
@@ -1529,7 +1522,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void setStoragePolicy(String src, String policyName)
       throws IOException {
-    TraceScope scope = getPathTraceScope("setStoragePolicy", src);
+    TraceScope scope = newPathTraceScope("setStoragePolicy", src);
     try {
       namenode.setStoragePolicy(src, policyName);
     } catch (RemoteException e) {
@@ -1550,7 +1543,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public BlockStoragePolicy getStoragePolicy(String path) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getStoragePolicy", path);
+    TraceScope scope = newPathTraceScope("getStoragePolicy", path);
     try {
       return namenode.getStoragePolicy(path);
     } catch (RemoteException e) {
@@ -1567,7 +1560,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @return All the existing storage policies
    */
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
-    TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
+    TraceScope scope = tracer.newScope("getStoragePolicies");
     try {
       return namenode.getStoragePolicies();
     } finally {
@@ -1583,7 +1576,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   @Deprecated
   public boolean rename(String src, String dst) throws IOException {
     checkOpen();
-    TraceScope scope = getSrcDstTraceScope("rename", src, dst);
+    TraceScope scope = newSrcDstTraceScope("rename", src, dst);
     try {
       return namenode.rename(src, dst);
     } catch(RemoteException re) {
@@ -1604,7 +1597,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void concat(String trg, String [] srcs) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("concat", traceSampler);
+    TraceScope scope = tracer.newScope("concat");
     try {
       namenode.concat(trg, srcs);
     } catch(RemoteException re) {
@@ -1622,7 +1615,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void rename(String src, String dst, Options.Rename... options)
       throws IOException {
     checkOpen();
-    TraceScope scope = getSrcDstTraceScope("rename2", src, dst);
+    TraceScope scope = newSrcDstTraceScope("rename2", src, dst);
     try {
       namenode.rename2(src, dst, options);
     } catch(RemoteException re) {
@@ -1651,11 +1644,14 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       throw new HadoopIllegalArgumentException(
           "Cannot truncate to a negative file size: " + newLength + ".");
     }
+    TraceScope scope = newPathTraceScope("truncate", src);
     try {
       return namenode.truncate(src, newLength, clientName);
     } catch (RemoteException re) {
       throw re.unwrapRemoteException(AccessControlException.class,
           UnresolvedPathException.class);
+    } finally {
+      scope.close();
     }
   }

@@ -1678,7 +1674,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean delete(String src, boolean recursive) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("delete", src);
+    TraceScope scope = newPathTraceScope("delete", src);
     try {
       return namenode.delete(src, recursive);
     } catch(RemoteException re) {
@@ -1720,7 +1716,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public DirectoryListing listPaths(String src, byte[] startAfter,
       boolean needLocation) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("listPaths", src);
+    TraceScope scope = newPathTraceScope("listPaths", src);
     try {
       return namenode.getListing(src, startAfter, needLocation);
     } catch(RemoteException re) {
@@ -1742,7 +1738,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public HdfsFileStatus getFileInfo(String src) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getFileInfo", src);
+    TraceScope scope = newPathTraceScope("getFileInfo", src);
     try {
       return namenode.getFileInfo(src);
     } catch(RemoteException re) {
@@ -1760,7 +1756,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean isFileClosed(String src) throws IOException{
     checkOpen();
-    TraceScope scope = getPathTraceScope("isFileClosed", src);
+    TraceScope scope = newPathTraceScope("isFileClosed", src);
     try {
       return namenode.isFileClosed(src);
     } catch(RemoteException re) {
@@ -1782,7 +1778,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public HdfsFileStatus getFileLinkInfo(String src) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getFileLinkInfo", src);
+    TraceScope scope = newPathTraceScope("getFileLinkInfo", src);
     try {
       return namenode.getFileLinkInfo(src);
     } catch(RemoteException re) {
@@ -2085,7 +2081,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void setPermission(String src, FsPermission permission)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("setPermission", src);
+    TraceScope scope = newPathTraceScope("setPermission", src);
     try {
       namenode.setPermission(src, permission);
     } catch(RemoteException re) {
@@ -2110,7 +2106,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void setOwner(String src, String username, String groupname)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("setOwner", src);
+    TraceScope scope = newPathTraceScope("setOwner", src);
     try {
       namenode.setOwner(src, username, groupname);
     } catch(RemoteException re) {
@@ -2126,7 +2122,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   private long[] callGetStats() throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getStats", traceSampler);
+    TraceScope scope = tracer.newScope("getStats");
     try {
       return namenode.getStats();
     } finally {
@@ -2185,7 +2181,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
                                                  String cookie)
         throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("listCorruptFileBlocks", path);
+    TraceScope scope = newPathTraceScope("listCorruptFileBlocks", path);
     try {
       return namenode.listCorruptFileBlocks(path, cookie);
     } finally {
@@ -2196,7 +2192,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public DatanodeInfo[] datanodeReport(DatanodeReportType type)
       throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("datanodeReport", traceSampler);
+    TraceScope scope = tracer.newScope("datanodeReport");
     try {
       return namenode.getDatanodeReport(type);
     } finally {
@@ -2208,7 +2204,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       DatanodeReportType type) throws IOException {
     checkOpen();
     TraceScope scope =
-        Trace.startSpan("datanodeStorageReport", traceSampler);
+        tracer.newScope("datanodeStorageReport");
     try {
       return namenode.getDatanodeStorageReport(type);
     } finally {
@@ -2238,7 +2234,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean setSafeMode(SafeModeAction action, boolean isChecked) throws IOException{
     TraceScope scope =
-        Trace.startSpan("setSafeMode", traceSampler);
+        tracer.newScope("setSafeMode");
     try {
       return namenode.setSafeMode(action, isChecked);
     } finally {
@@ -2257,7 +2253,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public String createSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("createSnapshot", traceSampler);
+    TraceScope scope = tracer.newScope("createSnapshot");
     try {
       return namenode.createSnapshot(snapshotRoot, snapshotName);
     } catch(RemoteException re) {
@@ -2279,7 +2275,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void deleteSnapshot(String snapshotRoot, String snapshotName)
       throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("deleteSnapshot", traceSampler);
+    TraceScope scope = tracer.newScope("deleteSnapshot");
     try {
       namenode.deleteSnapshot(snapshotRoot, snapshotName);
     } catch(RemoteException re) {
@@ -2300,7 +2296,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void renameSnapshot(String snapshotDir, String snapshotOldName,
       String snapshotNewName) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("renameSnapshot", traceSampler);
+    TraceScope scope = tracer.newScope("renameSnapshot");
     try {
       namenode.renameSnapshot(snapshotDir, snapshotOldName, snapshotNewName);
     } catch(RemoteException re) {
@@ -2319,8 +2315,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public SnapshottableDirectoryStatus[] getSnapshottableDirListing()
       throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getSnapshottableDirListing",
-        traceSampler);
+    TraceScope scope = tracer.newScope("getSnapshottableDirListing");
     try {
       return namenode.getSnapshottableDirListing();
     } catch(RemoteException re) {
@@ -2337,7 +2332,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void allowSnapshot(String snapshotRoot) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("allowSnapshot", traceSampler);
+    TraceScope scope = tracer.newScope("allowSnapshot");
     try {
       namenode.allowSnapshot(snapshotRoot);
     } catch (RemoteException re) {
@@ -2354,7 +2349,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void disallowSnapshot(String snapshotRoot) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("disallowSnapshot", traceSampler);
+    TraceScope scope = tracer.newScope("disallowSnapshot");
     try {
       namenode.disallowSnapshot(snapshotRoot);
     } catch (RemoteException re) {
@@ -2372,7 +2367,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public SnapshotDiffReport getSnapshotDiffReport(String snapshotDir,
       String fromSnapshot, String toSnapshot) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("getSnapshotDiffReport", traceSampler);
+    TraceScope scope = tracer.newScope("getSnapshotDiffReport");
     try {
       return namenode.getSnapshotDiffReport(snapshotDir,
           fromSnapshot, toSnapshot);
@@ -2386,7 +2381,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public long addCacheDirective(
       CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("addCacheDirective", traceSampler);
+    TraceScope scope = tracer.newScope("addCacheDirective");
     try {
       return namenode.addCacheDirective(info, flags);
     } catch (RemoteException re) {
@@ -2399,7 +2394,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void modifyCacheDirective(
       CacheDirectiveInfo info, EnumSet<CacheFlag> flags) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("modifyCacheDirective", traceSampler);
+    TraceScope scope = tracer.newScope("modifyCacheDirective");
     try {
       namenode.modifyCacheDirective(info, flags);
     } catch (RemoteException re) {
@@ -2412,7 +2407,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void removeCacheDirective(long id)
       throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("removeCacheDirective", traceSampler);
+    TraceScope scope = tracer.newScope("removeCacheDirective");
     try {
       namenode.removeCacheDirective(id);
     } catch (RemoteException re) {
@@ -2424,12 +2419,12 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
       CacheDirectiveInfo filter) throws IOException {
-    return new CacheDirectiveIterator(namenode, filter, traceSampler);
+    return new CacheDirectiveIterator(namenode, filter, tracer);
   }

   public void addCachePool(CachePoolInfo info) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("addCachePool", traceSampler);
+    TraceScope scope = tracer.newScope("addCachePool");
     try {
       namenode.addCachePool(info);
     } catch (RemoteException re) {
@@ -2441,7 +2436,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void modifyCachePool(CachePoolInfo info) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("modifyCachePool", traceSampler);
+    TraceScope scope = tracer.newScope("modifyCachePool");
     try {
       namenode.modifyCachePool(info);
     } catch (RemoteException re) {
@@ -2453,7 +2448,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void removeCachePool(String poolName) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("removeCachePool", traceSampler);
+    TraceScope scope = tracer.newScope("removeCachePool");
     try {
       namenode.removeCachePool(poolName);
     } catch (RemoteException re) {
@@ -2464,7 +2459,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }

   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
-    return new CachePoolIterator(namenode, traceSampler);
+    return new CachePoolIterator(namenode, tracer);
   }

   /**
@@ -2473,7 +2468,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#saveNamespace()
    */
   void saveNamespace() throws AccessControlException, IOException {
-    TraceScope scope = Trace.startSpan("saveNamespace", traceSampler);
+    TraceScope scope = tracer.newScope("saveNamespace");
     try {
       namenode.saveNamespace();
     } catch(RemoteException re) {
@@ -2490,7 +2485,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#rollEdits()
    */
   long rollEdits() throws AccessControlException, IOException {
-    TraceScope scope = Trace.startSpan("rollEdits", traceSampler);
+    TraceScope scope = tracer.newScope("rollEdits");
     try {
       return namenode.rollEdits();
     } catch(RemoteException re) {
@@ -2512,7 +2507,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   boolean restoreFailedStorage(String arg)
       throws AccessControlException, IOException{
-    TraceScope scope = Trace.startSpan("restoreFailedStorage", traceSampler);
+    TraceScope scope = tracer.newScope("restoreFailedStorage");
     try {
       return namenode.restoreFailedStorage(arg);
     } finally {
@@ -2528,7 +2523,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#refreshNodes()
    */
   public void refreshNodes() throws IOException {
-    TraceScope scope = Trace.startSpan("refreshNodes", traceSampler);
+    TraceScope scope = tracer.newScope("refreshNodes");
     try {
       namenode.refreshNodes();
     } finally {
@@ -2542,7 +2537,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#metaSave(String)
    */
   public void metaSave(String pathname) throws IOException {
-    TraceScope scope = Trace.startSpan("metaSave", traceSampler);
+    TraceScope scope = tracer.newScope("metaSave");
     try {
       namenode.metaSave(pathname);
     } finally {
@@ -2559,7 +2554,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#setBalancerBandwidth(long)
    */
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
+    TraceScope scope = tracer.newScope("setBalancerBandwidth");
     try {
       namenode.setBalancerBandwidth(bandwidth);
     } finally {
@@ -2571,7 +2566,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#finalizeUpgrade()
    */
   public void finalizeUpgrade() throws IOException {
-    TraceScope scope = Trace.startSpan("finalizeUpgrade", traceSampler);
+    TraceScope scope = tracer.newScope("finalizeUpgrade");
     try {
       namenode.finalizeUpgrade();
     } finally {
@@ -2580,7 +2575,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }

   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
-    TraceScope scope = Trace.startSpan("rollingUpgrade", traceSampler);
+    TraceScope scope = tracer.newScope("rollingUpgrade");
     try {
       return namenode.rollingUpgrade(action);
     } finally {
@@ -2638,7 +2633,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     if(LOG.isDebugEnabled()) {
       LOG.debug(src + ": masked=" + absPermission);
     }
-    TraceScope scope = Trace.startSpan("mkdir", traceSampler);
+    TraceScope scope = tracer.newScope("mkdir");
     try {
       return namenode.mkdirs(src, absPermission, createParent);
     } catch(RemoteException re) {
@@ -2665,7 +2660,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getContentSummary(String)
    */
   ContentSummary getContentSummary(String src) throws IOException {
-    TraceScope scope = getPathTraceScope("getContentSummary", src);
+    TraceScope scope = newPathTraceScope("getContentSummary", src);
     try {
       return namenode.getContentSummary(src);
     } catch(RemoteException re) {
@@ -2693,7 +2688,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
                                          storagespaceQuota);

     }
-    TraceScope scope = getPathTraceScope("setQuota", src);
+    TraceScope scope = newPathTraceScope("setQuota", src);
     try {
       // Pass null as storage type for traditional namespace/storagespace quota.
       namenode.setQuota(src, namespaceQuota, storagespaceQuota, null);
@@ -2728,7 +2723,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
       throw new IllegalArgumentException("Don't support Quota for storage type : "
           + type.toString());
     }
-    TraceScope scope = getPathTraceScope("setQuotaByStorageType", src);
+    TraceScope scope = newPathTraceScope("setQuotaByStorageType", src);
     try {
       namenode.setQuota(src, HdfsConstants.QUOTA_DONT_SET, quota, type);
     } catch (RemoteException re) {
@@ -2748,7 +2743,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void setTimes(String src, long mtime, long atime) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("setTimes", src);
+    TraceScope scope = newPathTraceScope("setTimes", src);
     try {
       namenode.setTimes(src, mtime, atime);
     } catch(RemoteException re) {
@@ -2809,7 +2804,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void modifyAclEntries(String src, List<AclEntry> aclSpec)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("modifyAclEntries", src);
+    TraceScope scope = newPathTraceScope("modifyAclEntries", src);
     try {
       namenode.modifyAclEntries(src, aclSpec);
     } catch(RemoteException re) {
@@ -2828,7 +2823,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void removeAclEntries(String src, List<AclEntry> aclSpec)
       throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("removeAclEntries", traceSampler);
+    TraceScope scope = tracer.newScope("removeAclEntries");
     try {
       namenode.removeAclEntries(src, aclSpec);
     } catch(RemoteException re) {
@@ -2846,7 +2841,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void removeDefaultAcl(String src) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("removeDefaultAcl", traceSampler);
+    TraceScope scope = tracer.newScope("removeDefaultAcl");
     try {
       namenode.removeDefaultAcl(src);
     } catch(RemoteException re) {
@@ -2864,7 +2859,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void removeAcl(String src) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("removeAcl", traceSampler);
+    TraceScope scope = tracer.newScope("removeAcl");
     try {
       namenode.removeAcl(src);
     } catch(RemoteException re) {
@@ -2882,7 +2877,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void setAcl(String src, List<AclEntry> aclSpec) throws IOException {
     checkOpen();
-    TraceScope scope = Trace.startSpan("setAcl", traceSampler);
+    TraceScope scope = tracer.newScope("setAcl");
     try {
       namenode.setAcl(src, aclSpec);
     } catch(RemoteException re) {
@@ -2900,7 +2895,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public AclStatus getAclStatus(String src) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getAclStatus", src);
+    TraceScope scope = newPathTraceScope("getAclStatus", src);
     try {
       return namenode.getAclStatus(src);
     } catch(RemoteException re) {
@@ -2916,7 +2911,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public void createEncryptionZone(String src, String keyName)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("createEncryptionZone", src);
+    TraceScope scope = newPathTraceScope("createEncryptionZone", src);
     try {
       namenode.createEncryptionZone(src, keyName);
     } catch (RemoteException re) {
@@ -2931,7 +2926,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public EncryptionZone getEZForPath(String src)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getEZForPath", src);
+    TraceScope scope = newPathTraceScope("getEZForPath", src);
     try {
       return namenode.getEZForPath(src);
     } catch (RemoteException re) {
@@ -2945,13 +2940,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public RemoteIterator<EncryptionZone> listEncryptionZones()
       throws IOException {
     checkOpen();
-    return new EncryptionZoneIterator(namenode, traceSampler);
+    return new EncryptionZoneIterator(namenode, tracer);
   }

   public void setXAttr(String src, String name, byte[] value,
       EnumSet<XAttrSetFlag> flag) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("setXAttr", src);
+    TraceScope scope = newPathTraceScope("setXAttr", src);
     try {
       namenode.setXAttr(src, XAttrHelper.buildXAttr(name, value), flag);
     } catch (RemoteException re) {
@@ -2968,7 +2963,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public byte[] getXAttr(String src, String name) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getXAttr", src);
+    TraceScope scope = newPathTraceScope("getXAttr", src);
     try {
       final List<XAttr> xAttrs = XAttrHelper.buildXAttrAsList(name);
       final List<XAttr> result = namenode.getXAttrs(src, xAttrs);
@@ -2984,7 +2979,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public Map<String, byte[]> getXAttrs(String src) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getXAttrs", src);
+    TraceScope scope = newPathTraceScope("getXAttrs", src);
     try {
       return XAttrHelper.buildXAttrMap(namenode.getXAttrs(src, null));
     } catch(RemoteException re) {
@@ -2999,7 +2994,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public Map<String, byte[]> getXAttrs(String src, List<String> names)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("getXAttrs", src);
+    TraceScope scope = newPathTraceScope("getXAttrs", src);
     try {
       return XAttrHelper.buildXAttrMap(namenode.getXAttrs(
           src, XAttrHelper.buildXAttrs(names)));
@@ -3015,7 +3010,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public List<String> listXAttrs(String src)
       throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("listXAttrs", src);
+    TraceScope scope = newPathTraceScope("listXAttrs", src);
     try {
       final Map<String, byte[]> xattrs =
           XAttrHelper.buildXAttrMap(namenode.listXAttrs(src));
@@ -3031,7 +3026,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void removeXAttr(String src, String name) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("removeXAttr", src);
+    TraceScope scope = newPathTraceScope("removeXAttr", src);
     try {
       namenode.removeXAttr(src, XAttrHelper.buildXAttr(name));
     } catch(RemoteException re) {
@@ -3048,7 +3043,7 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public void checkAccess(String src, FsAction mode) throws IOException {
     checkOpen();
-    TraceScope scope = getPathTraceScope("checkAccess", src);
+    TraceScope scope = newPathTraceScope("checkAccess", src);
     try {
       namenode.checkAccess(src, mode);
     } catch (RemoteException re) {
@@ -3061,12 +3056,13 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }

   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
-    return new DFSInotifyEventInputStream(traceSampler, namenode);
+    return new DFSInotifyEventInputStream(namenode, tracer);
   }

   public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
       throws IOException {
-    return new DFSInotifyEventInputStream(traceSampler, namenode, lastReadTxid);
+    return new DFSInotifyEventInputStream(namenode, tracer,
+        lastReadTxid);
   }

   @Override // RemotePeerFactory
@@ -3176,28 +3172,26 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
     return saslClient;
   }

-  TraceScope getPathTraceScope(String description, String path) {
-    TraceScope scope = Trace.startSpan(description, traceSampler);
-    Span span = scope.getSpan();
-    if (span != null) {
-      if (path != null) {
-        span.addKVAnnotation("path", path);
-      }
+  TraceScope newPathTraceScope(String description, String path) {
+    TraceScope scope = tracer.newScope(description);
+    if (path != null) {
+      scope.addKVAnnotation("path", path);
     }
     return scope;
   }

-  TraceScope getSrcDstTraceScope(String description, String src, String dst) {
-    TraceScope scope = Trace.startSpan(description, traceSampler);
-    Span span = scope.getSpan();
-    if (span != null) {
-      if (src != null) {
-        span.addKVAnnotation("src", src);
-      }
-      if (dst != null) {
-        span.addKVAnnotation("dst", dst);
-      }
+  TraceScope newSrcDstTraceScope(String description, String src, String dst) {
+    TraceScope scope = tracer.newScope(description);
+    if (src != null) {
+      scope.addKVAnnotation("src", src);
+    }
+    if (dst != null) {
+      scope.addKVAnnotation("dst", dst);
+    }
     return scope;
   }

+  Tracer getTracer() {
+    return tracer;
+  }
 }
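Two things change at once in DFSClient: the per-client SpanReceiverHost/SamplerBuilder setup is replaced by the shared, filesystem-wide tracer from FsTracer.get(conf), and the renamed helpers attach their path/src/dst annotations on the TraceScope itself instead of unwrapping a Span first. A condensed sketch of the same helper shape, with a hypothetical class name (the FsTracer and htrace calls are the ones visible in the diff):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsTracer;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    class ClientTracing {
      private final Tracer tracer;

      ClientTracing(Configuration conf) {
        this.tracer = FsTracer.get(conf);  // one process-wide FS tracer
      }

      TraceScope newPathTraceScope(String description, String path) {
        TraceScope scope = tracer.newScope(description);
        if (path != null) {
          scope.addKVAnnotation("path", path);  // annotate the scope directly
        }
        return scope;
      }
    }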
@@ -26,9 +26,8 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.inotify.MissingEventsException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -47,11 +46,6 @@ public class DFSInotifyEventInputStream {
   public static final Logger LOG = LoggerFactory.getLogger(
       DFSInotifyEventInputStream.class);

-  /**
-   * The trace sampler to use when making RPCs to the NameNode.
-   */
-  private final Sampler<?> traceSampler;
-
   private final ClientProtocol namenode;
   private Iterator<EventBatch> it;
   private long lastReadTxid;
@@ -65,20 +59,22 @@ public class DFSInotifyEventInputStream {
    */
   private Random rng = new Random();

+  private final Tracer tracer;
+
   private static final int INITIAL_WAIT_MS = 10;

-  DFSInotifyEventInputStream(Sampler<?> traceSampler, ClientProtocol namenode)
+  DFSInotifyEventInputStream(ClientProtocol namenode, Tracer tracer)
       throws IOException {
     // Only consider new transaction IDs.
-    this(traceSampler, namenode, namenode.getCurrentEditLogTxid());
+    this(namenode, tracer, namenode.getCurrentEditLogTxid());
   }

-  DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
-      long lastReadTxid) throws IOException {
-    this.traceSampler = traceSampler;
+  DFSInotifyEventInputStream(ClientProtocol namenode,
+      Tracer tracer, long lastReadTxid) throws IOException {
     this.namenode = namenode;
     this.it = Iterators.emptyIterator();
     this.lastReadTxid = lastReadTxid;
+    this.tracer = tracer;
   }

   /**
@@ -98,8 +94,7 @@ public class DFSInotifyEventInputStream {
    * The next available batch of events will be returned.
    */
   public EventBatch poll() throws IOException, MissingEventsException {
-    TraceScope scope =
-        Trace.startSpan("inotifyPoll", traceSampler);
+    TraceScope scope = tracer.newScope("inotifyPoll");
     try {
       // need to keep retrying until the NN sends us the latest committed txid
       if (lastReadTxid == -1) {
@@ -180,7 +175,7 @@ public class DFSInotifyEventInputStream {
    */
   public EventBatch poll(long time, TimeUnit tu) throws IOException,
       InterruptedException, MissingEventsException {
-    TraceScope scope = Trace.startSpan("inotifyPollWithTimeout", traceSampler);
+    TraceScope scope = tracer.newScope("inotifyPollWithTimeout");
    EventBatch next = null;
    try {
      long initialTime = Time.monotonicNow();
@@ -217,7 +212,7 @@ public class DFSInotifyEventInputStream {
    */
   public EventBatch take() throws IOException, InterruptedException,
       MissingEventsException {
-    TraceScope scope = Trace.startSpan("inotifyTake", traceSampler);
+    TraceScope scope = tracer.newScope("inotifyTake");
     EventBatch next = null;
     try {
       int nextWaitMin = INITIAL_WAIT_MS;
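With the Sampler<?> field deleted, the decision of whether an "inotifyPoll" span is actually recorded now lives inside the Tracer, driven by its configured samplers. A sketch of configuring probabilistic sampling; the configuration keys are quoted from memory of htrace 4 and should be treated as assumptions, not verified constants:

    import java.util.HashMap;
    import java.util.Map;
    import org.apache.htrace.core.HTraceConfiguration;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public class SamplingDemo {
      public static void main(String[] args) {
        Map<String, String> conf = new HashMap<>();
        conf.put("sampler.classes", "ProbabilitySampler");  // assumed htrace 4 key
        conf.put("sampler.fraction", "0.01");               // assumed: ~1% of scopes
        Tracer tracer = new Tracer.Builder("inotify-demo").
            conf(HTraceConfiguration.fromMap(conf)).build();
        TraceScope scope = tracer.newScope("inotifyPoll");  // sampled inside Tracer
        scope.close();
        tracer.close();
      }
    }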
@@ -55,6 +55,7 @@ import org.apache.hadoop.fs.CanUnbuffer;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HasEnhancedByteBufferAccess;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
@@ -78,9 +79,9 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.IdentityHashStore;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;

 import com.google.common.annotations.VisibleForTesting;

@@ -678,6 +679,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         setClientCacheContext(dfsClient.getClientContext()).
         setUserGroupInformation(dfsClient.ugi).
         setConfiguration(dfsClient.getConfiguration()).
+        setTracer(dfsClient.getTracer()).
         build();
   }

@@ -941,7 +943,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   public synchronized int read(final byte buf[], int off, int len) throws IOException {
     ReaderStrategy byteArrayReader = new ByteArrayStrategy(buf);
     TraceScope scope =
-        dfsClient.getPathTraceScope("DFSInputStream#byteArrayRead", src);
+        dfsClient.newPathTraceScope("DFSInputStream#byteArrayRead", src);
     try {
       return readWithStrategy(byteArrayReader, off, len);
     } finally {
@@ -953,7 +955,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   public synchronized int read(final ByteBuffer buf) throws IOException {
     ReaderStrategy byteBufferReader = new ByteBufferStrategy(buf);
     TraceScope scope =
-        dfsClient.getPathTraceScope("DFSInputStream#byteBufferRead", src);
+        dfsClient.newPathTraceScope("DFSInputStream#byteBufferRead", src);
     try {
       return readWithStrategy(byteBufferReader, 0, buf.remaining());
     } finally {
@@ -1120,14 +1122,14 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       final ByteBuffer bb,
       final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
       final int hedgedReadId) {
-    final Span parentSpan = Trace.currentSpan();
+    final SpanId parentSpanId = Tracer.getCurrentSpanId();
     return new Callable<ByteBuffer>() {
       @Override
       public ByteBuffer call() throws Exception {
         byte[] buf = bb.array();
         int offset = bb.position();
-        TraceScope scope =
-            Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
+        TraceScope scope = dfsClient.getTracer().
+            newScope("hedgedRead" + hedgedReadId, parentSpanId);
         try {
           actualGetFromOneDataNode(datanode, block, start, end, buf,
               offset, corruptedBlockMap);
@@ -1449,8 +1451,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   @Override
   public int read(long position, byte[] buffer, int offset, int length)
       throws IOException {
-    TraceScope scope =
-        dfsClient.getPathTraceScope("DFSInputStream#byteArrayPread", src);
+    TraceScope scope = dfsClient.
+        newPathTraceScope("DFSInputStream#byteArrayPread", src);
     try {
       return pread(position, buffer, offset, length);
     } finally {
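The hedged-read hunk shows the htrace 4 idiom for executor handoff: the static Tracer.getCurrentSpanId() captures the caller thread's current span before the Callable is built, and the worker reopens a child scope under it. A sketch of that handoff in isolation, with illustrative names and a placeholder body:

    import java.util.concurrent.Callable;
    import org.apache.htrace.core.SpanId;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    class HedgedReadFactory {
      static Callable<byte[]> newTask(final Tracer tracer, final int hedgedReadId) {
        // Must run on the submitting thread, while its span is still current.
        final SpanId parentSpanId = Tracer.getCurrentSpanId();
        return new Callable<byte[]>() {
          @Override
          public byte[] call() {
            TraceScope scope = tracer.newScope("hedgedRead" + hedgedReadId,
                parentSpanId);
            try {
              return new byte[0];  // fetch the range from one datanode here
            } finally {
              scope.close();
            }
          }
        };
      }
    }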
@@ -32,6 +32,7 @@ import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -62,9 +63,7 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DataChecksum.Type;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

@@ -227,7 +226,7 @@ public class DFSOutputStream extends FSOutputSummer
       short replication, long blockSize, Progressable progress, int buffersize,
       DataChecksum checksum, String[] favoredNodes) throws IOException {
     TraceScope scope =
-        dfsClient.getPathTraceScope("newStreamForCreate", src);
+        dfsClient.newPathTraceScope("newStreamForCreate", src);
     try {
       HdfsFileStatus stat = null;

@@ -350,7 +349,7 @@ public class DFSOutputStream extends FSOutputSummer
       LocatedBlock lastBlock, HdfsFileStatus stat, DataChecksum checksum,
       String[] favoredNodes) throws IOException {
     TraceScope scope =
-        dfsClient.getPathTraceScope("newStreamForAppend", src);
+        dfsClient.newPathTraceScope("newStreamForAppend", src);
     try {
       final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
           progress, lastBlock, stat, checksum, favoredNodes);
@@ -375,7 +374,7 @@ public class DFSOutputStream extends FSOutputSummer
   }

   protected TraceScope createWriteTraceScope() {
-    return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
+    return dfsClient.newPathTraceScope("DFSOutputStream#write", src);
   }

   // @see FSOutputSummer#writeChunk()
@@ -495,7 +494,7 @@ public class DFSOutputStream extends FSOutputSummer
   @Override
   public void hflush() throws IOException {
     TraceScope scope =
-        dfsClient.getPathTraceScope("hflush", src);
+        dfsClient.newPathTraceScope("hflush", src);
     try {
       flushOrSync(false, EnumSet.noneOf(SyncFlag.class));
     } finally {
@@ -506,7 +505,7 @@ public class DFSOutputStream extends FSOutputSummer
   @Override
   public void hsync() throws IOException {
     TraceScope scope =
-        dfsClient.getPathTraceScope("hsync", src);
+        dfsClient.newPathTraceScope("hsync", src);
     try {
       flushOrSync(true, EnumSet.noneOf(SyncFlag.class));
     } finally {
@@ -529,7 +528,7 @@ public class DFSOutputStream extends FSOutputSummer
    */
   public void hsync(EnumSet<SyncFlag> syncFlags) throws IOException {
     TraceScope scope =
-        dfsClient.getPathTraceScope("hsync", src);
+        dfsClient.newPathTraceScope("hsync", src);
     try {
       flushOrSync(true, syncFlags);
     } finally {
@@ -770,7 +769,7 @@ public class DFSOutputStream extends FSOutputSummer
   @Override
   public synchronized void close() throws IOException {
     TraceScope scope =
-        dfsClient.getPathTraceScope("DFSOutputStream#close", src);
+        dfsClient.newPathTraceScope("DFSOutputStream#close", src);
     try {
       closeImpl();
     } finally {
@@ -799,7 +798,7 @@ public class DFSOutputStream extends FSOutputSummer
     // get last block before destroying the streamer
     ExtendedBlock lastBlock = getStreamer().getBlock();
     closeThreads(false);
-    TraceScope scope = Trace.startSpan("completeFile", Sampler.NEVER);
+    TraceScope scope = dfsClient.getTracer().newScope("completeFile");
    try {
       completeFile(lastBlock);
     } finally {
@@ -27,7 +27,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.util.ByteArrayManager;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
 
 /****************************************************************
  * DFSPacket is used by DataStreamer and DFSOutputStream.
@@ -38,7 +40,7 @@ import org.apache.htrace.Span;
 @InterfaceAudience.Private
 class DFSPacket {
   public static final long HEART_BEAT_SEQNO = -1L;
-  private static long[] EMPTY = new long[0];
+  private static SpanId[] EMPTY = new SpanId[0];
   private final long seqno; // sequence number of buffer in block
   private final long offsetInBlock; // offset in block
   private boolean syncBlock; // this packet forces the current block to disk
@@ -65,9 +67,9 @@ class DFSPacket {
   private int checksumPos;
   private final int dataStart;
   private int dataPos;
-  private long[] traceParents = EMPTY;
+  private SpanId[] traceParents = EMPTY;
   private int traceParentsUsed;
-  private Span span;
+  private TraceScope scope;
 
   /**
    * Create a new packet.
@@ -293,7 +295,10 @@ class DFSPacket {
     addTraceParent(span.getSpanId());
   }
 
-  public void addTraceParent(long id) {
+  public void addTraceParent(SpanId id) {
+    if (!id.isValid()) {
+      return;
+    }
     if (traceParentsUsed == traceParents.length) {
       int newLength = (traceParents.length == 0) ? 8 :
           traceParents.length * 2;
@@ -310,18 +315,18 @@ class DFSPacket {
    *
    * Protected by the DFSOutputStream dataQueue lock.
    */
-  public long[] getTraceParents() {
+  public SpanId[] getTraceParents() {
     // Remove duplicates from the array.
     int len = traceParentsUsed;
     Arrays.sort(traceParents, 0, len);
     int i = 0, j = 0;
-    long prevVal = 0; // 0 is not a valid span id
+    SpanId prevVal = SpanId.INVALID;
     while (true) {
       if (i == len) {
         break;
       }
-      long val = traceParents[i];
-      if (val != prevVal) {
+      SpanId val = traceParents[i];
+      if (!val.equals(prevVal)) {
         traceParents[j] = val;
         j++;
         prevVal = val;
@@ -335,11 +340,11 @@ class DFSPacket {
     return traceParents;
   }
 
-  public void setTraceSpan(Span span) {
-    this.span = span;
+  public void setTraceScope(TraceScope scope) {
+    this.scope = scope;
   }
 
-  public Span getTraceSpan() {
-    return span;
+  public TraceScope getTraceScope() {
+    return scope;
   }
 }
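
The getTraceParents() hunk above dedupes the accumulated parent span ids in place. A standalone sketch of the same sort-then-compact idea over htrace 4 SpanIds (the helper class is illustrative, not from the commit; SpanId's Comparable ordering is what Arrays.sort relies on above):

import java.util.Arrays;
import org.apache.htrace.core.SpanId;

class TraceParentsSketch {
  // Sort the first 'used' entries, then keep only the first of each run of
  // equal ids, exactly as DFSPacket#getTraceParents() does.
  static SpanId[] dedupe(SpanId[] parents, int used) {
    Arrays.sort(parents, 0, used);
    int j = 0;
    SpanId prev = SpanId.INVALID;
    for (int i = 0; i < used; i++) {
      if (!parents[i].equals(prev)) {
        parents[j++] = parents[i];
        prev = parents[i];
      }
    }
    return Arrays.copyOf(parents, j);
  }
}
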
@@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.BlockWrite;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -79,12 +80,11 @@ import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Time;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceInfo;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 import com.google.common.cache.CacheBuilder;
 import com.google.common.cache.CacheLoader;
@@ -505,7 +505,7 @@ class DataStreamer extends Daemon {
   @Override
   public void run() {
     long lastPacket = Time.monotonicNow();
-    TraceScope scope = NullScope.INSTANCE;
+    TraceScope scope = null;
     while (!streamerClosed && dfsClient.clientRunning) {
       // if the Responder encountered an error, shutdown Responder
       if (errorState.hasError() && response != null) {
@@ -556,12 +556,11 @@ class DataStreamer extends Daemon {
               LOG.warn("Caught exception", e);
             }
             one = dataQueue.getFirst(); // regular data packet
-            long parents[] = one.getTraceParents();
+            SpanId[] parents = one.getTraceParents();
             if (parents.length > 0) {
-              scope = Trace.startSpan("dataStreamer", new TraceInfo(0, parents[0]));
-              // TODO: use setParents API once it's available from HTrace 3.2
-              // scope = Trace.startSpan("dataStreamer", Sampler.ALWAYS);
-              // scope.getSpan().setParents(parents);
+              scope = dfsClient.getTracer().
+                  newScope("dataStreamer", parents[0]);
+              scope.getSpan().setParents(parents);
             }
           }
         }
@@ -612,12 +611,16 @@ class DataStreamer extends Daemon {
         }
 
         // send the packet
-        Span span = null;
+        SpanId spanId = SpanId.INVALID;
         synchronized (dataQueue) {
           // move packet from dataQueue to ackQueue
           if (!one.isHeartbeatPacket()) {
-            span = scope.detach();
-            one.setTraceSpan(span);
+            if (scope != null) {
+              spanId = scope.getSpanId();
+              scope.detach();
+              one.setTraceScope(scope);
+            }
+            scope = null;
             dataQueue.removeFirst();
             ackQueue.addLast(one);
             dataQueue.notifyAll();
@@ -630,7 +633,8 @@ class DataStreamer extends Daemon {
         }
 
         // write out data to remote datanode
-        TraceScope writeScope = Trace.startSpan("writeTo", span);
+        TraceScope writeScope = dfsClient.getTracer().
+            newScope("DataStreamer#writeTo", spanId);
         try {
           one.writeTo(blockStream);
           blockStream.flush();
@@ -697,7 +701,10 @@ class DataStreamer extends Daemon {
           streamerClosed = true;
         }
       } finally {
-        scope.close();
+        if (scope != null) {
+          scope.close();
+          scope = null;
+        }
       }
     }
     closeInternal();
@@ -731,7 +738,8 @@ class DataStreamer extends Daemon {
   * @throws IOException
   */
  void waitForAckedSeqno(long seqno) throws IOException {
-    TraceScope scope = Trace.startSpan("waitForAckedSeqno", Sampler.NEVER);
+    TraceScope scope = dfsClient.getTracer().
+        newScope("waitForAckedSeqno");
    try {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Waiting for ack for: " + seqno);
@@ -781,7 +789,7 @@ class DataStreamer extends Daemon {
       while (!streamerClosed && dataQueue.size() + ackQueue.size() >
           dfsClient.getConf().getWriteMaxPackets()) {
         if (firstWait) {
-          Span span = Trace.currentSpan();
+          Span span = Tracer.getCurrentSpan();
           if (span != null) {
             span.addTimelineAnnotation("dataQueue.wait");
           }
@@ -802,7 +810,7 @@ class DataStreamer extends Daemon {
         }
       }
     } finally {
       Span span = Trace.currentSpan();
+      Span span = Tracer.getCurrentSpan();
       if ((span != null) && (!firstWait)) {
         span.addTimelineAnnotation("end.wait");
       }
@@ -934,7 +942,7 @@ class DataStreamer extends Daemon {
       setName("ResponseProcessor for block " + block);
       PipelineAck ack = new PipelineAck();
 
-      TraceScope scope = NullScope.INSTANCE;
+      TraceScope scope = null;
       while (!responderClosed && dfsClient.clientRunning && !isLastPacketInBlock) {
         // process responses from datanodes.
         try {
@@ -1021,8 +1029,11 @@ class DataStreamer extends Daemon {
           block.setNumBytes(one.getLastByteOffsetBlock());
 
           synchronized (dataQueue) {
-            scope = Trace.continueSpan(one.getTraceSpan());
-            one.setTraceSpan(null);
+            scope = one.getTraceScope();
+            if (scope != null) {
+              scope.reattach();
+              one.setTraceScope(null);
+            }
             lastAckedSeqno = seqno;
             ackQueue.removeFirst();
             dataQueue.notifyAll();
@@ -1043,7 +1054,10 @@ class DataStreamer extends Daemon {
             responderClosed = true;
           }
         } finally {
+          if (scope != null) {
             scope.close();
+          }
+          scope = null;
         }
       }
     }
@@ -1109,11 +1123,12 @@ class DataStreamer extends Daemon {
     // a client waiting on close() will be aware that the flush finished.
     synchronized (dataQueue) {
       DFSPacket endOfBlockPacket = dataQueue.remove(); // remove the end of block packet
-      Span span = endOfBlockPacket.getTraceSpan();
-      if (span != null) {
-        // Close any trace span associated with this Packet
-        TraceScope scope = Trace.continueSpan(span);
+      // Close any trace span associated with this Packet
+      TraceScope scope = endOfBlockPacket.getTraceScope();
+      if (scope != null) {
+        scope.reattach();
         scope.close();
+        endOfBlockPacket.setTraceScope(null);
       }
       assert endOfBlockPacket.isLastPacketInBlock();
       assert lastAckedSeqno == endOfBlockPacket.getSeqno() - 1;
@@ -1741,7 +1756,7 @@ class DataStreamer extends Daemon {
   void queuePacket(DFSPacket packet) {
     synchronized (dataQueue) {
       if (packet == null) return;
-      packet.addTraceParent(Trace.currentSpan());
+      packet.addTraceParent(Tracer.getCurrentSpanId());
       dataQueue.addLast(packet);
       lastQueuedSeqno = packet.getSeqno();
       if (LOG.isDebugEnabled()) {
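
The DataStreamer hunks above hand a TraceScope between threads: the streamer thread detaches the scope and parks it on the in-flight packet, and the ResponseProcessor reattaches it before closing. A hedged two-method sketch of that handoff, under the assumption of a single parked scope (the class is illustrative, not from the commit):

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class HandoffSketch {
  private TraceScope parked; // stands in for DFSPacket's scope field

  void producer(Tracer tracer) {
    TraceScope scope = tracer.newScope("dataStreamer");
    scope.detach();   // the span now survives beyond this thread
    parked = scope;
  }

  void consumer() {
    TraceScope scope = parked;
    if (scope != null) {
      scope.reattach(); // adopt the span on this thread
      scope.close();
      parked = null;
    }
  }
}
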
@@ -47,9 +47,8 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -106,6 +105,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   int dataLeft = 0;
 
   private final PeerCache peerCache;
 
+  private final Tracer tracer;
+
   /* FSInputChecker interface */
 
@@ -210,9 +211,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   protected synchronized int readChunk(long pos, byte[] buf, int offset,
                                        int len, byte[] checksumBuf)
       throws IOException {
-    TraceScope scope =
-        Trace.startSpan("RemoteBlockReader#readChunk(" + blockId + ")",
-            Sampler.NEVER);
+    TraceScope scope = tracer.
+        newScope("RemoteBlockReader#readChunk(" + blockId + ")");
     try {
       return readChunkImpl(pos, buf, offset, len, checksumBuf);
     } finally {
@@ -346,7 +346,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
   private RemoteBlockReader(String file, String bpid, long blockId,
       DataInputStream in, DataChecksum checksum, boolean verifyChecksum,
       long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
-      DatanodeID datanodeID, PeerCache peerCache) {
+      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
     // Path is used only for printing block and file information in debug
     super(new Path("/" + Block.BLOCK_FILE_PREFIX + blockId +
                     ":" + bpid + ":of:"+ file)/*too non path-like?*/,
@@ -378,6 +378,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
     bytesPerChecksum = this.checksum.getBytesPerChecksum();
     checksumSize = this.checksum.getChecksumSize();
     this.peerCache = peerCache;
+    this.tracer = tracer;
   }
 
   /**
@@ -402,7 +403,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
                                      String clientName, Peer peer,
                                      DatanodeID datanodeID,
                                      PeerCache peerCache,
-                                     CachingStrategy cachingStrategy)
+                                     CachingStrategy cachingStrategy,
+                                     Tracer tracer)
       throws IOException {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out =
@@ -438,7 +440,7 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader {
 
     return new RemoteBlockReader(file, block.getBlockPoolId(), block.getBlockId(),
         in, checksum, verifyChecksum, startOffset, firstChunkOffset, len,
         peer, datanodeID, peerCache);
+        peer, datanodeID, peerCache, tracer);
   }
 
   @Override
@@ -48,12 +48,11 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 
 import com.google.common.annotations.VisibleForTesting;
 
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -126,6 +125,8 @@ public class RemoteBlockReader2 implements BlockReader {
 
   private boolean sentStatusCode = false;
 
+  private final Tracer tracer;
+
   @VisibleForTesting
   public Peer getPeer() {
     return peer;
@@ -144,8 +145,8 @@ public class RemoteBlockReader2 implements BlockReader {
     }
 
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-      TraceScope scope = Trace.startSpan(
-          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
+      TraceScope scope = tracer.newScope(
+          "RemoteBlockReader2#readNextPacket(" + blockId + ")");
       try {
         readNextPacket();
       } finally {
@@ -172,8 +173,8 @@ public class RemoteBlockReader2 implements BlockReader {
   @Override
   public synchronized int read(ByteBuffer buf) throws IOException {
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
-      TraceScope scope = Trace.startSpan(
-          "RemoteBlockReader2#readNextPacket(" + blockId + ")", Sampler.NEVER);
+      TraceScope scope = tracer.newScope(
+          "RemoteBlockReader2#readNextPacket(" + blockId + ")");
       try {
         readNextPacket();
       } finally {
@@ -292,7 +293,7 @@ public class RemoteBlockReader2 implements BlockReader {
   protected RemoteBlockReader2(String file, String bpid, long blockId,
       DataChecksum checksum, boolean verifyChecksum,
       long startOffset, long firstChunkOffset, long bytesToRead, Peer peer,
-      DatanodeID datanodeID, PeerCache peerCache) {
+      DatanodeID datanodeID, PeerCache peerCache, Tracer tracer) {
     this.isLocal = DFSUtilClient.isLocalAddress(NetUtils.
         createSocketAddr(datanodeID.getXferAddr()));
     // Path is used only for printing block and file information in debug
@@ -313,6 +314,7 @@ public class RemoteBlockReader2 implements BlockReader {
     this.bytesNeededToFinish = bytesToRead + (startOffset - firstChunkOffset);
     bytesPerChecksum = this.checksum.getBytesPerChecksum();
     checksumSize = this.checksum.getChecksumSize();
+    this.tracer = tracer;
   }
 
 
@@ -407,7 +409,8 @@ public class RemoteBlockReader2 implements BlockReader {
                                      String clientName,
                                      Peer peer, DatanodeID datanodeID,
                                      PeerCache peerCache,
-                                     CachingStrategy cachingStrategy) throws IOException {
+                                     CachingStrategy cachingStrategy,
+                                     Tracer tracer) throws IOException {
     // in and out will be closed when sock is closed (by the caller)
     final DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
         peer.getOutputStream()));
@@ -440,7 +443,7 @@ public class RemoteBlockReader2 implements BlockReader {
 
     return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
         checksum, verifyChecksum, startOffset, firstChunkOffset, len, peer,
-        datanodeID, peerCache);
+        datanodeID, peerCache, tracer);
   }
 
   static void checkSuccess(
@@ -25,11 +25,10 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.ipc.RemoteException;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
 
 import com.google.common.base.Preconditions;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /**
  * CacheDirectiveIterator is a remote iterator that iterates cache directives.
@@ -42,14 +41,14 @@ public class CacheDirectiveIterator
 
   private CacheDirectiveInfo filter;
   private final ClientProtocol namenode;
-  private final Sampler<?> traceSampler;
+  private final Tracer tracer;
 
   public CacheDirectiveIterator(ClientProtocol namenode,
-      CacheDirectiveInfo filter, Sampler<?> traceSampler) {
+      CacheDirectiveInfo filter, Tracer tracer) {
     super(0L);
     this.namenode = namenode;
     this.filter = filter;
-    this.traceSampler = traceSampler;
+    this.tracer = tracer;
   }
 
   private static CacheDirectiveInfo removeIdFromFilter(CacheDirectiveInfo filter) {
@@ -94,7 +93,7 @@ public class CacheDirectiveIterator
   public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
       throws IOException {
     BatchedEntries<CacheDirectiveEntry> entries = null;
-    TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler);
+    TraceScope scope = tracer.newScope("listCacheDirectives");
     try {
       entries = namenode.listCacheDirectives(prevKey, filter);
     } catch (IOException e) {
@@ -23,9 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /**
  * CachePoolIterator is a remote iterator that iterates cache pools.
@@ -37,18 +36,18 @@ public class CachePoolIterator
     extends BatchedRemoteIterator<String, CachePoolEntry> {
 
   private final ClientProtocol namenode;
-  private final Sampler traceSampler;
+  private final Tracer tracer;
 
-  public CachePoolIterator(ClientProtocol namenode, Sampler traceSampler) {
+  public CachePoolIterator(ClientProtocol namenode, Tracer tracer) {
     super("");
     this.namenode = namenode;
-    this.traceSampler = traceSampler;
+    this.tracer = tracer;
   }
 
   @Override
   public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
       throws IOException {
-    TraceScope scope = Trace.startSpan("listCachePools", traceSampler);
+    TraceScope scope = tracer.newScope("listCachePools");
     try {
       return namenode.listCachePools(prevKey);
     } finally {
@@ -23,9 +23,8 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /**
  * EncryptionZoneIterator is a remote iterator that iterates over encryption
@@ -37,19 +36,18 @@ public class EncryptionZoneIterator
     extends BatchedRemoteIterator<Long, EncryptionZone> {
 
   private final ClientProtocol namenode;
-  private final Sampler<?> traceSampler;
+  private final Tracer tracer;
 
-  public EncryptionZoneIterator(ClientProtocol namenode,
-      Sampler<?> traceSampler) {
+  public EncryptionZoneIterator(ClientProtocol namenode, Tracer tracer) {
     super(Long.valueOf(0));
     this.namenode = namenode;
-    this.traceSampler = traceSampler;
+    this.tracer = tracer;
   }
 
   @Override
   public BatchedEntries<EncryptionZone> makeRequest(Long prevId)
       throws IOException {
-    TraceScope scope = Trace.startSpan("listEncryptionZones", traceSampler);
+    TraceScope scope = tracer.newScope("listEncryptionZones");
     try {
       return namenode.listEncryptionZones(prevId);
     } finally {
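
All three iterators above now take a Tracer where they previously took an htrace 3 Sampler, so each batched RPC gets its own scope from the caller's tracer. A sketch of constructing one with the FsTracer-provided tracer — usage is inferred from the new constructors, and 'namenode' and 'conf' are stand-ins:

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolIterator;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.htrace.core.Tracer;

class IteratorSketch {
  static void listPools(ClientProtocol namenode, Configuration conf)
      throws IOException {
    Tracer tracer = FsTracer.get(conf); // tracer injected, not a Sampler
    RemoteIterator<CachePoolEntry> it =
        new CachePoolIterator(namenode, tracer);
    while (it.hasNext()) {
      System.out.println(it.next().getInfo().getPoolName());
    }
  }
}
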
@@ -35,10 +35,8 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceInfo;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.Tracer;
 
 /**
  * Static utilities for dealing with the protocol buffers used by the
@@ -89,39 +87,21 @@ public abstract class DataTransferProtoUtil {
     BaseHeaderProto.Builder builder = BaseHeaderProto.newBuilder()
       .setBlock(PBHelperClient.convert(blk))
       .setToken(PBHelperClient.convert(blockToken));
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
+    SpanId spanId = Tracer.getCurrentSpanId();
+    if (spanId.isValid()) {
       builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
-          .setTraceId(s.getTraceId())
-          .setParentId(s.getSpanId()));
+          .setTraceId(spanId.getHigh())
+          .setParentId(spanId.getLow()));
     }
     return builder.build();
   }
 
-  public static TraceInfo fromProto(DataTransferTraceInfoProto proto) {
-    if (proto == null) return null;
-    if (!proto.hasTraceId()) return null;
-    return new TraceInfo(proto.getTraceId(), proto.getParentId());
-  }
-
-  public static TraceScope continueTraceSpan(ClientOperationHeaderProto header,
-      String description) {
-    return continueTraceSpan(header.getBaseHeader(), description);
-  }
-
-  public static TraceScope continueTraceSpan(BaseHeaderProto header,
-      String description) {
-    return continueTraceSpan(header.getTraceInfo(), description);
-  }
-
-  public static TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
-      String description) {
-    TraceScope scope = null;
-    TraceInfo info = fromProto(proto);
-    if (info != null) {
-      scope = Trace.startSpan(description, info);
+  public static SpanId fromProto(DataTransferTraceInfoProto proto) {
+    if ((proto != null) && proto.hasTraceId() &&
+        proto.hasParentId()) {
+      return new SpanId(proto.getTraceId(), proto.getParentId());
     }
-    return scope;
+    return null;
   }
 
   public static void checkBlockOpStatus(
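
fromProto() above maps htrace 4's 128-bit SpanId onto the two existing 64-bit protobuf fields: getHigh() rides in traceId and getLow() in parentId, so the wire format stays unchanged. A tiny sketch of that round-trip (helper names are illustrative, not from the commit):

import org.apache.htrace.core.SpanId;

class SpanIdWireSketch {
  // Split a SpanId into the two longs carried by the traceId/parentId
  // protobuf fields, and rebuild it on the receiving side.
  static long[] toWire(SpanId id) {
    return new long[] { id.getHigh(), id.getLow() };
  }

  static SpanId fromWire(long high, long low) {
    return new SpanId(high, low);
  }
}
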
@@ -48,8 +48,8 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
 
-import org.apache.htrace.Trace;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.Tracer;
 
 import com.google.protobuf.Message;
 
@@ -200,10 +200,11 @@ public class Sender implements DataTransferProtocol {
     ReleaseShortCircuitAccessRequestProto.Builder builder =
         ReleaseShortCircuitAccessRequestProto.newBuilder().
         setSlotId(PBHelperClient.convert(slotId));
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
-      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
-          .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
+    SpanId spanId = Tracer.getCurrentSpanId();
+    if (spanId.isValid()) {
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
+          setTraceId(spanId.getHigh()).
+          setParentId(spanId.getLow()));
     }
     ReleaseShortCircuitAccessRequestProto proto = builder.build();
     send(out, Op.RELEASE_SHORT_CIRCUIT_FDS, proto);
@@ -214,10 +215,11 @@ public class Sender implements DataTransferProtocol {
     ShortCircuitShmRequestProto.Builder builder =
         ShortCircuitShmRequestProto.newBuilder().
         setClientName(clientName);
-    if (Trace.isTracing()) {
-      Span s = Trace.currentSpan();
-      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder()
-          .setTraceId(s.getTraceId()).setParentId(s.getSpanId()));
+    SpanId spanId = Tracer.getCurrentSpanId();
+    if (spanId.isValid()) {
+      builder.setTraceInfo(DataTransferTraceInfoProto.newBuilder().
+          setTraceId(spanId.getHigh()).
+          setParentId(spanId.getLow()));
     }
     ShortCircuitShmRequestProto proto = builder.build();
     send(out, Op.REQUEST_SHORT_CIRCUIT_SHM, proto);
@@ -638,6 +638,8 @@ Release 2.8.0 - UNRELEASED
     HDFS-8740. Move DistributedFileSystem to hadoop-hdfs-client. (Mingliang Liu
     via wheat9)
 
+    HDFS-9080. Update htrace version to 4.0.1 (cmccabe)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
@@ -192,7 +192,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -64,12 +64,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT =
       HdfsClientConfigKeys.DFS_WEBHDFS_ACL_PERMISSION_PATTERN_DEFAULT;
 
-  // HDFS HTrace configuration is controlled by dfs.htrace.spanreceiver.classes,
-  // etc.
-  public static final String DFS_SERVER_HTRACE_PREFIX = "dfs.htrace.";
-  @Deprecated
-  public static final String DFS_CLIENT_HTRACE_PREFIX =
-      HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX;
+  public static final String DFS_CLIENT_HTRACE_SAMPLER_CLASSES =
+      "dfs.client.htrace.sampler.classes";
 
   // HA related configuration
   public static final String DFS_DATANODE_RESTART_REPLICA_EXPIRY_KEY = "dfs.datanode.restart.replica.expiration";
@@ -18,7 +18,6 @@
 package org.apache.hadoop.hdfs.protocol.datatransfer;
 
 import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.fromProto;
-import static org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.continueTraceSpan;
 import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
 
 import java.io.DataInputStream;
@@ -27,7 +26,10 @@ import java.io.IOException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BaseHeaderProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.CachingStrategyProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientOperationHeaderProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferTraceInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpCopyBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpReadBlockProto;
@@ -40,14 +42,21 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmR
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
 
 /** Receiver */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public abstract class Receiver implements DataTransferProtocol {
+  private final Tracer tracer;
   protected DataInputStream in;
 
+  protected Receiver(Tracer tracer) {
+    this.tracer = tracer;
+  }
+
   /** Initialize a receiver for DataTransferProtocol with a socket. */
   protected void initialize(final DataInputStream in) {
     this.in = in;
@@ -64,6 +73,26 @@ public abstract class Receiver implements DataTransferProtocol {
     return Op.read(in);
   }
 
+  private TraceScope continueTraceSpan(DataTransferTraceInfoProto proto,
+      String description) {
+    TraceScope scope = null;
+    SpanId spanId = fromProto(proto);
+    if (spanId != null) {
+      scope = tracer.newScope(description, spanId);
+    }
+    return scope;
+  }
+
+  private TraceScope continueTraceSpan(ClientOperationHeaderProto header,
+      String description) {
+    return continueTraceSpan(header.getBaseHeader(), description);
+  }
+
+  private TraceScope continueTraceSpan(BaseHeaderProto header,
+      String description) {
+    return continueTraceSpan(header.getTraceInfo(), description);
+  }
+
   /** Process op by the corresponding method. */
   protected final void processOp(Op op) throws IOException {
     switch(op) {
@@ -43,10 +43,12 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
+import org.apache.htrace.core.Tracer;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
@@ -69,6 +71,7 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   private ObjectName journalNodeInfoBeanName;
   private String httpServerURI;
   private File localDir;
+  Tracer tracer;
 
   static {
     HdfsConfiguration.init();
@@ -105,6 +108,11 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
     this.localDir = new File(
         conf.get(DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_KEY,
             DFSConfigKeys.DFS_JOURNALNODE_EDITS_DIR_DEFAULT).trim());
+    if (this.tracer == null) {
+      this.tracer = new Tracer.Builder("JournalNode").
+          conf(TraceUtils.wrapHadoopConf("journalnode.htrace", conf)).
+          build();
+    }
   }
 
   private static void validateAndCreateJournalDir(File dir) throws IOException {
@@ -203,6 +211,10 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
       MBeans.unregister(journalNodeInfoBeanName);
       journalNodeInfoBeanName = null;
     }
+    if (tracer != null) {
+      tracer.close();
+      tracer = null;
+    }
   }
 
   /**
@@ -326,5 +338,4 @@ public class JournalNode implements Tool, Configurable, JournalNodeMXBean {
   public Long getJournalCTime(String journalId) throws IOException {
     return getOrCreateJournal(journalId).getJournalCTime();
   }
-
 }
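
The JournalNode hunks follow the per-daemon tracer pattern this commit applies across the NameNode, DataNode, and JournalNode: build one Tracer from prefixed Hadoop configuration at startup and close it at shutdown. A condensed sketch of that lifecycle — the class name is illustrative, and the prefix string is taken verbatim from the hunk above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tracing.TraceUtils;
import org.apache.htrace.core.Tracer;

class DaemonTracerSketch {
  private Tracer tracer;

  void start(Configuration conf) {
    // wrapHadoopConf exposes "journalnode.htrace.*" keys to htrace 4
    tracer = new Tracer.Builder("JournalNode").
        conf(TraceUtils.wrapHadoopConf("journalnode.htrace", conf)).
        build();
  }

  void stop() {
    if (tracer != null) {
      tracer.close(); // flushes span receivers
      tracer = null;
    }
  }
}
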
@@ -85,6 +85,7 @@ class JournalNodeRpcServer implements QJournalProtocol {
         CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
       server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
     }
+    this.server.setTracer(jn.tracer);
   }
 
   void start() {
@@ -47,9 +47,8 @@ import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.net.SocketOutputStream;
 import org.apache.hadoop.util.DataChecksum;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
 
 import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_DONTNEED;
 import static org.apache.hadoop.io.nativeio.NativeIO.POSIX.POSIX_FADV_SEQUENTIAL;
@@ -708,8 +707,8 @@ class BlockSender implements java.io.Closeable {
    */
   long sendBlock(DataOutputStream out, OutputStream baseStream,
       DataTransferThrottler throttler) throws IOException {
-    TraceScope scope =
-        Trace.startSpan("sendBlock_" + block.getBlockId(), Sampler.NEVER);
+    TraceScope scope = datanode.tracer.
+        newScope("sendBlock_" + block.getBlockId());
     try {
       return doSendBlock(out, baseStream, throttler);
     } finally {
@@ -189,12 +189,13 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.tracing.SpanReceiverInfo;
 import org.apache.hadoop.tracing.TraceAdminPB.TraceAdminService;
 import org.apache.hadoop.tracing.TraceAdminProtocol;
 import org.apache.hadoop.tracing.TraceAdminProtocolPB;
 import org.apache.hadoop.tracing.TraceAdminProtocolServerSideTranslatorPB;
+import org.apache.hadoop.tracing.TraceUtils;
+import org.apache.hadoop.tracing.TracerConfigurationManager;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -204,6 +205,7 @@ import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.hadoop.util.VersionInfo;
+import org.apache.htrace.core.Tracer;
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.annotations.VisibleForTesting;
@@ -288,6 +290,8 @@ public class DataNode extends ReconfigurableBase
 
   public static final Log METRICS_LOG = LogFactory.getLog("DataNodeMetricsLog");
 
+  private static final String DATANODE_HTRACE_PREFIX = "datanode.htrace.";
+
   /**
    * Use {@link NetUtils#createSocketAddr(String)} instead.
   */
@@ -368,11 +372,18 @@ public class DataNode extends ReconfigurableBase
   private boolean isPermissionEnabled;
   private String dnUserName = null;
 
-  private SpanReceiverHost spanReceiverHost;
+  final Tracer tracer;
+  private final TracerConfigurationManager tracerConfigurationManager;
   private static final int NUM_CORES = Runtime.getRuntime()
       .availableProcessors();
   private static final double CONGESTION_RATIO = 1.5;
 
+  private static Tracer createTracer(Configuration conf) {
+    return new Tracer.Builder("DataNode").
+        conf(TraceUtils.wrapHadoopConf(DATANODE_HTRACE_PREFIX , conf)).
+        build();
+  }
+
   private long[] oobTimeouts; /** timeout value of each OOB type */
 
   private ScheduledThreadPoolExecutor metricsLoggerTimer;
@@ -385,6 +396,9 @@ public class DataNode extends ReconfigurableBase
   DataNode(final Configuration conf) {
     super(conf);
     this.blockScanner = new BlockScanner(this, conf);
+    this.tracer = createTracer(conf);
+    this.tracerConfigurationManager =
+        new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
     this.fileDescriptorPassingDisabledReason = null;
     this.maxNumberOfBlocksToLog = 0;
     this.confVersion = null;
@@ -405,6 +419,9 @@ public class DataNode extends ReconfigurableBase
            final List<StorageLocation> dataDirs,
            final SecureResources resources) throws IOException {
     super(conf);
+    this.tracer = createTracer(conf);
+    this.tracerConfigurationManager =
+        new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
     this.blockScanner = new BlockScanner(this, conf);
     this.lastDiskErrorCheck = 0;
     this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
@@ -1144,9 +1161,6 @@ public class DataNode extends ReconfigurableBase
     this.dnConf = new DNConf(conf);
     checkSecureConfig(dnConf, conf, resources);
 
-    this.spanReceiverHost =
-      SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
-
     if (dnConf.maxLockedMemory > 0) {
       if (!NativeIO.POSIX.getCacheManipulator().verifyCanMlock()) {
         throw new RuntimeException(String.format(
@@ -1851,9 +1865,6 @@ public class DataNode extends ReconfigurableBase
       MBeans.unregister(dataNodeInfoBeanName);
       dataNodeInfoBeanName = null;
     }
-    if (this.spanReceiverHost != null) {
-      this.spanReceiverHost.closeReceivers();
-    }
     if (shortCircuitRegistry != null) shortCircuitRegistry.shutdown();
     LOG.info("Shutdown complete.");
     synchronized(this) {
@@ -1862,6 +1873,7 @@ public class DataNode extends ReconfigurableBase
       // Notify the main thread.
       notifyAll();
     }
+    tracer.close();
   }
 
 
@@ -2294,6 +2306,7 @@ public class DataNode extends ReconfigurableBase
     if (localDataXceiverServer != null) {
       localDataXceiverServer.start();
     }
+    ipcServer.setTracer(tracer);
     ipcServer.start();
     startPlugins(conf);
   }
@@ -3283,19 +3296,19 @@ public class DataNode extends ReconfigurableBase
   @Override
   public SpanReceiverInfo[] listSpanReceivers() throws IOException {
     checkSuperuserPrivilege();
-    return spanReceiverHost.listSpanReceivers();
+    return tracerConfigurationManager.listSpanReceivers();
   }
 
   @Override
   public long addSpanReceiver(SpanReceiverInfo info) throws IOException {
     checkSuperuserPrivilege();
-    return spanReceiverHost.addSpanReceiver(info);
+    return tracerConfigurationManager.addSpanReceiver(info);
   }
 
   @Override
   public void removeSpanReceiver(long id) throws IOException {
     checkSuperuserPrivilege();
-    spanReceiverHost.removeSpanReceiver(id);
+    tracerConfigurationManager.removeSpanReceiver(id);
   }
 
   /**
@@ -126,7 +126,7 @@ class DataXceiver extends Receiver implements Runnable {
 
   private DataXceiver(Peer peer, DataNode datanode,
       DataXceiverServer dataXceiverServer) throws IOException {
-
+    super(datanode.tracer);
     this.peer = peer;
     this.dnConf = datanode.getDnConf();
     this.socketIn = peer.getInputStream();
@@ -71,12 +71,14 @@ import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
 import org.apache.hadoop.tools.GetUserMappingsProtocol;
-import org.apache.hadoop.tracing.SpanReceiverHost;
 import org.apache.hadoop.tracing.TraceAdminProtocol;
+import org.apache.hadoop.tracing.TraceUtils;
+import org.apache.hadoop.tracing.TracerConfigurationManager;
 import org.apache.hadoop.util.ExitUtil.ExitException;
 import org.apache.hadoop.util.JvmPauseMonitor;
 import org.apache.hadoop.util.ServicePlugin;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.htrace.core.Tracer;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -318,6 +320,8 @@ public class NameNode implements NameNodeStatusMXBean {
   public static final HAState ACTIVE_STATE = new ActiveState();
   public static final HAState STANDBY_STATE = new StandbyState();
 
+  private static final String NAMENODE_HTRACE_PREFIX = "namenode.htrace.";
+
   public static final Log MetricsLog =
       LogFactory.getLog("NameNodeMetricsLog");
 
@@ -345,7 +349,8 @@ public class NameNode implements NameNodeStatusMXBean {
 
   private JvmPauseMonitor pauseMonitor;
   private ObjectName nameNodeStatusBeanName;
-  SpanReceiverHost spanReceiverHost;
+  protected final Tracer tracer;
+  protected final TracerConfigurationManager tracerConfigurationManager;
   ScheduledThreadPoolExecutor metricsLoggerTimer;
 
   /**
@@ -620,9 +625,6 @@ public class NameNode implements NameNodeStatusMXBean {
       startHttpServer(conf);
     }
 
-    this.spanReceiverHost =
-      SpanReceiverHost.get(conf, DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX);
-
     loadNamesystem(conf);
 
     rpcServer = createRpcServer(conf);
@@ -812,8 +814,13 @@ public class NameNode implements NameNodeStatusMXBean {
     this(conf, NamenodeRole.NAMENODE);
   }
 
-  protected NameNode(Configuration conf, NamenodeRole role)
-      throws IOException {
+  protected NameNode(Configuration conf, NamenodeRole role)
+      throws IOException {
+    this.tracer = new Tracer.Builder("NameNode").
+        conf(TraceUtils.wrapHadoopConf(NAMENODE_HTRACE_PREFIX, conf)).
+        build();
+    this.tracerConfigurationManager =
+        new TracerConfigurationManager(NAMENODE_HTRACE_PREFIX, conf);
     this.conf = conf;
     this.role = role;
     setClientNamenodeAddress(conf);
@@ -896,10 +903,8 @@ public class NameNode implements NameNodeStatusMXBean {
       MBeans.unregister(nameNodeStatusBeanName);
       nameNodeStatusBeanName = null;
     }
-    if (this.spanReceiverHost != null) {
-      this.spanReceiverHost.closeReceivers();
-    }
     }
+    tracer.close();
   }
 
   synchronized boolean isStopRequested() {
@@ -412,6 +412,10 @@ class NameNodeRpcServer implements NamenodeProtocols {
         FSLimitException.PathComponentTooLongException.class,
         FSLimitException.MaxDirectoryItemsExceededException.class,
         UnresolvedPathException.class);
+    clientRpcServer.setTracer(nn.tracer);
+    if (serviceRpcServer != null) {
+      clientRpcServer.setTracer(nn.tracer);
+    }
   }
 
   /** Allow access to the client RPC server for testing */
@@ -2019,20 +2023,20 @@ class NameNodeRpcServer implements NamenodeProtocols {
   public SpanReceiverInfo[] listSpanReceivers() throws IOException {
     checkNNStartup();
     namesystem.checkSuperuserPrivilege();
-    return nn.spanReceiverHost.listSpanReceivers();
+    return nn.tracerConfigurationManager.listSpanReceivers();
   }
 
   @Override // TraceAdminProtocol
   public long addSpanReceiver(SpanReceiverInfo info) throws IOException {
     checkNNStartup();
     namesystem.checkSuperuserPrivilege();
-    return nn.spanReceiverHost.addSpanReceiver(info);
+    return nn.tracerConfigurationManager.addSpanReceiver(info);
   }
 
   @Override // TraceAdminProtocol
   public void removeSpanReceiver(long id) throws IOException {
     checkNNStartup();
     namesystem.checkSuperuserPrivilege();
-    nn.spanReceiverHost.removeSpanReceiver(id);
+    nn.tracerConfigurationManager.removeSpanReceiver(id);
   }
 }
@@ -81,6 +81,7 @@ import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Time;
+import org.apache.htrace.core.Tracer;
 
 import com.google.common.annotations.VisibleForTesting;
 
@@ -136,6 +137,8 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
 
   private boolean showReplicaDetails = false;
   private long staleInterval;
+  private Tracer tracer;
+
   /**
    * True if we encountered an internal error during FSCK, such as not being
    * able to delete a corrupt file.
@@ -198,6 +201,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
     this.staleInterval =
         conf.getLong(DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_KEY,
           DFSConfigKeys.DFS_NAMENODE_STALE_DATANODE_INTERVAL_DEFAULT);
+    this.tracer = new Tracer.Builder("NamenodeFsck").build();
 
     for (Iterator<String> it = pmap.keySet().iterator(); it.hasNext();) {
       String key = it.next();
@@ -850,6 +854,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
         setCachingStrategy(CachingStrategy.newDropBehind()).
         setClientCacheContext(dfs.getClientContext()).
         setConfiguration(namenode.conf).
+        setTracer(tracer).
         setRemotePeerFactory(new RemotePeerFactory() {
           @Override
           public Peer newConnectedPeer(InetSocketAddress addr,
@@ -2440,19 +2440,10 @@
 </property>
 
 <property>
-  <name>dfs.htrace.spanreceiver.classes</name>
+  <name>dfs.client.htrace.sampler.classes</name>
   <value></value>
   <description>
-    The class name of the HTrace SpanReceiver for the NameNode and DataNode.
-  </description>
-</property>
-
-<property>
-  <name>dfs.client.htrace.spanreceiver.classes</name>
-  <value></value>
-  <description>
-    The class name of the HTrace SpanReceiver for the HDFS client. You do not
-    need to enable this if your client program has been modified to use HTrace.
+    The class names of the HTrace Samplers to use for the HDFS client.
   </description>
 </property>
|
|||
|
||||
import org.apache.commons.io.IOUtils;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FsTracer;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.net.Peer;
|
||||
import org.apache.hadoop.hdfs.protocol.DatanodeID;
|
||||
|
@ -192,6 +193,7 @@ public class BlockReaderTestUtil {
|
|||
setCachingStrategy(CachingStrategy.newDefaultStrategy()).
|
||||
setConfiguration(fs.getConf()).
|
||||
setAllowShortCircuitLocalReads(true).
|
||||
setTracer(FsTracer.get(fs.getConf())).
|
||||
setRemotePeerFactory(new RemotePeerFactory() {
|
||||
@Override
|
||||
public Peer newConnectedPeer(InetSocketAddress addr,
|
||||
|
|
|
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitCache;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitReplica;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
@@ -195,6 +196,7 @@ public class TestBlockReaderLocal {
           setShortCircuitReplica(replica).
           setCachingStrategy(new CachingStrategy(false, readahead)).
           setVerifyChecksum(checksum).
+          setTracer(FsTracer.get(conf)).
           build();
       dataIn = null;
       metaIn = null;
@@ -27,6 +27,7 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DataStreamer.LastExceptionInStreamer;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -34,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.htrace.core.SpanId;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
@@ -119,6 +121,7 @@ public class TestDFSOutputStream {
     DfsClientConf dfsClientConf = mock(DfsClientConf.class);
     DFSClient client = mock(DFSClient.class);
     when(client.getConf()).thenReturn(dfsClientConf);
+    when(client.getTracer()).thenReturn(FsTracer.get(new Configuration()));
     client.clientRunning = true;
     DataStreamer stream = new DataStreamer(
         mock(HdfsFileStatus.class),
@@ -139,7 +142,7 @@ public class TestDFSOutputStream {
         Whitebox.getInternalState(stream, "congestedNodes");
     congestedNodes.add(mock(DatanodeInfo.class));
     DFSPacket packet = mock(DFSPacket.class);
-    when(packet.getTraceParents()).thenReturn(new long[] {});
+    when(packet.getTraceParents()).thenReturn(new SpanId[] {});
     dataQueue.add(packet);
     stream.run();
     Assert.assertTrue(congestedNodes.isEmpty());
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.util.Random;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.htrace.core.SpanId;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -70,24 +71,24 @@ public class TestDFSPacket {
   public void testAddParentsGetParents() throws Exception {
     DFSPacket p = new DFSPacket(null, maxChunksPerPacket,
                                 0, 0, checksumSize, false);
-    long parents[] = p.getTraceParents();
+    SpanId parents[] = p.getTraceParents();
     Assert.assertEquals(0, parents.length);
-    p.addTraceParent(123);
-    p.addTraceParent(123);
+    p.addTraceParent(new SpanId(0, 123));
+    p.addTraceParent(new SpanId(0, 123));
     parents = p.getTraceParents();
     Assert.assertEquals(1, parents.length);
-    Assert.assertEquals(123, parents[0]);
+    Assert.assertEquals(new SpanId(0, 123), parents[0]);
     parents = p.getTraceParents(); // test calling 'get' again.
     Assert.assertEquals(1, parents.length);
-    Assert.assertEquals(123, parents[0]);
-    p.addTraceParent(1);
-    p.addTraceParent(456);
-    p.addTraceParent(789);
+    Assert.assertEquals(new SpanId(0, 123), parents[0]);
+    p.addTraceParent(new SpanId(0, 1));
+    p.addTraceParent(new SpanId(0, 456));
+    p.addTraceParent(new SpanId(0, 789));
     parents = p.getTraceParents();
     Assert.assertEquals(4, parents.length);
-    Assert.assertEquals(1, parents[0]);
-    Assert.assertEquals(123, parents[1]);
-    Assert.assertEquals(456, parents[2]);
-    Assert.assertEquals(789, parents[3]);
+    Assert.assertEquals(new SpanId(0, 1), parents[0]);
+    Assert.assertEquals(new SpanId(0, 123), parents[1]);
+    Assert.assertEquals(new SpanId(0, 456), parents[2]);
+    Assert.assertEquals(new SpanId(0, 789), parents[3]);
   }
 }
@@ -50,6 +50,7 @@ import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystemTestHelper;
+import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.InvalidRequestException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -89,7 +90,6 @@ import org.apache.hadoop.util.GSet;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;
 import org.apache.log4j.Logger;
-import org.apache.htrace.Sampler;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -910,7 +910,7 @@ public class TestCacheDirectives {
 
     // Uncache and check each path in sequence
     RemoteIterator<CacheDirectiveEntry> entries =
-      new CacheDirectiveIterator(nnRpc, null, Sampler.NEVER);
+      new CacheDirectiveIterator(nnRpc, null, FsTracer.get(conf));
     for (int i=0; i<numFiles; i++) {
       CacheDirectiveEntry entry = entries.next();
       nnRpc.removeCacheDirective(entry.getInfo().getId());
@@ -387,6 +387,7 @@ public class TestFsck {
 
     // Copy the non-corrupt blocks of corruptFileName to lost+found.
     outStr = runFsck(conf, 1, false, "/", "-move");
+    FSImage.LOG.info("WATERMELON: outStr = " + outStr);
     assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS));
 
     // Make sure that we properly copied the block files from the DataNodes
@@ -77,8 +77,8 @@ public class TestHdfsConfigFields extends TestConfigurationFieldsBase {
     // Some properties have moved to HdfsClientConfigKeys
     xmlPropsToSkipCompare.add("dfs.client.short.circuit.replica.stale.threshold.ms");
 
-    // Ignore SpanReceiveHost properties
-    xmlPropsToSkipCompare.add("dfs.htrace.spanreceiver.classes");
-    xmlPropsToSkipCompare.add("dfs.client.htrace.spanreceiver.classes");
+    // Ignore HTrace properties
+    xmlPropsToSkipCompare.add("fs.client.htrace");
+    xmlPropsToSkipCompare.add("hadoop.htrace");
   }
 }
@@ -21,6 +21,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.net.unix.TemporarySocketDirectory;
+import org.apache.htrace.core.Tracer;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -58,8 +59,8 @@ public class TestTraceAdmin {
   public void testCreateAndDestroySpanReceiver() throws Exception {
     Configuration conf = new Configuration();
     conf = new Configuration();
-    conf.set(DFSConfigKeys.DFS_SERVER_HTRACE_PREFIX +
-        SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX, "");
+    conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
+        Tracer.SPAN_RECEIVER_CLASSES_KEY, "");
     MiniDFSCluster cluster =
         new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
@@ -74,12 +75,12 @@ public class TestTraceAdmin {
     Assert.assertEquals("ret:0, Added trace span receiver 1 with " +
         "configuration dfs.htrace.local-file-span-receiver.path = " + tracePath + NEWLINE,
         runTraceCommand(trace, "-add", "-host", getHostPortForNN(cluster),
-            "-class", "org.apache.htrace.impl.LocalFileSpanReceiver",
+            "-class", "org.apache.htrace.core.LocalFileSpanReceiver",
            "-Cdfs.htrace.local-file-span-receiver.path=" + tracePath));
     String list =
         runTraceCommand(trace, "-list", "-host", getHostPortForNN(cluster));
     Assert.assertTrue(list.startsWith("ret:0"));
-    Assert.assertTrue(list.contains("1 org.apache.htrace.impl.LocalFileSpanReceiver"));
+    Assert.assertTrue(list.contains("1 org.apache.htrace.core.LocalFileSpanReceiver"));
     Assert.assertEquals("ret:0, Removed trace span receiver 1" + NEWLINE,
         runTraceCommand(trace, "-remove", "1", "-host",
             getHostPortForNN(cluster)));
|
|||
*/
|
||||
package org.apache.hadoop.tracing;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
import org.apache.commons.lang.RandomStringUtils;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
||||
import org.apache.hadoop.fs.FSDataInputStream;
|
||||
import org.apache.hadoop.fs.FSDataOutputStream;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.FsTracer;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hdfs.DistributedFileSystem;
|
||||
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
||||
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
|
||||
import org.apache.htrace.Sampler;
|
||||
import org.apache.htrace.Span;
|
||||
import org.apache.htrace.Trace;
|
||||
import org.apache.htrace.TraceScope;
|
||||
import org.apache.htrace.core.Sampler;
|
||||
import org.apache.htrace.core.Span;
|
||||
import org.apache.htrace.core.TraceScope;
|
||||
import org.apache.htrace.core.Tracer;
|
||||
import org.junit.After;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Before;
|
||||
import org.junit.BeforeClass;
|
||||
import org.junit.Test;
|
||||
import java.io.IOException;
|
||||
import java.nio.ByteBuffer;
|
||||
import java.util.List;
|
||||
import java.util.Map;
|
||||
|
||||
public class TestTracing {
|
||||

private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem dfs;

private Tracer prevTracer;

private final static Configuration TRACING_CONF;
private final static Configuration NO_TRACING_CONF;

static {
NO_TRACING_CONF = new Configuration();
NO_TRACING_CONF.setLong("dfs.blocksize", 100 * 1024);

TRACING_CONF = new Configuration(NO_TRACING_CONF);
TRACING_CONF.set(CommonConfigurationKeys.FS_CLIENT_HTRACE_PREFIX +
Tracer.SPAN_RECEIVER_CLASSES_KEY,
SetSpanReceiver.class.getName());
TRACING_CONF.set(CommonConfigurationKeys.FS_CLIENT_HTRACE_PREFIX +
Tracer.SAMPLER_CLASSES_KEY, "AlwaysSampler");
}

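For reference, FS_CLIENT_HTRACE_PREFIX is the client-side tracing namespace this change introduces, so the static block above boils down to roughly the following settings (a hedged expansion, assuming htrace 4's Tracer.SPAN_RECEIVER_CLASSES_KEY and Tracer.SAMPLER_CLASSES_KEY resolve to "span.receiver.classes" and "sampler.classes"):

  // Hedged expansion of the TRACING_CONF block above:
  conf.set("fs.client.htrace.span.receiver.classes",
      SetSpanReceiver.class.getName());
  conf.set("fs.client.htrace.sampler.classes", "AlwaysSampler");
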
@Test
public void testTracing() throws Exception {
// write and read without tracing started
String fileName = "testTracingDisabled.dat";
writeTestFile(fileName);
Assert.assertTrue(SetSpanReceiver.size() == 0);
Assert.assertEquals(0, SetSpanReceiver.size());
readTestFile(fileName);
Assert.assertTrue(SetSpanReceiver.size() == 0);
Assert.assertEquals(0, SetSpanReceiver.size());

writeWithTracing();
readWithTracing();
writeTestFile("testReadTraceHooks.dat");

FsTracer.clear();
Tracer tracer = FsTracer.get(TRACING_CONF);
writeWithTracing(tracer);
readWithTracing(tracer);
}

public void writeWithTracing() throws Exception {
private void writeWithTracing(Tracer tracer) throws Exception {
long startTime = System.currentTimeMillis();
TraceScope ts = Trace.startSpan("testWriteTraceHooks", Sampler.ALWAYS);
TraceScope ts = tracer.newScope("testWriteTraceHooks");
writeTestFile("testWriteTraceHooks.dat");
long endTime = System.currentTimeMillis();
ts.close();

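The core API shift in this hunk: htrace 3's static Trace.startSpan(name, Sampler.ALWAYS) becomes an instance call on a configured Tracer, with sampling decided by the tracer's own samplers rather than a method argument. A minimal sketch of the new pattern (conf is a placeholder Configuration carrying the fs.client.htrace.* keys; TraceScope is Closeable in htrace 4, so try-with-resources works):

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FsTracer;
  import org.apache.htrace.core.TraceScope;
  import org.apache.htrace.core.Tracer;

  public class ScopeSketch {
    static void tracedWork(Configuration conf) {
      Tracer tracer = FsTracer.get(conf);
      try (TraceScope scope = tracer.newScope("myOperation")) {
        // work done here is recorded under the "myOperation" span;
        // scopes opened on this thread become child spans automatically
      }
    }
  }
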
@ -107,7 +129,8 @@ public class TestTracing {
};
for (String desc : spansInTopTrace) {
for (Span span : map.get(desc)) {
Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
Assert.assertEquals(ts.getSpan().getSpanId().getHigh(),
span.getSpanId().getHigh());
}
}

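Why the assertion changed: htrace 4 drops the separate trace id; a span is identified by a 128-bit SpanId, and spans belonging to one trace share its high 64 bits. Trace membership is therefore checked roughly like this (a sketch, assuming htrace 4.0.x SpanId semantics):

  long traceHigh = ts.getSpan().getSpanId().getHigh();
  for (Span span : SetSpanReceiver.getSpans()) {
    Assert.assertEquals(traceHigh, span.getSpanId().getHigh());
  }
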
@ -120,12 +143,10 @@ public class TestTracing {
SetSpanReceiver.clear();
}

public void readWithTracing() throws Exception {
String fileName = "testReadTraceHooks.dat";
writeTestFile(fileName);
private void readWithTracing(Tracer tracer) throws Exception {
long startTime = System.currentTimeMillis();
TraceScope ts = Trace.startSpan("testReadTraceHooks", Sampler.ALWAYS);
readTestFile(fileName);
TraceScope ts = tracer.newScope("testReadTraceHooks");
readTestFile("testReadTraceHooks.dat");
ts.close();
long endTime = System.currentTimeMillis();

@ -150,7 +171,11 @@ public class TestTracing {
// There should only be one trace id as it should all be homed in the
// top trace.
for (Span span : SetSpanReceiver.getSpans()) {
Assert.assertEquals(ts.getSpan().getTraceId(), span.getTraceId());
System.out.println(span.toJson());
}
for (Span span : SetSpanReceiver.getSpans()) {
Assert.assertEquals(ts.getSpan().getSpanId().getHigh(),
span.getSpanId().getHigh());
}
SetSpanReceiver.clear();
}

@ -185,18 +210,9 @@ public class TestTracing {
}
}

@BeforeClass
public static void setup() throws IOException {
conf = new Configuration();
conf.setLong("dfs.blocksize", 100 * 1024);
conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
SetSpanReceiver.class.getName());
}

@Before
public void startCluster() throws IOException {
cluster = new MiniDFSCluster.Builder(conf)
cluster = new MiniDFSCluster.Builder(NO_TRACING_CONF)
.numDataNodes(3)
.build();
cluster.waitActive();

@ -207,6 +223,6 @@ public class TestTracing {
@After
public void shutDown() throws IOException {
cluster.shutdown();
FsTracer.clear();
}

}

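Note the new teardown: FsTracer hands out one process-wide Tracer per JVM, so tests that switch between traced and untraced Configurations must reset it explicitly, as testTracing() does above. A sketch of that reset-and-rebuild step:

  // Drop the cached process-wide tracer, then rebuild from the tracing
  // config; FsTracer.clear() is the reset hook this change uses in tests.
  FsTracer.clear();
  Tracer tracer = FsTracer.get(TRACING_CONF);
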
@ -23,6 +23,8 @@ import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsTracer;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSTestUtil;

@ -33,9 +35,8 @@ import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.net.unix.DomainSocket;
import org.apache.hadoop.net.unix.TemporarySocketDirectory;
import org.apache.hadoop.util.NativeCodeLoader;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

@ -44,7 +45,6 @@ public class TestTracingShortCircuitLocalRead {
private static Configuration conf;
private static MiniDFSCluster cluster;
private static DistributedFileSystem dfs;
private static SpanReceiverHost spanReceiverHost;
private static TemporarySocketDirectory sockDir;
static final Path TEST_PATH = new Path("testShortCircuitTraceHooks");
static final int TEST_LENGTH = 1234;

@ -64,9 +64,12 @@ public class TestTracingShortCircuitLocalRead {
public void testShortCircuitTraceHooks() throws IOException {
assumeTrue(NativeCodeLoader.isNativeCodeLoaded() && !Path.WINDOWS);
conf = new Configuration();
conf.set(HdfsClientConfigKeys.DFS_CLIENT_HTRACE_PREFIX +
SpanReceiverHost.SPAN_RECEIVERS_CONF_SUFFIX,
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
Tracer.SPAN_RECEIVER_CLASSES_KEY,
SetSpanReceiver.class.getName());
conf.set(TraceUtils.DEFAULT_HADOOP_PREFIX +
Tracer.SAMPLER_CLASSES_KEY,
"AlwaysSampler");
conf.setLong("dfs.blocksize", 100 * 1024);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY, false);

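This test uses the server-side prefix rather than the client one: TraceUtils.DEFAULT_HADOOP_PREFIX should expand to "hadoop.htrace.", so the setup above amounts to roughly the following (a hedged expansion, assuming that prefix value and the htrace 4 Tracer key constants):

  conf.set("hadoop.htrace.span.receiver.classes",
      SetSpanReceiver.class.getName());
  conf.set("hadoop.htrace.sampler.classes", "AlwaysSampler");
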
@ -81,7 +84,8 @@ public class TestTracingShortCircuitLocalRead {
try {
DFSTestUtil.createFile(dfs, TEST_PATH, TEST_LENGTH, (short)1, 5678L);

TraceScope ts = Trace.startSpan("testShortCircuitTraceHooks", Sampler.ALWAYS);
TraceScope ts = FsTracer.get(conf).
newScope("testShortCircuitTraceHooks");
FSDataInputStream stream = dfs.open(TEST_PATH);
byte buf[] = new byte[TEST_LENGTH];
IOUtils.readFully(stream, buf, 0, TEST_LENGTH);

@ -806,8 +806,8 @@
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<version>3.2.0-incubating</version>
<artifactId>htrace-core4</artifactId>
<version>4.0.1-incubating</version>
</dependency>
<dependency>
<groupId>org.jdom</groupId>
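For context on the artifact swap above: htrace-core4 relocates everything into the org.apache.htrace.core package, and receivers/samplers hang off a Tracer built from configuration. A self-contained sketch against the bare htrace-core4 API, without the Hadoop wrappers (the output path and the receiver path key follow the TestTraceAdmin diff above and are assumptions about 4.0.1):

  import org.apache.htrace.core.HTraceConfiguration;
  import org.apache.htrace.core.TraceScope;
  import org.apache.htrace.core.Tracer;

  public class Htrace4Sketch {
    public static void main(String[] args) {
      // Build a tracer directly, mirroring what FsTracer does internally.
      Tracer tracer = new Tracer.Builder("Htrace4Sketch")
          .conf(HTraceConfiguration.fromKeyValuePairs(
              "sampler.classes", "AlwaysSampler",
              "span.receiver.classes",
                  "org.apache.htrace.core.LocalFileSpanReceiver",
              "local-file-span-receiver.path", "/tmp/spans.json"))
          .build();
      try (TraceScope scope = tracer.newScope("demo")) {
        // traced work goes here; the span is flushed to the receiver on close
      }
      tracer.close();
    }
  }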