diff --git a/hbase-backup/pom.xml b/hbase-backup/pom.xml
index e73ea3020b9..80c4e253cdc 100644
--- a/hbase-backup/pom.xml
+++ b/hbase-backup/pom.xml
@@ -174,6 +174,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@@ -287,6 +291,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-client/pom.xml b/hbase-client/pom.xml
index 0634ef8b247..ab1a7451f61 100644
--- a/hbase-client/pom.xml
+++ b/hbase-client/pom.xml
@@ -166,7 +166,7 @@
<groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
<groupId>org.jruby.jcodings</groupId>
@@ -258,6 +258,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@@ -326,6 +330,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
index 4df176809f0..91225a76100 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/AsyncRequestFutureImpl.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RetryImmediatelyException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -56,7 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Tracer;
/**
* The context, and return value, for a single submit/submitAll call.
@@ -582,7 +583,13 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
asyncProcess.incTaskCounters(multiAction.getRegions(), server);
SingleServerRequestRunnable runnable = createSingleServerRequest(
multiAction, numAttempt, server, callsInProgress);
- return Collections.singletonList(Trace.wrap("AsyncProcess.sendMultiAction", runnable));
+ Tracer tracer = Tracer.curThreadTracer();
+
+ if (tracer == null) {
+ return Collections.singletonList(runnable);
+ } else {
+ return Collections.singletonList(tracer.wrap(runnable, "AsyncProcess.sendMultiAction"));
+ }
}
// group the actions by the amount of delay
@@ -618,7 +625,7 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
asyncProcess.connection.getConnectionMetrics().incrNormalRunners();
}
}
- runnable = Trace.wrap(traceText, runnable);
+ runnable = TraceUtil.wrap(runnable, traceText);
toReturn.add(runnable);
}
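[Reviewer note] This hunk introduces the idiom the rest of the patch leans on: htrace 3's static Trace.wrap() is gone, and in htrace 4 the thread's Tracer is null whenever tracing is disabled, so every wrap site needs a guard. The same guard as a standalone sketch (task is a hypothetical Runnable):

    Tracer tracer = Tracer.curThreadTracer();   // null when no tracer is attached to this thread
    Runnable wrapped = (tracer == null)
        ? task                                  // tracing off: hand the task back unwrapped
        : tracer.wrap(task, "AsyncProcess.sendMultiAction");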
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
index ccfe6baceb4..b05ad64146c 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/client/ResultBoundedCompletionService.java
@@ -28,9 +28,9 @@ import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
/**
* A completion service for the RpcRetryingCallerFactory.
@@ -168,7 +168,7 @@ public class ResultBoundedCompletionService<V> {
public void submit(RetryingCallable<V> task, int callTimeout, int id) {
QueueingFuture<V> newFuture = new QueueingFuture<>(task, callTimeout, id);
- executor.execute(Trace.wrap(newFuture));
+ executor.execute(TraceUtil.wrap(newFuture, "ResultBoundedCompletionService.submit"));
tasks[id] = newFuture;
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
index 052433684a1..fcc6f7c5d6f 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/BlockingRpcConnection.java
@@ -24,10 +24,6 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.isFatalConnectionException;
import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled;
import static org.apache.hadoop.hbase.ipc.IPCUtil.write;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
@@ -55,10 +51,15 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
+import org.apache.hadoop.hbase.security.SaslUtil;
+import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
@@ -66,17 +67,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHea
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
-import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Thread that reads responses and notifies callers. Each connection owns a socket connected to a
@@ -574,7 +573,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable {
}
private void tracedWriteRequest(Call call) throws IOException {
- try (TraceScope ignored = Trace.startSpan("RpcClientImpl.tracedWriteRequest", call.span)) {
+ try (TraceScope ignored = TraceUtil.createTrace("RpcClientImpl.tracedWriteRequest",
+ call.span)) {
writeRequest(call);
}
}
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
index 5c0689ada88..72f03f96164 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/Call.java
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.MetricsConnection;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;
/** A call waiting for a value. */
@InterfaceAudience.Private
@@ -73,7 +73,7 @@ class Call {
this.timeout = timeout;
this.priority = priority;
this.callback = callback;
- this.span = Trace.currentSpan();
+ this.span = Tracer.getCurrentSpan();
}
@Override
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
index 7c0ddf02f9c..8e3e9aac499 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ipc/IPCUtil.java
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
@@ -102,10 +101,11 @@ class IPCUtil {
static RequestHeader buildRequestHeader(Call call, CellBlockMeta cellBlockMeta) {
RequestHeader.Builder builder = RequestHeader.newBuilder();
builder.setCallId(call.id);
- if (call.span != null) {
+ //TODO handle htrace API change, see HBASE-18895
+ /*if (call.span != null) {
builder.setTraceInfo(RPCTInfo.newBuilder().setParentId(call.span.getSpanId())
- .setTraceId(call.span.getTraceId()));
- }
+ .setTraceId(call.span.getTracerId()));
+ }*/
builder.setMethodName(call.md.getName());
builder.setRequestParam(call.param != null);
if (cellBlockMeta != null) {
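[Reviewer note] On the commented-out block above: htrace 4 span ids are 128-bit (org.apache.htrace.core.SpanId, a pair of longs), so the two int64 fields of RPCTInfo can no longer carry a span id losslessly — hence the HBASE-18895 TODO rather than a mechanical port. One possible shape for the follow-up, purely an illustrative sketch and not the committed fix:

    // hypothetical: would need RPCTInfo widened to carry both halves losslessly
    if (call.span != null) {
      SpanId spanId = call.span.getSpanId();
      builder.setTraceInfo(RPCTInfo.newBuilder()
          .setTraceId(spanId.getHigh())
          .setParentId(spanId.getLow()));
    }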
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
index 94377c064d2..04f709f92d3 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/zookeeper/RecoverableZooKeeper.java
@@ -33,8 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
@@ -156,11 +157,8 @@ public class RecoverableZooKeeper {
* This function will not throw NoNodeException if the path does not
* exist.
*/
- public void delete(String path, int version)
- throws InterruptedException, KeeperException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.delete");
+ public void delete(String path, int version) throws InterruptedException, KeeperException {
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) {
RetryCounter retryCounter = retryCounterFactory.create();
boolean isRetry = false; // False for first attempt, true for all retries.
while (true) {
@@ -197,8 +195,6 @@ public class RecoverableZooKeeper {
retryCounter.sleepUntilNextRetry();
isRetry = true;
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -206,11 +202,8 @@ public class RecoverableZooKeeper {
* exists is an idempotent operation. Retry before throwing exception
* @return A Stat instance
*/
- public Stat exists(String path, Watcher watcher)
- throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+ public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException {
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -236,8 +229,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -245,11 +236,8 @@ public class RecoverableZooKeeper {
* exists is an idempotent operation. Retry before throwing exception
* @return A Stat instance
*/
- public Stat exists(String path, boolean watch)
- throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+ public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -275,8 +263,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -297,9 +283,7 @@ public class RecoverableZooKeeper {
*/
public List<String> getChildren(String path, Watcher watcher)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -325,8 +309,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -336,9 +318,7 @@ public class RecoverableZooKeeper {
*/
public List<String> getChildren(String path, boolean watch)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -364,8 +344,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -375,9 +353,7 @@ public class RecoverableZooKeeper {
*/
public byte[] getData(String path, Watcher watcher, Stat stat)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -403,8 +379,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -414,9 +388,7 @@ public class RecoverableZooKeeper {
*/
public byte[] getData(String path, boolean watch, Stat stat)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -442,8 +414,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -455,9 +425,7 @@ public class RecoverableZooKeeper {
*/
public Stat setData(String path, byte[] data, int version)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.setData");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setData")) {
RetryCounter retryCounter = retryCounterFactory.create();
byte[] newData = appendMetaData(id, data);
boolean isRetry = false;
@@ -505,8 +473,6 @@ public class RecoverableZooKeeper {
retryCounter.sleepUntilNextRetry();
isRetry = true;
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -516,9 +482,7 @@ public class RecoverableZooKeeper {
*/
public List<ACL> getAcl(String path, Stat stat)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.getAcl");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getAcl")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -544,8 +508,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -555,9 +517,7 @@ public class RecoverableZooKeeper {
*/
public Stat setAcl(String path, List<ACL> acls, int version)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.setAcl");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setAcl")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@@ -583,8 +543,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -606,9 +564,7 @@ public class RecoverableZooKeeper {
public String create(String path, byte[] data, List<ACL> acl,
CreateMode createMode)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.create");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.create")) {
byte[] newData = appendMetaData(id, data);
switch (createMode) {
case EPHEMERAL:
@@ -623,8 +579,6 @@ public class RecoverableZooKeeper {
throw new IllegalArgumentException("Unrecognized CreateMode: " +
createMode);
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
@@ -753,9 +707,7 @@ public class RecoverableZooKeeper {
*/
public List<OpResult> multi(Iterable<Op> ops)
throws KeeperException, InterruptedException {
- TraceScope traceScope = null;
- try {
- traceScope = Trace.startSpan("RecoverableZookeeper.multi");
+ try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.multi")) {
RetryCounter retryCounter = retryCounterFactory.create();
Iterable<Op> multiOps = prepareZKMulti(ops);
while (true) {
@@ -782,8 +734,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
- } finally {
- if (traceScope != null) traceScope.close();
}
}
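[Reviewer note] The simplification running through this whole file works because TraceUtil.createTrace() returns null when no tracer has been initialized, and Java's try-with-resources silently skips close() on a null resource. That is what lets every null-checked finally block above collapse into the try header; the pattern in isolation:

    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) {
      // scope is null when tracing is off; close() is then simply never invoked
      // ... retry loop goes here ...
    }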
diff --git a/hbase-common/pom.xml b/hbase-common/pom.xml
index 0c457368bb8..3f8a66a5fbe 100644
--- a/hbase-common/pom.xml
+++ b/hbase-common/pom.xml
@@ -244,7 +244,7 @@
<groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
<groupId>org.apache.commons</groupId>
@@ -344,6 +344,12 @@
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
@@ -390,6 +396,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
index 55e53e376af..b31a4f6d73a 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/HBaseHTraceConfiguration.java
@@ -18,16 +18,15 @@
package org.apache.hadoop.hbase.trace;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.HTraceConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.HTraceConfiguration;
+import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public class HBaseHTraceConfiguration extends HTraceConfiguration {
- private static final Log LOG =
- LogFactory.getLog(HBaseHTraceConfiguration.class);
+ private static final Log LOG = LogFactory.getLog(HBaseHTraceConfiguration.class);
public static final String KEY_PREFIX = "hbase.htrace.";
@@ -65,7 +64,7 @@ public class HBaseHTraceConfiguration extends HTraceConfiguration {
@Override
public String get(String key) {
- return conf.get(KEY_PREFIX +key);
+ return conf.get(KEY_PREFIX + key);
}
@Override
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
index cb65f0970e7..93a5fff8db9 100644
--- a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/SpanReceiverHost.java
@@ -24,10 +24,8 @@ import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.SpanReceiver;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.SpanReceiver;
-import org.apache.htrace.SpanReceiverBuilder;
-import org.apache.htrace.Trace;
/**
* This class provides functions for reading the names of SpanReceivers from
@@ -62,6 +60,16 @@ public class SpanReceiverHost {
}
+  public static Configuration getConfiguration() {
+ synchronized (SingletonHolder.INSTANCE.lock) {
+ if (SingletonHolder.INSTANCE.host == null || SingletonHolder.INSTANCE.host.conf == null) {
+ return null;
+ }
+
+ return SingletonHolder.INSTANCE.host.conf;
+ }
+ }
+
SpanReceiverHost(Configuration conf) {
receivers = new HashSet<>();
this.conf = conf;
@@ -78,18 +86,18 @@ public class SpanReceiverHost {
return;
}
- SpanReceiverBuilder builder = new SpanReceiverBuilder(new HBaseHTraceConfiguration(conf));
+ SpanReceiver.Builder builder = new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf));
for (String className : receiverNames) {
className = className.trim();
- SpanReceiver receiver = builder.spanReceiverClass(className).build();
+ SpanReceiver receiver = builder.className(className).build();
if (receiver != null) {
receivers.add(receiver);
LOG.info("SpanReceiver " + className + " was loaded successfully.");
}
}
for (SpanReceiver rcvr : receivers) {
- Trace.addReceiver(rcvr);
+ TraceUtil.addReceiver(rcvr);
}
}
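[Reviewer note] Side by side, the receiver wiring change is: htrace 3's top-level SpanReceiverBuilder becomes the nested SpanReceiver.Builder, and receivers register with a tracer's pool instead of the global Trace. The null check is kept because the builder appears to report failures by returning null rather than throwing. A condensed sketch:

    // htrace 3: new SpanReceiverBuilder(hconf).spanReceiverClass(name).build();
    SpanReceiver receiver =
        new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf)).className(name).build();
    if (receiver != null) {
      TraceUtil.addReceiver(receiver);          // registers with the tracer's TracerPool
    }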
diff --git a/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java
new file mode 100644
index 00000000000..d52c67d80d8
--- /dev/null
+++ b/hbase-common/src/main/java/org/apache/hadoop/hbase/trace/TraceUtil.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.HTraceConfiguration;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+
+/**
+ * This wrapper class provides functions for accessing htrace 4+ functionality in a simplified way.
+ */
+public final class TraceUtil {
+ private static HTraceConfiguration conf;
+ private static Tracer tracer;
+
+ private TraceUtil() {
+ }
+
+ public static void initTracer(Configuration c) {
+    if (c != null) {
+ conf = new HBaseHTraceConfiguration(c);
+ }
+
+ if (tracer == null && conf != null) {
+ tracer = new Tracer.Builder("Tracer").conf(conf).build();
+ }
+ }
+
+ /**
+ * Wrapper method to create new TraceScope with the given description
+ * @return TraceScope or null when not tracing
+ */
+ public static TraceScope createTrace(String description) {
+ return (tracer == null) ? null : tracer.newScope(description);
+ }
+
+ /**
+ * Wrapper method to create new child TraceScope with the given description
+ * and parent scope's spanId
+ * @param span parent span
+ * @return TraceScope or null when not tracing
+ */
+ public static TraceScope createTrace(String description, Span span) {
+    if (span == null) {
+      return createTrace(description);
+    }
+
+ return (tracer == null) ? null : tracer.newScope(description, span.getSpanId());
+ }
+
+ /**
+ * Wrapper method to add new sampler to the default tracer
+ * @return true if added, false if it was already added
+ */
+ public static boolean addSampler(Sampler sampler) {
+ if (sampler == null) {
+ return false;
+ }
+
+ return (tracer == null) ? false : tracer.addSampler(sampler);
+ }
+
+ /**
+ * Wrapper method to add key-value pair to TraceInfo of actual span
+ */
+  public static void addKVAnnotation(String key, String value) {
+ Span span = Tracer.getCurrentSpan();
+ if (span != null) {
+ span.addKVAnnotation(key, value);
+ }
+ }
+
+ /**
+ * Wrapper method to add receiver to actual tracerpool
+ * @return true if successfull, false if it was already added
+ */
+ public static boolean addReceiver(SpanReceiver rcvr) {
+ return (tracer == null) ? false : tracer.getTracerPool().addReceiver(rcvr);
+ }
+
+ /**
+ * Wrapper method to remove receiver from actual tracerpool
+ * @return true if removed, false if doesn't exist
+ */
+ public static boolean removeReceiver(SpanReceiver rcvr) {
+ return (tracer == null) ? false : tracer.getTracerPool().removeReceiver(rcvr);
+ }
+
+ /**
+   * Wrapper method to add a timeline annotation to the current span with the given message
+ */
+ public static void addTimelineAnnotation(String msg) {
+ Span span = Tracer.getCurrentSpan();
+ if (span != null) {
+ span.addTimelineAnnotation(msg);
+ }
+ }
+
+ /**
+ * Wrap runnable with current tracer and description
+ * @param runnable to wrap
+ * @return wrapped runnable or original runnable when not tracing
+ */
+ public static Runnable wrap(Runnable runnable, String description) {
+ return (tracer == null) ? runnable : tracer.wrap(runnable, description);
+ }
+}
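[Reviewer note] End to end, the new wrapper is meant to be used like this — a minimal sketch assuming an ordinary HBase Configuration and that receiver/sampler setup has happened elsewhere:

    Configuration conf = HBaseConfiguration.create();
    TraceUtil.initTracer(conf);                  // builds the shared Tracer once
    try (TraceScope scope = TraceUtil.createTrace("myOperation")) {
      TraceUtil.addKVAnnotation("table", "t1");  // all helpers no-op when tracing is off
      TraceUtil.addTimelineAnnotation("phase one done");
      // ... traced work ...
    }                                            // scope may be null; try-with-resources copes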
diff --git a/hbase-endpoint/pom.xml b/hbase-endpoint/pom.xml
index 5acd1b72a09..564565eee6f 100644
--- a/hbase-endpoint/pom.xml
+++ b/hbase-endpoint/pom.xml
@@ -254,6 +254,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
@@ -285,6 +291,10 @@
<type>test-jar</type>
<scope>test</scope>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -296,6 +306,10 @@
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -330,11 +344,21 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
diff --git a/hbase-examples/pom.xml b/hbase-examples/pom.xml
index 8d78192034c..678e045065e 100644
--- a/hbase-examples/pom.xml
+++ b/hbase-examples/pom.xml
@@ -245,6 +245,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
@@ -290,10 +296,22 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-external-blockcache/pom.xml b/hbase-external-blockcache/pom.xml
index 7cbfcee3dfb..f8073ab0efa 100644
--- a/hbase-external-blockcache/pom.xml
+++ b/hbase-external-blockcache/pom.xml
@@ -173,7 +173,7 @@
<groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
<groupId>junit</groupId>
@@ -244,6 +244,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -296,6 +300,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
diff --git a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
index d7593676b3b..c05499c34e2 100644
--- a/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
+++ b/hbase-external-blockcache/src/main/java/org/apache/hadoop/hbase/io/hfile/MemcachedBlockCache.java
@@ -1,3 +1,4 @@
+
/**
* Copyright The Apache Software Foundation
*
@@ -19,24 +20,6 @@
package org.apache.hadoop.hbase.io.hfile;
-import net.spy.memcached.CachedData;
-import net.spy.memcached.ConnectionFactoryBuilder;
-import net.spy.memcached.FailureMode;
-import net.spy.memcached.MemcachedClient;
-import net.spy.memcached.transcoders.Transcoder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.nio.SingleByteBuff;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
@@ -46,6 +29,24 @@ import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.ExecutionException;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import net.spy.memcached.CachedData;
+import net.spy.memcached.ConnectionFactoryBuilder;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.transcoders.Transcoder;
+
/**
* Class to store blocks into memcached.
* This should only be used on a cluster of Memcached daemons that are tuned well and have a
@@ -134,7 +135,7 @@ public class MemcachedBlockCache implements BlockCache {
// Assume that nothing is the block cache
HFileBlock result = null;
- try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
+ try (TraceScope traceScope = TraceUtil.createTrace("MemcachedBlockCache.getBlock")) {
result = client.get(cacheKey.toString(), tc);
} catch (Exception e) {
// Catch a pretty broad set of exceptions to limit any changes in the memecache client
diff --git a/hbase-hadoop2-compat/pom.xml b/hbase-hadoop2-compat/pom.xml
index 071cf2d2e3f..925cc5d7b57 100644
--- a/hbase-hadoop2-compat/pom.xml
+++ b/hbase-hadoop2-compat/pom.xml
@@ -170,6 +170,12 @@ limitations under the License.
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop-two.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.commons</groupId>
diff --git a/hbase-it/pom.xml b/hbase-it/pom.xml
index 0834b6506e2..a95827a0525 100644
--- a/hbase-it/pom.xml
+++ b/hbase-it/pom.xml
@@ -266,7 +266,7 @@
<groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
<groupId>javax.ws.rs</groupId>
@@ -350,6 +350,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
@@ -400,10 +406,22 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
index 71e0d0b3f9f..503d4c10dc7 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/mttr/IntegrationTestMTTR.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
-import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.chaos.actions.Action;
import org.apache.hadoop.hbase.chaos.actions.MoveRegionsOfTableAction;
import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction;
@@ -62,20 +61,19 @@ import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.ipc.FatalConnectionException;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
+import org.apache.hadoop.hbase.testclassification.IntegrationTests;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.AlwaysSampler;
+import org.apache.htrace.core.AlwaysSampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
-
/**
* Integration test that should benchmark how fast HBase can recover from failures. This test starts
* different threads:
@@ -268,7 +266,7 @@ public class IntegrationTestMTTR {
loadTool = null;
}
-
+
private static boolean tablesOnMaster() {
boolean ret = true;
String value = util.getConfiguration().get("hbase.balancer.tablesOnMaster");
@@ -369,7 +367,7 @@ public class IntegrationTestMTTR {
*/
private static class TimingResult {
DescriptiveStatistics stats = new DescriptiveStatistics();
-    ArrayList<Long> traces = new ArrayList<>(10);
+    ArrayList<String> traces = new ArrayList<>(10);
/**
* Add a result to this aggregate result.
@@ -377,9 +375,12 @@ public class IntegrationTestMTTR {
* @param span Span. To be kept if the time taken was over 1 second
*/
public void addResult(long time, Span span) {
+ if (span == null) {
+ return;
+ }
stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS));
if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) {
- traces.add(span.getTraceId());
+ traces.add(span.getTracerId());
}
}
@@ -419,12 +420,15 @@ public class IntegrationTestMTTR {
final int maxIterations = 10;
int numAfterDone = 0;
int resetCount = 0;
+ TraceUtil.addSampler(AlwaysSampler.INSTANCE);
// Keep trying until the rs is back up and we've gotten a put through
while (numAfterDone < maxIterations) {
long start = System.nanoTime();
- TraceScope scope = null;
- try {
- scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE);
+ Span span = null;
+ try (TraceScope scope = TraceUtil.createTrace(getSpanName())) {
+ if (scope != null) {
+ span = scope.getSpan();
+ }
boolean actionResult = doAction();
if (actionResult && future.isDone()) {
numAfterDone++;
@@ -470,12 +474,8 @@ public class IntegrationTestMTTR {
LOG.info("Too many unexpected Exceptions. Aborting.", e);
throw e;
}
- } finally {
- if (scope != null) {
- scope.close();
- }
}
- result.addResult(System.nanoTime() - start, scope.getSpan());
+ result.addResult(System.nanoTime() - start, span);
}
return result;
}
diff --git a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
index 327d8792d60..780c4618768 100644
--- a/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
+++ b/hbase-it/src/test/java/org/apache/hadoop/hbase/trace/IntegrationTestSendTraceRequests.java
@@ -35,9 +35,8 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@@ -117,13 +116,12 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
for (int i = 0; i < 100; i++) {
Runnable runnable = new Runnable() {
- private TraceScope innerScope = null;
private final LinkedBlockingQueue<Long> rowKeyQueue = rks;
@Override
public void run() {
ResultScanner rs = null;
- try {
- innerScope = Trace.startSpan("Scan", Sampler.ALWAYS);
+ TraceUtil.addSampler(Sampler.ALWAYS);
+ try (TraceScope scope = TraceUtil.createTrace("Scan")){
Table ht = util.getConnection().getTable(tableName);
Scan s = new Scan();
s.setStartRow(Bytes.toBytes(rowKeyQueue.take()));
@@ -137,20 +135,15 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
accum |= Bytes.toLong(r.getRow());
}
- innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum);
+ TraceUtil.addTimelineAnnotation("Accum result = " + accum);
ht.close();
ht = null;
} catch (IOException e) {
e.printStackTrace();
-
- innerScope.getSpan().addKVAnnotation(
- Bytes.toBytes("exception"),
- Bytes.toBytes(e.getClass().getSimpleName()));
-
+ TraceUtil.addKVAnnotation("exception", e.getClass().getSimpleName());
} catch (Exception e) {
} finally {
- if (innerScope != null) innerScope.close();
if (rs != null) rs.close();
}
@@ -165,7 +158,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
throws IOException {
for (int i = 0; i < 100; i++) {
Runnable runnable = new Runnable() {
- private TraceScope innerScope = null;
private final LinkedBlockingQueue<Long> rowKeyQueue = rowKeys;
@Override
@@ -180,9 +172,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
}
long accum = 0;
+ TraceUtil.addSampler(Sampler.ALWAYS);
for (int x = 0; x < 5; x++) {
- try {
- innerScope = Trace.startSpan("gets", Sampler.ALWAYS);
+ try (TraceScope scope = TraceUtil.createTrace("gets")) {
long rk = rowKeyQueue.take();
Result r1 = ht.get(new Get(Bytes.toBytes(rk)));
if (r1 != null) {
@@ -192,14 +184,10 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
if (r2 != null) {
accum |= Bytes.toLong(r2.getRow());
}
- innerScope.getSpan().addTimelineAnnotation("Accum = " + accum);
+ TraceUtil.addTimelineAnnotation("Accum = " + accum);
- } catch (IOException e) {
+ } catch (IOException|InterruptedException ie) {
// IGNORED
- } catch (InterruptedException ie) {
- // IGNORED
- } finally {
- if (innerScope != null) innerScope.close();
}
}
@@ -210,25 +198,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
}
private void createTable() throws IOException {
- TraceScope createScope = null;
- try {
- createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
+ TraceUtil.addSampler(Sampler.ALWAYS);
+ try (TraceScope scope = TraceUtil.createTrace("createTable")) {
util.createTable(tableName, familyName);
- } finally {
- if (createScope != null) createScope.close();
}
}
private void deleteTable() throws IOException {
- TraceScope deleteScope = null;
-
- try {
+ TraceUtil.addSampler(Sampler.ALWAYS);
+ try (TraceScope scope = TraceUtil.createTrace("deleteTable")) {
if (admin.tableExists(tableName)) {
- deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
util.deleteTable(tableName);
}
- } finally {
- if (deleteScope != null) deleteScope.close();
}
}
@@ -236,9 +217,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
byte[] value = new byte[300];
+ TraceUtil.addSampler(Sampler.ALWAYS);
for (int x = 0; x < 5000; x++) {
- TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
- try {
+ try (TraceScope traceScope = TraceUtil.createTrace("insertData")) {
for (int i = 0; i < 5; i++) {
long rk = random.nextLong();
rowKeys.add(rk);
@@ -252,8 +233,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
if ((x % 1000) == 0) {
admin.flush(tableName);
}
- } finally {
- traceScope.close();
}
}
admin.flush(tableName);
diff --git a/hbase-mapreduce/pom.xml b/hbase-mapreduce/pom.xml
index e9a63b9ccb3..2b490c1609b 100644
--- a/hbase-mapreduce/pom.xml
+++ b/hbase-mapreduce/pom.xml
@@ -181,7 +181,7 @@
<groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
<groupId>org.apache.hbase</groupId>
@@ -246,6 +246,10 @@
<type>test-jar</type>
<scope>test</scope>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@@ -331,6 +335,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@@ -377,6 +385,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@@ -415,6 +427,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
@@ -439,11 +457,23 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
index 40e2cb9bcca..31d33f2cf3e 100644
--- a/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
+++ b/hbase-mapreduce/src/main/java/org/apache/hadoop/hbase/mapreduce/TableMapReduceUtil.java
@@ -818,7 +818,7 @@ public class TableMapReduceUtil {
com.google.protobuf.Message.class,
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.class,
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class,
- org.apache.htrace.Trace.class,
+ org.apache.htrace.core.Tracer.class,
com.codahale.metrics.MetricRegistry.class,
org.apache.commons.lang3.ArrayUtils.class,
com.fasterxml.jackson.databind.ObjectMapper.class,
diff --git a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
index bc36cdefc80..29176050814 100644
--- a/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
+++ b/hbase-mapreduce/src/test/java/org/apache/hadoop/hbase/PerformanceEvaluation.java
@@ -31,10 +31,10 @@ import java.util.Date;
import java.util.LinkedList;
import java.util.Locale;
import java.util.Map;
+import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Random;
import java.util.TreeMap;
-import java.util.NoSuchElementException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
@@ -48,7 +48,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.AsyncConnection;
@@ -81,9 +80,17 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
+import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.*;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.ByteArrayHashKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Hash;
+import org.apache.hadoop.hbase.util.MurmurHash;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.YammerHistogramUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
@@ -93,17 +100,15 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
-import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.htrace.core.ProbabilitySampler;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.UniformReservoir;
-import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;
/**
* Script used evaluating HBase performance and scalability. Runs a HBase
@@ -1034,7 +1039,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
protected final TestOptions opts;
private final Status status;
-    private final Sampler<?> traceSampler;
+    private final Sampler traceSampler;
private final SpanReceiverHost receiverHost;
private String testName;
@@ -1182,17 +1187,15 @@ public class PerformanceEvaluation extends Configured implements Tool {
void testTimed() throws IOException, InterruptedException {
int startRow = getStartRow();
int lastRow = getLastRow();
+ TraceUtil.addSampler(traceSampler);
// Report on completion of 1/10th of total.
for (int ii = 0; ii < opts.cycles; ii++) {
if (opts.cycles > 1) LOG.info("Cycle=" + ii + " of " + opts.cycles);
for (int i = startRow; i < lastRow; i++) {
if (i % everyN != 0) continue;
long startTime = System.nanoTime();
- TraceScope scope = Trace.startSpan("test row", traceSampler);
- try {
+ try (TraceScope scope = TraceUtil.createTrace("test row");){
testRow(i);
- } finally {
- scope.close();
}
if ( (i - startRow) > opts.measureAfter) {
// If multiget is enabled, say set to 10, testRow() returns immediately first 9 times
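[Reviewer note] The sampler handling changes shape here: htrace 3 accepted a sampler per span (Trace.startSpan(name, sampler)), while htrace 4 attaches samplers to the Tracer itself, which is why the port hoists a single TraceUtil.addSampler(traceSampler) call above the loop. The resulting flow, reduced to a sketch:

    TraceUtil.addSampler(traceSampler);          // once, outside the hot loop
    for (int i = startRow; i < lastRow; i++) {
      try (TraceScope scope = TraceUtil.createTrace("test row")) {
        testRow(i);                              // traced or not, per the tracer's samplers
      }
    }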
diff --git a/hbase-procedure/pom.xml b/hbase-procedure/pom.xml
index 82644c233a8..3f87d210591 100644
--- a/hbase-procedure/pom.xml
+++ b/hbase-procedure/pom.xml
@@ -164,6 +164,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-protocol-shaded/pom.xml b/hbase-protocol-shaded/pom.xml
index 275de491fd5..634a98dab11 100644
--- a/hbase-protocol-shaded/pom.xml
+++ b/hbase-protocol-shaded/pom.xml
@@ -230,6 +230,10 @@
<artifactId>junit</artifactId>
<scope>test</scope>
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core4</artifactId>
+    </dependency>
diff --git a/hbase-replication/pom.xml b/hbase-replication/pom.xml
index 7d4569ffd59..a8a75afaa8b 100644
--- a/hbase-replication/pom.xml
+++ b/hbase-replication/pom.xml
@@ -165,6 +165,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@@ -229,6 +233,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-rest/pom.xml b/hbase-rest/pom.xml
index d23b82809db..a83b9a8552a 100644
--- a/hbase-rest/pom.xml
+++ b/hbase-rest/pom.xml
@@ -202,6 +202,10 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+    </dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
@@ -387,6 +391,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
@@ -426,6 +436,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
<groupId>org.apache.hadoop</groupId>
diff --git a/hbase-rsgroup/pom.xml b/hbase-rsgroup/pom.xml
index 853695a1c6b..de46acb4858 100644
--- a/hbase-rsgroup/pom.xml
+++ b/hbase-rsgroup/pom.xml
@@ -202,6 +202,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@@ -265,6 +269,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
diff --git a/hbase-server/pom.xml b/hbase-server/pom.xml
index 31be5e24cc1..97f8f9dacd0 100644
--- a/hbase-server/pom.xml
+++ b/hbase-server/pom.xml
@@ -538,9 +538,14 @@
+    <dependency>
+      <groupId>org.apache.htrace</groupId>
+      <artifactId>htrace-core4</artifactId>
+    </dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
+      <version>${htrace-hadoop.version}</version>
<groupId>com.lmax</groupId>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
index cfe3d61ee0a..1056c20453b 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/executor/EventHandler.java
@@ -23,11 +23,12 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Server;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.yetus.audience.InterfaceAudience;
/**
* Abstract base class for all HBase event handlers. Subclasses should
@@ -74,7 +75,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
* Default base class constructor.
*/
public EventHandler(Server server, EventType eventType) {
- this.parent = Trace.currentSpan();
+ this.parent = Tracer.getCurrentSpan();
this.server = server;
this.eventType = eventType;
seqid = seqids.incrementAndGet();
@@ -99,13 +100,10 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
@Override
public void run() {
- TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
- try {
+ try (TraceScope scope = TraceUtil.createTrace(this.getClass().getSimpleName(), parent)) {
process();
} catch(Throwable t) {
handleException(t);
- } finally {
- chunk.close();
}
}
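[Reviewer note] EventHandler shows the standard htrace 4 cross-thread handoff: capture the current span on the submitting thread (the constructor), then open a child scope in the worker thread (run()). The same pattern as a freestanding sketch:

    Span parent = Tracer.getCurrentSpan();       // on the submitting thread; may be null
    Runnable work = () -> {
      try (TraceScope scope = TraceUtil.createTrace("MyHandler", parent)) {
        // child of parent when tracing; null scope (a no-op) otherwise
      }
    };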
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
index 9e2902382e4..f216f4219c2 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/io/hfile/HFileReaderImpl.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.SizeCachedKeyValue;
import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
@@ -59,8 +60,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.hbase.util.ObjectIntPair;
import org.apache.hadoop.io.WritableUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@@ -255,6 +255,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// Prefetch file blocks upon open if requested
if (cacheConf.shouldPrefetchOnOpen()) {
PrefetchExecutor.request(path, new Runnable() {
+ @Override
public void run() {
long offset = 0;
long end = 0;
@@ -436,6 +437,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
* @return the total heap size of data and meta block indexes in bytes. Does
* not take into account non-root blocks of a multilevel data index.
*/
+ @Override
public long indexSize() {
return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
+ ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
@@ -1239,6 +1241,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
}
}
+ @Override
public Path getPath() {
return path;
}
@@ -1276,10 +1279,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
protected boolean decodeMemstoreTS = false;
+ @Override
public boolean isDecodeMemStoreTS() {
return this.decodeMemstoreTS;
}
+ @Override
public boolean shouldIncludeMemStoreTS() {
return includesMemstoreTS;
}
@@ -1437,8 +1442,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
boolean useLock = false;
IdLock.Entry lockEntry = null;
- TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock");
- try {
+ try (TraceScope traceScope = TraceUtil.createTrace("HFileReaderImpl.readBlock")) {
while (true) {
// Check cache for block. If found return.
if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
@@ -1453,9 +1457,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
if (LOG.isTraceEnabled()) {
LOG.trace("From Cache " + cachedBlock);
}
- if (Trace.isTracing()) {
- traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
- }
+ TraceUtil.addTimelineAnnotation("blockCacheHit");
assert cachedBlock.isUnpacked() : "Packed block leak.";
if (cachedBlock.getBlockType().isData()) {
if (updateCacheMetrics) {
@@ -1481,9 +1483,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// Carry on, please load.
}
- if (Trace.isTracing()) {
- traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
- }
+ TraceUtil.addTimelineAnnotation("blockCacheMiss");
// Load block from filesystem.
HFileBlock hfileBlock =
fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction);
@@ -1505,7 +1505,6 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return unpacked;
}
} finally {
- traceScope.close();
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
@@ -1568,6 +1567,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
close(cacheConf.shouldEvictOnClose());
}
+ @Override
public void close(boolean evictOnClose) throws IOException {
PrefetchExecutor.cancel(path);
if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
@@ -1580,11 +1580,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
fsBlockReader.closeStreams();
}
+ @Override
public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
return dataBlockEncoder.getEffectiveEncodingInCache(isCompaction);
}
/** For testing */
+ @Override
public HFileBlock.FSReader getUncachedBlockReader() {
return fsBlockReader;
}
@@ -1612,6 +1614,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return curBlock != null;
}
+ @Override
public void setNonSeekedState() {
reset();
}
@@ -1713,6 +1716,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
}
}
+ @Override
protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) {
return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock));
}
@@ -1730,6 +1734,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return seeker.seekToKeyInBlock(key, seekBefore);
}
+ @Override
public int compareKey(CellComparator comparator, Cell key) {
return seeker.compareKey(comparator, key);
}
@@ -1776,6 +1781,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
* Returns false if block prefetching was requested for this file and has
* not completed, true otherwise
*/
+ @Override
@VisibleForTesting
public boolean prefetchComplete() {
return PrefetchExecutor.isCompleted(path);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
index d4fc70624ed..141674dcfeb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/CallRunner.java
@@ -24,6 +24,7 @@ import java.util.Optional;
import org.apache.hadoop.hbase.CallDroppedException;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@@ -32,8 +33,6 @@ import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
/**
* The request processing logic, which is usually executed in thread pools provided by an
@@ -116,20 +115,17 @@ public class CallRunner {
String error = null;
Pair<Message, CellScanner> resultPair = null;
RpcServer.CurCall.set(call);
- TraceScope traceScope = null;
try {
if (!this.rpcServer.isStarted()) {
InetSocketAddress address = rpcServer.getListenerAddress();
throw new ServerNotRunningYetException("Server " +
(address != null ? address : "(channel closed)") + " is not running yet");
}
- if (call.getTraceInfo() != null) {
- String serviceName =
- call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
- String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
- String traceString = serviceName + "." + methodName;
- traceScope = Trace.startSpan(traceString, call.getTraceInfo());
- }
+ String serviceName =
+ call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
+ String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
+ String traceString = serviceName + "." + methodName;
+ try (TraceScope traceScope = TraceUtil.createTrace(traceString)) {
// make the call
resultPair = this.rpcServer.call(call, this.status);
+ }
} catch (TimeoutIOException e){
@@ -150,9 +146,6 @@ public class CallRunner {
throw (Error)e;
}
} finally {
- if (traceScope != null) {
- traceScope.close();
- }
RpcServer.CurCall.set(null);
if (resultPair != null) {
this.rpcServer.addCallSize(call.getSize() * -1);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
index 7fd4736491c..f86fa774bac 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyRpcServer.java
@@ -181,7 +181,7 @@ public class NettyRpcServer extends RpcServer {
Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
long startTime, int timeout) throws IOException {
NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null,
- -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
+ -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
return call(fakeCall, status);
}
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
index 7dfdc7272e5..70b9da3014c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerCall.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
/**
* Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -40,9 +39,9 @@ class NettyServerCall extends ServerCall<NettyServerRpcConnection> {
NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size,
- TraceInfo tinfo, InetAddress remoteAddress, long receiveTime, int timeout,
+ InetAddress remoteAddress, long receiveTime, int timeout,
ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
- super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+ super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
index 21c7f519056..a91aafb58ec 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/NettyServerRpcConnection.java
@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
/**
* RpcConnection implementation for netty rpc server.
@@ -119,9 +118,9 @@ class NettyServerRpcConnection extends ServerRpcConnection {
@Override
public NettyServerCall createCall(int id, final BlockingService service,
final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner,
- long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout,
+ long size, final InetAddress remoteAddress, int timeout,
CallCleanup reqCleanup) {
- return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+ return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
this.rpcServer.cellBlockBuilder, reqCleanup);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
index 3562d86fd34..51b168493d6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/RpcCall.java
@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
/**
* Interface of all necessary to carry out a RPC method invocation on the server.
@@ -133,9 +132,4 @@ public interface RpcCall extends RpcCallContext {
* @return A short string format of this call without possibly lengthy params
*/
String toShortString();
-
- /**
- * @return TraceInfo attached to this call.
- */
- TraceInfo getTraceInfo();
}
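
With getTraceInfo() gone, the request header's trace identity is no longer threaded into the server-side span; the CallRunner hunk above starts an unparented span, and the cross-process linkage is deferred (see the HBASE-18895 TODOs later in this patch). A hedged sketch of how the linkage could be restored with htrace-core4 primitives — SpanId and the two-argument newScope exist in htrace-core4, while the header fields here are assumptions:

    // Assumed: trace/parent ids recovered from the RPC request header.
    SpanId parentId = new SpanId(headerTraceId, headerParentId);
    Tracer tracer = Tracer.curThreadTracer();
    if (tracer != null) {
      try (TraceScope scope = tracer.newScope(traceString, parentId)) {
        // handle the call with the client's span as its parent
      }
    }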
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
index 60fe30e954b..2fca7f1aedf 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerCall.java
@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeade
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.TraceInfo;
/**
* Datastructure that holds all necessary to a method invocation and then afterward, carries
@@ -79,7 +78,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
protected final long size; // size of current call
protected boolean isError;
- protected final TraceInfo tinfo;
protected ByteBufferListOutputStream cellBlockStream = null;
protected CallCleanup reqCleanup = null;
@@ -96,7 +94,7 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
justification="Can't figure why this complaint is happening... see below")
ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
- Message param, CellScanner cellScanner, T connection, long size, TraceInfo tinfo,
+ Message param, CellScanner cellScanner, T connection, long size,
InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
this.id = id;
@@ -110,7 +108,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
this.response = null;
this.isError = false;
this.size = size;
- this.tinfo = tinfo;
if (connection != null) {
this.user = connection.user;
this.retryImmediatelySupported = connection.retryImmediatelySupported;
@@ -506,11 +503,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
return connection.getRemotePort();
}
- @Override
- public TraceInfo getTraceInfo() {
- return tinfo;
- }
-
@Override
public synchronized BufferChain getResponse() {
return response;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
index e1ac74147a9..4d0239f4089 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/ServerRpcConnection.java
@@ -77,7 +77,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.htrace.TraceInfo;
/** Reads calls from a connection and queues them for handling. */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
@@ -632,7 +631,7 @@ abstract class ServerRpcConnection implements Closeable {
if ((totalRequestSize +
this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) {
final ServerCall<?> callTooBig = createCall(id, this.service, null, null, null, null,
- totalRequestSize, null, null, 0, this.callCleanup);
+ totalRequestSize, null, 0, this.callCleanup);
this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION,
"Call queue is full on " + this.rpcServer.server.getServerName() +
@@ -694,21 +693,18 @@ abstract class ServerRpcConnection implements Closeable {
}
ServerCall<?> readParamsFailedCall = createCall(id, this.service, null, null, null, null,
- totalRequestSize, null, null, 0, this.callCleanup);
+ totalRequestSize, null, 0, this.callCleanup);
readParamsFailedCall.setResponse(null, null, t, msg + "; " + t.getMessage());
readParamsFailedCall.sendResponseIfReady();
return;
}
- TraceInfo traceInfo = header.hasTraceInfo() ? new TraceInfo(header
- .getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
- : null;
int timeout = 0;
if (header.hasTimeout() && header.getTimeout() > 0) {
timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
}
ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize,
- traceInfo, this.addr, timeout, this.callCleanup);
+ this.addr, timeout, this.callCleanup);
if (!this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize());
@@ -790,7 +786,7 @@ abstract class ServerRpcConnection implements Closeable {
public abstract boolean isConnectionOpen();
public abstract ServerCall<?> createCall(int id, BlockingService service, MethodDescriptor md,
- RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+ RequestHeader header, Message param, CellScanner cellScanner, long size,
InetAddress remoteAddress, int timeout, CallCleanup reqCleanup);
private static class ByteBuffByteInput extends ByteInput {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
index 69cc48d4d3d..36ae74a0608 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleRpcServer.java
@@ -489,7 +489,7 @@ public class SimpleRpcServer extends RpcServer {
Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
long startTime, int timeout) throws IOException {
SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner,
- null, -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
+ null, -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
return call(fakeCall, status);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
index 5a26c05b465..46295fda15c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerCall.java
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
/**
* Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -43,10 +42,10 @@ class SimpleServerCall extends ServerCall<SimpleServerRpcConnection> {
justification = "Can't figure why this complaint is happening... see below")
SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md,
RequestHeader header, Message param, CellScanner cellScanner,
- SimpleServerRpcConnection connection, long size, TraceInfo tinfo,
+ SimpleServerRpcConnection connection, long size,
final InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) {
- super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+ super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
this.responder = responder;
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
index 68545f3c706..c8dfe4ab8a3 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/ipc/SimpleServerRpcConnection.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.util.Pair;
-import org.apache.htrace.TraceInfo;
/** Reads calls from a connection and queues them for handling. */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT",
@@ -212,7 +211,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
// Notify the client about the offending request
SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
- null, null, null, this, 0, null, this.addr, System.currentTimeMillis(), 0,
+ null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, null, responder);
this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
// Make sure the client recognizes the underlying exception
@@ -343,9 +342,9 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
@Override
public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md,
- RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+ RequestHeader header, Message param, CellScanner cellScanner, long size,
InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
- return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+ return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
index 91c5218c527..cad77e5d952 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMaster.java
@@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
@@ -470,6 +471,7 @@ public class HMaster extends HRegionServer implements MasterServices {
public HMaster(final Configuration conf)
throws IOException, KeeperException {
super(conf);
+ TraceUtil.initTracer(conf);
try {
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
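
TraceUtil.initTracer, called from the server constructors and command lines in this patch, also lives in the new hbase-common helper. A plausible sketch, assuming receivers and samplers are picked up from the wrapped configuration; Tracer.Builder, conf(...) and build() are real htrace-core4 calls, the tracer name is an assumption:

    public static synchronized void initTracer(Configuration c) {
      if (tracer == null && c != null) {
        // HBaseHTraceConfiguration adapts an HBase Configuration to HTraceConfiguration.
        tracer = new Tracer.Builder("Tracer")
            .conf(new HBaseHTraceConfiguration(c))
            .build();
      }
    }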
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
index f9a441d5ab1..093412a47f4 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/master/HMasterCommandLine.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
@@ -147,6 +148,8 @@ public class HMasterCommandLine extends ServerCommandLine {
private int startMaster() {
Configuration conf = getConf();
+ TraceUtil.initTracer(conf);
+
try {
// If 'local', defer to LocalHBaseCluster instance. Starts master
// and regionserver both in the one JVM.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index d3cd087f21b..197aa3ce808 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -91,11 +91,11 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
@@ -149,33 +149,6 @@ import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CancelableProgressable;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.CollectionUtils;
-import org.apache.hadoop.hbase.util.CompressionTest;
-import org.apache.hadoop.hbase.util.EncryptionTest;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HashedBytes;
-import org.apache.hadoop.hbase.util.NonceKey;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
-import org.apache.hadoop.io.MultipleIOException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.yetus.audience.InterfaceAudience;
-
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
@@ -198,6 +171,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescript
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.CollectionUtils;
+import org.apache.hadoop.hbase.util.CompressionTest;
+import org.apache.hadoop.hbase.util.EncryptionTest;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HashedBytes;
+import org.apache.hadoop.hbase.util.NonceKey;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WALSplitter;
+import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
import edu.umd.cs.findbugs.annotations.Nullable;
@@ -3727,6 +3726,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
}
+ @Override
public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
}
@@ -5560,16 +5560,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
RowLockContext rowLockContext = null;
RowLockImpl result = null;
- TraceScope traceScope = null;
-
- // If we're tracing start a span to show how long this took.
- if (Trace.isTracing()) {
- traceScope = Trace.startSpan("HRegion.getRowLock");
- traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
- }
boolean success = false;
- try {
+ try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
+ TraceUtil.addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
// Keep trying until we have a lock or error out.
// TODO: do we need to add a time component here?
while (result == null) {
@@ -5598,9 +5592,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
- if (traceScope != null) {
- traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
- }
+ TraceUtil.addTimelineAnnotation("Failed to get row lock");
result = null;
String message = "Timed out waiting for lock for row: " + rowKey + " in region "
+ getRegionInfo().getEncodedName();
@@ -5618,9 +5610,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
InterruptedIOException iie = new InterruptedIOException();
iie.initCause(ie);
- if (traceScope != null) {
- traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
- }
+ TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock");
Thread.currentThread().interrupt();
throw iie;
} finally {
@@ -5628,9 +5618,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
if (!success && rowLockContext != null) {
rowLockContext.cleanUp();
}
- if (traceScope != null) {
- traceScope.close();
- }
}
}
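
The try-with-resources conversions in this hunk rely on a Java language guarantee worth spelling out: a null resource is legal, and close() is simply skipped for it, so a null return from TraceUtil.createTrace needs no guard for closing (only for dereferencing, hence the null-safe addTimelineAnnotation helper). A self-contained demonstration:

    public class NullResourceDemo {
      public static void main(String[] args) throws Exception {
        try (AutoCloseable scope = null) {
          // body runs normally; close() is skipped because the resource is null
          System.out.println("no NullPointerException here");
        }
      }
    }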
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 6ad595f36a0..4c34fe0e0af 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
@@ -526,6 +527,7 @@ public class HRegionServer extends HasThread implements
// Defer till after we register with the Master as much as possible. See #startServices.
public HRegionServer(Configuration conf) throws IOException {
super("RegionServer"); // thread name
+ TraceUtil.initTracer(conf);
try {
this.startcode = System.currentTimeMillis();
this.conf = conf;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
index 1212668ca02..c2e1111e190 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServerCommandLine.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +51,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
private int start() throws Exception {
Configuration conf = getConf();
+ TraceUtil.initTracer(conf);
try {
// If 'local', don't start a region server here. Defer to
// LocalHBaseCluster. It manages 'local' clusters.
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
index ae4c8ebc2aa..a314848cab7 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MemStoreFlusher.java
@@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
@@ -51,12 +53,9 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-
/**
* Thread that flushes cache on request
*
@@ -447,7 +446,7 @@ class MemStoreFlusher implements FlushRequester {
"store files; delaying flush up to " + this.blockingWaitTime + "ms");
if (!this.server.compactSplitThread.requestSplit(region)) {
try {
- this.server.compactSplitThread.requestSystemCompaction((HRegion) region,
+ this.server.compactSplitThread.requestSystemCompaction(region,
Thread.currentThread().getName());
} catch (IOException e) {
e = e instanceof RemoteException ?
@@ -572,12 +571,10 @@ class MemStoreFlusher implements FlushRequester {
* amount of memstore consumption.
*/
public void reclaimMemStoreMemory() {
- TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
+ TraceScope scope = TraceUtil.createTrace("MemStoreFluser.reclaimMemStoreMemory");
FlushType flushType = isAboveHighWaterMark();
if (flushType != FlushType.NORMAL) {
- if (Trace.isTracing()) {
- scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
- }
+ TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
long start = EnvironmentEdgeManager.currentTime();
synchronized (this.blockSignal) {
boolean blocked = false;
@@ -640,7 +637,9 @@ class MemStoreFlusher implements FlushRequester {
} else if (isAboveLowWaterMark() != FlushType.NORMAL) {
wakeupFlushThread();
}
- scope.close();
+ if (scope != null) {
+ scope.close();
+ }
}
private void logMsg(String string1, long val, long max) {
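
reclaimMemStoreMemory is the one converted site above that still closes its scope by hand, which leaks the span if anything between createTrace and the final close throws. An equivalent, safer shape for the same method (a sketch, body elided):

    public void reclaimMemStoreMemory() {
      try (TraceScope scope = TraceUtil.createTrace("MemStoreFlusher.reclaimMemStoreMemory")) {
        // ... existing flush/block logic, unchanged ...
      } // close() runs on every exit path, and is skipped if scope is null
    }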
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
index ad54cabec55..f7fbd868088 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AbstractFSWAL.java
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -72,14 +74,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
import com.lmax.disruptor.RingBuffer;
/**
@@ -681,8 +679,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
* @throws IOException if there is a problem flushing or closing the underlying FS
*/
Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
- TraceScope scope = Trace.startSpan("FSHFile.replaceWriter");
- try {
+ try (TraceScope scope = TraceUtil.createTrace("FSHFile.replaceWriter")) {
long oldFileLen = doReplaceWriter(oldPath, newPath, nextWriter);
int oldNumEntries = this.numEntries.getAndSet(0);
final String newPathString = (null == newPath ? null : CommonFSUtils.getPath(newPath));
@@ -696,16 +693,16 @@ public abstract class AbstractFSWAL implements WAL {
LOG.info("New WAL " + newPathString);
}
return newPath;
- } finally {
- scope.close();
}
}
protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
// Now we have published the ringbuffer, halt the current thread until we get an answer back.
try {
- syncFuture.get(walSyncTimeoutNs);
- return syncFuture.getSpan();
+ if (syncFuture != null) {
+ syncFuture.get(walSyncTimeoutNs);
+ }
+ return (syncFuture == null) ? null : syncFuture.getSpan();
} catch (TimeoutIOException tioe) {
// SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
// still refer to it, so if this thread use it next time may get a wrong
@@ -748,8 +745,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
LOG.debug("WAL closing. Skipping rolling of writer");
return regionsToFlush;
}
- TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
- try {
+ try (TraceScope scope = TraceUtil.createTrace("FSHLog.rollWriter")) {
Path oldPath = getOldPath();
Path newPath = getNewPath();
// Any exception from here on is catastrophic, non-recoverable so we currently abort.
@@ -774,8 +770,6 @@ public abstract class AbstractFSWAL implements WAL {
"for details.", exception);
} finally {
closeBarrier.endOp();
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close();
}
return regionsToFlush;
} finally {
@@ -950,7 +944,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
if (timeInNanos > this.slowSyncNs) {
String msg = new StringBuilder().append("Slow sync cost: ").append(timeInNanos / 1000000)
.append(" ms, current pipeline: ").append(Arrays.toString(getPipeline())).toString();
- Trace.addTimelineAnnotation(msg);
+ TraceUtil.addTimelineAnnotation(msg);
LOG.info(msg);
}
if (!listeners.isEmpty()) {
@@ -966,16 +960,20 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
if (this.closed) {
throw new IOException("Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
}
- TraceScope scope = Trace.startSpan(implClassName + ".append");
MutableLong txidHolder = new MutableLong();
MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
txidHolder.setValue(ringBuffer.next());
});
long txid = txidHolder.longValue();
- try {
+ try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
entry.stampRegionSequenceId(we);
- ringBuffer.get(txid).load(entry, scope.detach());
+ ringBuffer.get(txid).load(entry, scope == null ? null : scope.getSpan());
} finally {
ringBuffer.publish(txid);
}
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
index cff3f70ca16..d4e113a42be 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/AsyncFSWAL.java
@@ -44,6 +44,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
@@ -52,18 +54,14 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.yetus.audience.InterfaceAudience;
-
import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.SingleThreadEventExecutor;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
import com.lmax.disruptor.RingBuffer;
import com.lmax.disruptor.Sequence;
@@ -342,9 +340,9 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
}
private void addTimeAnnotation(SyncFuture future, String annotation) {
- TraceScope scope = Trace.continueSpan(future.getSpan());
- Trace.addTimelineAnnotation(annotation);
- future.setSpan(scope.detach());
+ TraceUtil.addTimelineAnnotation(annotation);
+ //TODO handle htrace API change, see HBASE-18895
+ //future.setSpan(scope.getSpan());
}
private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) {
@@ -415,14 +413,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
Span span = entry.detachSpan();
// the span maybe null if this is a retry after rolling.
if (span != null) {
- TraceScope scope = Trace.continueSpan(span);
+ //TODO handle htrace API change, see HBASE-18895
+ //TraceScope scope = Trace.continueSpan(span);
try {
appended = append(writer, entry);
} catch (IOException e) {
throw new AssertionError("should not happen", e);
} finally {
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close(); // append scope is complete
+ //TODO handle htrace API change, see HBASE-18895
+ //assert scope == NullScope.INSTANCE || !scope.isDetached();
+ //scope.close(); // append scope is complete
}
} else {
try {
@@ -559,24 +559,26 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
@Override
public void sync() throws IOException {
- TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
- try {
+ try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")){
long txid = waitingConsumePayloads.next();
SyncFuture future;
try {
- future = getSyncFuture(txid, scope.detach());
+ future = getSyncFuture(txid, scope == null ? null : scope.getSpan());
RingBufferTruck truck = waitingConsumePayloads.get(txid);
truck.load(future);
} finally {
waitingConsumePayloads.publish(txid);
}
if (shouldScheduleConsumer()) {
eventLoop.execute(consumer);
}
- scope = Trace.continueSpan(blockOnSync(future));
- } finally {
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close();
+ //TODO handle htrace API change, see HBASE-18895
+ //scope = Trace.continueSpan(blockOnSync(future));
+ blockOnSync(future);
}
}
@@ -585,25 +587,27 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
if (highestSyncedTxid.get() >= txid) {
return;
}
- TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
- try {
+ try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) {
// here we do not use ring buffer sequence as txid
long sequence = waitingConsumePayloads.next();
SyncFuture future;
try {
- future = getSyncFuture(txid, scope.detach());
+ future = getSyncFuture(txid, scope == null ? null : scope.getSpan());
RingBufferTruck truck = waitingConsumePayloads.get(sequence);
truck.load(future);
} finally {
waitingConsumePayloads.publish(sequence);
}
if (shouldScheduleConsumer()) {
eventLoop.execute(consumer);
}
- scope = Trace.continueSpan(blockOnSync(future));
- } finally {
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close();
+ //TODO handle htrace API change, see HBASE-18895
+ //scope = Trace.continueSpan(blockOnSync(future));
+ blockOnSync(future);
}
}
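
The disabled Trace.continueSpan calls above have no one-line htrace-core4 replacement: spans are now bound to the thread through the Tracer rather than detached and reattached by hand. Until HBASE-18895 settles the pattern, one possible shape is to open a child scope on the consumer thread keyed by the producer's SpanId — newScope(String, SpanId) and Span.getSpanId() are real htrace-core4 calls, the helper itself is hypothetical:

    // Hypothetical helper for re-parenting work that crossed the ring buffer.
    static TraceScope continueTrace(Tracer tracer, Span producerSpan, String description) {
      if (tracer == null || producerSpan == null) {
        return null;
      }
      return tracer.newScope(description, producerSpan.getSpanId());
    }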
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
index cc9601b5b28..c4e23dac802 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSHLog.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -54,10 +55,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@@ -345,7 +344,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
// use assert to make sure no change breaks the logic that
// sequence and zigzagLatch will be set together
assert sequence > 0L : "Failed to get sequence from ring buffer";
- Trace.addTimelineAnnotation("awaiting safepoint");
+ TraceUtil.addTimelineAnnotation("awaiting safepoint");
syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence));
}
} catch (FailedSyncBeforeLogCloseException e) {
@@ -361,9 +360,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
if (this.writer != null) {
oldFileLen = this.writer.getLength();
try {
- Trace.addTimelineAnnotation("closing writer");
+ TraceUtil.addTimelineAnnotation("closing writer");
this.writer.close();
- Trace.addTimelineAnnotation("writer closed");
+ TraceUtil.addTimelineAnnotation("writer closed");
this.closeErrorCount.set(0);
} catch (IOException ioe) {
int errors = closeErrorCount.incrementAndGet();
@@ -595,13 +594,14 @@ public class FSHLog extends AbstractFSWAL<Writer> {
}
// I got something. Lets run. Save off current sequence number in case it changes
// while we run.
- TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
+ //TODO handle htrace API change, see HBASE-18895
+ //TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
long start = System.nanoTime();
Throwable lastException = null;
try {
- Trace.addTimelineAnnotation("syncing writer");
+ TraceUtil.addTimelineAnnotation("syncing writer");
writer.sync();
- Trace.addTimelineAnnotation("writer synced");
+ TraceUtil.addTimelineAnnotation("writer synced");
currentSequence = updateHighestSyncedSequence(currentSequence);
} catch (IOException e) {
LOG.error("Error syncing, request close of WAL", e);
@@ -611,7 +611,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
lastException = e;
} finally {
// reattach the span to the future before releasing.
- takeSyncFuture.setSpan(scope.detach());
+ //TODO handle htrace API change, see HBASE-18895
+ // takeSyncFuture.setSpan(scope.getSpan());
// First release what we 'took' from the queue.
syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException);
// Can we release other syncs?
@@ -727,8 +728,15 @@ public class FSHLog extends AbstractFSWAL<Writer> {
}
// Sync all known transactions
- private Span publishSyncThenBlockOnCompletion(Span span) throws IOException {
- return blockOnSync(publishSyncOnRingBuffer(span));
+ private void publishSyncThenBlockOnCompletion(TraceScope scope) throws IOException {
+ SyncFuture syncFuture = publishSyncOnRingBuffer(scope == null ? null : scope.getSpan());
+ blockOnSync(syncFuture);
}
/**
@@ -754,12 +762,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
@Override
public void sync() throws IOException {
- TraceScope scope = Trace.startSpan("FSHLog.sync");
- try {
- scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
- } finally {
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close();
+ try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+ publishSyncThenBlockOnCompletion(scope);
}
}
@@ -769,12 +773,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
// Already sync'd.
return;
}
- TraceScope scope = Trace.startSpan("FSHLog.sync");
- try {
- scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
- } finally {
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close();
+ try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+ publishSyncThenBlockOnCompletion(scope);
}
}
@@ -996,7 +996,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
}
} else if (truck.type() == RingBufferTruck.Type.APPEND) {
FSWALEntry entry = truck.unloadAppend();
- TraceScope scope = Trace.continueSpan(entry.detachSpan());
+ //TODO handle htrace API change, see HBASE-18895
+ //TraceScope scope = Trace.continueSpan(entry.detachSpan());
try {
if (this.exception != null) {
@@ -1015,9 +1016,6 @@ public class FSHLog extends AbstractFSWAL {
: new DamagedWALException("On sync", this.exception));
// Return to keep processing events coming off the ringbuffer
return;
- } finally {
- assert scope == NullScope.INSTANCE || !scope.isDetached();
- scope.close(); // append scope is complete
}
} else {
// What is this if not an append or sync. Fail all up to this!!!
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
index 03ef0089eab..debe9e40f98 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/FSWALEntry.java
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
index a63b281929c..021f6a1463f 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/RingBufferTruck.java
@@ -18,8 +18,8 @@
*/
package org.apache.hadoop.hbase.regionserver.wal;
+import org.apache.htrace.core.Span;
import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.Span;
/**
* A 'truck' to carry a payload across the ring buffer from Handler to WAL. Has EITHER a
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
index 13d103bd067..0dbd0208e30 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/wal/SyncFuture.java
@@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
/**
* A Future on a filesystem sync call. It given to a client or 'Handler' for it to wait on till the
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
index 0a1c60f6947..3b3d5683cf0 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/HBaseTestingUtility.java
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
@@ -657,6 +658,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
setLevel(org.apache.log4j.Level.ERROR);
+ TraceUtil.initTracer(conf);
this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
true, null, null, hosts, null);
@@ -1125,6 +1127,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
Configuration c = new Configuration(this.conf);
+ TraceUtil.initTracer(c);
this.hbaseCluster =
new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
// Don't leave here till we've done a successful scan of the hbase:meta
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
index bb91770462d..8521e653b20 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/ipc/TestSimpleRpcScheduler.java
@@ -563,7 +563,7 @@ public class TestSimpleRpcScheduler {
ServerCall putCall = new ServerCall(1, null, null,
RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(),
RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))),
- null, null, 9, null, null, timestamp, 0, null, null, null) {
+ null, null, 9, null, timestamp, 0, null, null, null) {
@Override
public void sendResponseIfReady() throws IOException {
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
index 63bcbdf852a..33f77efc000 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TestHTraceHooks.java
@@ -18,25 +18,21 @@
package org.apache.hadoop.hbase.trace;
import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
-import java.lang.reflect.Method;
-import java.util.Collection;
-
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.TraceTree;
-import org.apache.htrace.impl.POJOSpanReceiver;
+import org.apache.htrace.core.POJOSpanReceiver;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+import org.apache.htrace.core.TraceScope;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
@@ -44,103 +40,84 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
+import java.util.Collection;
+import java.util.LinkedList;
+import java.util.List;
+
@Category({MiscTests.class, MediumTests.class})
public class TestHTraceHooks {
private static final byte[] FAMILY_BYTES = "family".getBytes();
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static POJOSpanReceiver rcvr;
- private static long ROOT_SPAN_ID = 0;
+ private static SpanId ROOT_SPAN_ID = new SpanId(0, 0);
@Rule
public TestName name = new TestName();
@BeforeClass
public static void before() throws Exception {
-
- // Find out what the right value to use fo SPAN_ROOT_ID after HTRACE-111. We use HTRACE-32
- // to find out to detect if we are using HTrace 3.2 or not.
- try {
- Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class);
- } catch (NoSuchMethodException e) {
- ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2
- }
-
TEST_UTIL.startMiniCluster(2, 3);
rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
- Trace.addReceiver(rcvr);
+ TraceUtil.addReceiver(rcvr);
+ TraceUtil.addSampler(new Sampler() {
+ @Override
+ public boolean next() {
+ return true;
+ }
+ });
}
@AfterClass
public static void after() throws Exception {
TEST_UTIL.shutdownMiniCluster();
- Trace.removeReceiver(rcvr);
+ TraceUtil.removeReceiver(rcvr);
rcvr = null;
}
@Test
public void testTraceCreateTable() throws Exception {
- TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
Table table;
- try {
-
+ Span createTableSpan;
+ try (TraceScope scope = TraceUtil.createTrace("creating table")) {
+ createTableSpan = scope.getSpan();
table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES);
- } finally {
- tableCreationSpan.close();
}
// Some table creation is async. Need to make sure that everything is full in before
// checking to see if the spans are there.
- TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
- @Override
- public boolean evaluate() throws Exception {
- return rcvr.getSpans().size() >= 5;
+ TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
+ @Override public boolean evaluate() throws Exception {
+ return (rcvr == null) ? true : rcvr.getSpans().size() >= 5;
}
});
- Collection<Span> spans = rcvr.getSpans();
+ Collection<Span> spans = Sets.newHashSet(rcvr.getSpans());
+ List<Span> roots = new LinkedList<>();
TraceTree traceTree = new TraceTree(spans);
- Collection<Span> roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
+ roots.addAll(traceTree.getSpansByParent().find(createTableSpan.getSpanId()));
- assertEquals(1, roots.size());
- Span createTableRoot = roots.iterator().next();
+ assertEquals(3, roots.size());
+ assertEquals("creating table", createTableSpan.getDescription());
- assertEquals("creating table", createTableRoot.getDescription());
-
- int createTableCount = 0;
-
- for (Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) {
- if (s.getDescription().startsWith("MasterService.CreateTable")) {
- createTableCount++;
- }
+ if (spans != null) {
+ assertTrue(spans.size() > 5);
}
- assertTrue(createTableCount >= 1);
- assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
- assertTrue(spans.size() > 5);
-
Put put = new Put("row".getBytes());
put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
- TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
- try {
+ Span putSpan;
+
+ try (TraceScope scope = TraceUtil.createTrace("doing put")) {
+ putSpan = scope.getSpan();
table.put(put);
- } finally {
- putSpan.close();
}
spans = rcvr.getSpans();
traceTree = new TraceTree(spans);
- roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
-
- assertEquals(2, roots.size());
- Span putRoot = null;
- for (Span root : roots) {
- if (root.getDescription().equals("doing put")) {
- putRoot = root;
- }
- }
-
- assertNotNull(putRoot);
+ roots.clear();
+ roots.addAll(traceTree.getSpansByParent().find(putSpan.getSpanId()));
+ assertEquals(1, roots.size());
}
}
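
For reference, the htrace 4 idiom this test now follows can be sketched as below — a minimal illustration, not part of the patch. It assumes `TraceUtil.createTrace` (added elsewhere in this change) returns a usable `TraceScope`; the class name `TraceIdiomSketch` is hypothetical.

```java
package org.apache.hadoop.hbase.trace;

import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;

// Minimal sketch of the htrace-4 idiom used in the rewritten test:
// TraceScope is Closeable, so try-with-resources replaces the explicit
// try/finally { scope.close(); } blocks of htrace 3.
public class TraceIdiomSketch {
  static Span traced(String description, Runnable work) {
    try (TraceScope scope = TraceUtil.createTrace(description)) {
      Span span = scope.getSpan(); // capture before the scope closes the span
      work.run();
      return span; // usable afterwards, e.g. for assertions on its span id
    }
  }
}
```

The key design shift is that the scope, not the caller, owns span lifetime: the span is ended even if the traced work throws, which is why the deleted `finally` blocks are no longer needed.
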
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
new file mode 100644
index 00000000000..bba4ee5f64b
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/trace/TraceTree.java
@@ -0,0 +1,134 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanId;
+
+import java.util.Collection;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedHashSet;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Used to create the graph formed by spans.
+ */
+public class TraceTree {
+
+ public static class SpansByParent {
+ private final Set<Span> set;
+
+ private final HashMap<SpanId, LinkedList<Span>> parentToSpans;
+
+ SpansByParent(Collection<Span> spans) {
+ set = new LinkedHashSet<Span>();
+ parentToSpans = new HashMap<SpanId, LinkedList<Span>>();
+ if (spans == null) {
+ return;
+ }
+ for (Span span : spans) {
+ set.add(span);
+ for (SpanId parent : span.getParents()) {
+ LinkedList<Span> list = parentToSpans.get(parent);
+ if (list == null) {
+ list = new LinkedList<Span>();
+ parentToSpans.put(parent, list);
+ }
+ list.add(span);
+ }
+ if (span.getParents().length == 0) {
+ // Root spans (no parents) are keyed under a sentinel SpanId; the
+ // lookup must use the same sentinel the put below uses, or root
+ // spans would be appended to a list that find() can never return.
+ LinkedList<Span> list = parentToSpans.get(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE));
+ if (list == null) {
+ list = new LinkedList<Span>();
+ parentToSpans.put(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE), list);
+ }
+ list.add(span);
+ }
+ }
+
+ }
+
+ public List<Span> find(SpanId parentId) {
+ LinkedList<Span> spans = parentToSpans.get(parentId);
+ if (spans == null) {
+ return new LinkedList<Span>();
+ }
+ return spans;
+ }
+
+ public Iterator<Span> iterator() {
+ return Collections.unmodifiableSet(set).iterator();
+ }
+ }
+
+ public static class SpansByProcessId {
+ private final Set<Span> set;
+
+ SpansByProcessId(Collection<Span> spans) {
+ set = new LinkedHashSet<Span>();
+ if (spans == null) {
+ return;
+ }
+ for (Span span : spans) {
+ set.add(span);
+ }
+ }
+
+ public Iterator<Span> iterator() {
+ return Collections.unmodifiableSet(set).iterator();
+ }
+ }
+
+ private final SpansByParent spansByParent;
+ private final SpansByProcessId spansByProcessId;
+
+ /**
+ * Create a new TraceTree
+ *
+ * @param spans The collection of spans to use to create this TraceTree. Should
+ * have at least one root span.
+ */
+ public TraceTree(Collection<Span> spans) {
+ this.spansByParent = new SpansByParent(spans);
+ this.spansByProcessId = new SpansByProcessId(spans);
+ }
+
+ public SpansByParent getSpansByParent() {
+ return spansByParent;
+ }
+
+ public SpansByProcessId getSpansByProcessId() {
+ return spansByProcessId;
+ }
+
+ @Override
+ public String toString() {
+ StringBuilder bld = new StringBuilder();
+ String prefix = "";
+ for (Iterator<Span> iter = spansByParent.iterator(); iter.hasNext();) {
+ Span span = iter.next();
+ bld.append(prefix).append(span.toString());
+ prefix = "\n";
+ }
+ return bld.toString();
+ }
+}
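
A rough usage sketch for the new helper follows; everything other than `TraceTree`, its nested `SpansByParent`, and the htrace types is hypothetical illustration.

```java
package org.apache.hadoop.hbase.trace;

import java.util.Collection;

import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;

// Hypothetical sketch: build the span graph collected by a receiver and
// walk one level of it, printing parent -> child edges for a parent id.
public class TraceTreeSketch {
  static void printChildren(Collection<Span> spans, SpanId parentId) {
    TraceTree tree = new TraceTree(spans);
    for (Span child : tree.getSpansByParent().find(parentId)) {
      System.out.println(parentId + " -> " + child.getDescription());
    }
  }
}
```

This is how the test above verifies structure: it looks up children of the span captured from a `TraceScope` and asserts on the count and descriptions.
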
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
index 75aba037b36..9a5c19dd260 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/wal/WALPerformanceEvaluation.java
@@ -58,16 +58,17 @@ import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
+import org.apache.htrace.core.ProbabilitySampler;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
import org.apache.yetus.audience.InterfaceAudience;
import com.codahale.metrics.ConsoleReporter;
@@ -172,15 +173,13 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
Random rand = new Random(Thread.currentThread().getId());
WAL wal = region.getWAL();
- TraceScope threadScope =
- Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
- try {
+ try (TraceScope threadScope =
+ TraceUtil.createTrace("WALPerfEval." + Thread.currentThread().getName())) {
long startTime = System.currentTimeMillis();
int lastSync = 0;
+ TraceUtil.addSampler(loopSampler);
for (int i = 0; i < numIterations; ++i) {
- assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
- TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
- try {
+ assert Tracer.getCurrentSpan() == threadScope.getSpan() : "Span leak detected.";
+ try (TraceScope loopScope = TraceUtil.createTrace("runLoopIter" + i)) {
long now = System.nanoTime();
Put put = setupPut(rand, key, value, numFamilies);
WALEdit walEdit = new WALEdit();
@@ -196,16 +195,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
}
}
latencyHistogram.update(System.nanoTime() - now);
- } finally {
- loopScope.close();
}
}
long totalTime = (System.currentTimeMillis() - startTime);
logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
} catch (Exception e) {
LOG.error(getClass().getSimpleName() + " Thread failed", e);
- } finally {
- threadScope.close();
}
}
}
@@ -315,8 +310,9 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
LOG.info("FileSystem: " + fs);
SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
- final Sampler<?> sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
- TraceScope scope = Trace.startSpan("WALPerfEval", sampler);
+ final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
+ TraceUtil.addSampler(sampler);
+ TraceScope scope = TraceUtil.createTrace("WALPerfEval");
try {
if (rootRegionDir == null) {
@@ -338,8 +334,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
// a table per desired region means we can avoid carving up the key space
final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
- benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
- syncInterval, traceFreq));
+ benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
+ syncInterval, traceFreq), "");
}
ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
@@ -389,9 +385,15 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
}
} finally {
// We may be called inside a test that wants to keep on using the fs.
- if (!noclosefs) fs.close();
- scope.close();
- if (receiverHost != null) receiverHost.closeReceivers();
+ if (!noclosefs) {
+ fs.close();
+ }
+ if (scope != null) {
+ scope.close();
+ }
+ if (receiverHost != null) {
+ receiverHost.closeReceivers();
+ }
}
return(0);
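
The sampling change above is easy to miss: htrace 3 took a `Sampler` argument on every `Trace.startSpan(...)` call, while htrace 4 registers the sampler once on the process-wide tracer, and subsequent trace creation consults it implicitly. A minimal sketch of the new pattern, assuming the `TraceUtil` helper from this patch (the class `SamplerSketch` is illustrative only):

```java
package org.apache.hadoop.hbase.trace;

import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;

// Sketch of the htrace-4 sampling pattern: register the sampler once,
// then createTrace() decides per-trace whether to record a real span.
public class SamplerSketch {
  static void runMaybeTraced(boolean trace, Runnable work) {
    TraceUtil.addSampler(trace ? Sampler.ALWAYS : Sampler.NEVER);
    try (TraceScope scope = TraceUtil.createTrace("WALPerfEval")) {
      work.run(); // recorded only if the sampler said yes
    }
  }
}
```
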
diff --git a/hbase-shell/pom.xml b/hbase-shell/pom.xml
index 19ad91b83a5..f42594b8a0c 100644
--- a/hbase-shell/pom.xml
+++ b/hbase-shell/pom.xml
@@ -305,6 +305,12 @@
org.apache.hadoop
hadoop-common
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
@@ -352,6 +358,10 @@
org.apache.hadoop
hadoop-hdfs
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -364,6 +374,10 @@
test-jar
test
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -375,6 +389,10 @@
hadoop-minicluster
test
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -429,11 +447,21 @@
org.apache.hadoop
hadoop-common
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
hadoop-minicluster
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
diff --git a/hbase-shell/src/main/ruby/shell/commands/trace.rb b/hbase-shell/src/main/ruby/shell/commands/trace.rb
index 5ecd28cd0d4..b0350be2ef0 100644
--- a/hbase-shell/src/main/ruby/shell/commands/trace.rb
+++ b/hbase-shell/src/main/ruby/shell/commands/trace.rb
@@ -16,8 +16,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
-HTrace = org.apache.htrace.Trace
-java_import org.apache.htrace.Sampler
+HTrace = org.apache.htrace.core.Tracer
+java_import org.apache.htrace.core.Sampler
java_import org.apache.hadoop.hbase.trace.SpanReceiverHost
module Shell
diff --git a/hbase-spark/pom.xml b/hbase-spark/pom.xml
new file mode 100644
index 00000000000..db2c03d6fda
--- /dev/null
+++ b/hbase-spark/pom.xml
@@ -0,0 +1,716 @@
+
+
+
+ 4.0.0
+
+ hbase-build-configuration
+ org.apache.hbase
+ 3.0.0-SNAPSHOT
+ ../hbase-build-configuration
+
+ hbase-spark
+ Apache HBase - Spark
+
+ 1.6.0
+ 2.10.4
+ 2.10
+ ${project.basedir}/..
+
+
+
+ org.apache.hbase.thirdparty
+ hbase-shaded-miscellaneous
+
+
+
+ javax.servlet
+ javax.servlet-api
+ test
+
+
+
+ org.scala-lang
+ scala-library
+ ${scala.version}
+ provided
+
+
+
+ org.apache.spark
+ spark-core_${scala.binary.version}
+ ${spark.version}
+ provided
+
+
+
+ org.scala-lang
+ scala-library
+
+
+
+ org.scala-lang
+ scalap
+
+
+ com.google.code.findbugs
+ jsr305
+
+
+
+
+ com.google.code.findbugs
+ jsr305
+ 1.3.9
+ provided
+ true
+
+
+ org.apache.spark
+ spark-sql_${scala.binary.version}
+ ${spark.version}
+ provided
+
+
+ org.apache.spark
+ spark-streaming_${scala.binary.version}
+ ${spark.version}
+ provided
+
+
+ org.apache.spark
+ spark-streaming_${scala.binary.version}
+ ${spark.version}
+ test-jar
+ tests
+ test
+
+
+ junit
+ junit
+ test
+
+
+ org.scalatest
+ scalatest_${scala.binary.version}
+ 2.2.4
+ test
+
+
+ org.scalamock
+ scalamock-scalatest-support_${scala.binary.version}
+ 3.1.4
+ test
+
+
+ com.fasterxml.jackson.module
+ jackson-module-scala_${scala.binary.version}
+ ${jackson.version}
+
+
+ org.scala-lang
+ scala-library
+
+
+ org.scala-lang
+ scala-reflect
+
+
+
+
+ org.apache.hadoop
+ hadoop-client
+ ${hadoop-two.version}
+
+
+ log4j
+ log4j
+
+
+ javax.servlet
+ servlet-api
+
+
+ javax.servlet.jsp
+ jsp-api
+
+
+ org.jruby
+ jruby-complete
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+
+
+ org.apache.hadoop
+ hadoop-common
+ ${hadoop-two.version}
+
+
+ org.apache.htrace
+ htrace-core
+
+
+ log4j
+ log4j
+
+
+ javax.servlet
+ servlet-api
+
+
+ javax.servlet.jsp
+ jsp-api
+
+
+ org.jruby
+ jruby-complete
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+ com.google.code.findbugs
+ jsr305
+
+
+
+
+ org.apache.hadoop
+ hadoop-common
+ ${hadoop-two.version}
+ test-jar
+ test
+
+
+ org.apache.htrace
+ htrace-core
+
+
+ log4j
+ log4j
+
+
+ javax.servlet
+ servlet-api
+
+
+ javax.servlet.jsp
+ jsp-api
+
+
+ org.jruby
+ jruby-complete
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+ com.google.code.findbugs
+ jsr305
+
+
+
+
+ org.apache.hadoop
+ hadoop-hdfs
+ ${hadoop-two.version}
+ test-jar
+ test
+
+
+ org.apache.htrace
+ htrace-core
+
+
+ log4j
+ log4j
+
+
+ javax.servlet
+ servlet-api
+
+
+ javax.servlet.jsp
+ jsp-api
+
+
+ org.jruby
+ jruby-complete
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+ xerces
+ xercesImpl
+
+
+
+
+ org.apache.hbase
+ hbase-client
+
+
+ log4j
+ log4j
+
+
+ org.apache.thrift
+ thrift
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ org.mortbay.jetty
+ jsp-2.1
+
+
+ org.mortbay.jetty
+ jsp-api-2.1
+
+
+ org.mortbay.jetty
+ servlet-api-2.5
+
+
+ com.sun.jersey
+ jersey-core
+
+
+ com.sun.jersey
+ jersey-json
+
+
+ com.sun.jersey
+ jersey-server
+
+
+ org.mortbay.jetty
+ jetty
+
+
+ org.mortbay.jetty
+ jetty-util
+
+
+ tomcat
+ jasper-runtime
+
+
+ tomcat
+ jasper-compiler
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+
+
+ org.apache.hbase
+ hbase-protocol
+ ${project.version}
+
+
+ org.apache.hbase
+ hbase-protocol-shaded
+ ${project.version}
+
+
+ org.apache.hbase
+ hbase-annotations
+ ${project.version}
+
+
+ org.apache.hbase
+ hbase-common
+ ${project.version}
+
+
+ org.apache.hbase
+ hbase-annotations
+ ${project.version}
+ test-jar
+ test
+
+
+ org.apache.hbase
+ hbase-hadoop-compat
+ ${project.version}
+ test
+ test-jar
+
+
+ log4j
+ log4j
+
+
+ org.apache.thrift
+ thrift
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ org.mortbay.jetty
+ jsp-2.1
+
+
+ org.mortbay.jetty
+ jsp-api-2.1
+
+
+ org.mortbay.jetty
+ servlet-api-2.5
+
+
+ com.sun.jersey
+ jersey-core
+
+
+ com.sun.jersey
+ jersey-json
+
+
+ com.sun.jersey
+ jersey-server
+
+
+ org.mortbay.jetty
+ jetty
+
+
+ org.mortbay.jetty
+ jetty-util
+
+
+ tomcat
+ jasper-runtime
+
+
+ tomcat
+ jasper-compiler
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+
+
+ org.apache.hbase
+ hbase-hadoop2-compat
+ ${project.version}
+ test
+ test-jar
+
+
+ log4j
+ log4j
+
+
+ org.apache.thrift
+ thrift
+
+
+ org.slf4j
+ slf4j-log4j12
+
+
+ org.mortbay.jetty
+ jsp-2.1
+
+
+ org.mortbay.jetty
+ jsp-api-2.1
+
+
+ org.mortbay.jetty
+ servlet-api-2.5
+
+
+ com.sun.jersey
+ jersey-core
+
+
+ com.sun.jersey
+ jersey-json
+
+
+ com.sun.jersey
+ jersey-server
+
+
+ org.mortbay.jetty
+ jetty
+
+
+ org.mortbay.jetty
+ jetty-util
+
+
+ tomcat
+ jasper-runtime
+
+
+ tomcat
+ jasper-compiler
+
+
+ org.jboss.netty
+ netty
+
+
+ io.netty
+ netty
+
+
+
+
+ org.apache.hbase
+ hbase-server
+ ${project.version}
+
+
+ org.apache.hbase
+ hbase-server
+ ${project.version}
+ test
+ test-jar
+
+
+ org.apache.hbase
+ hbase-mapreduce
+
+
+ com.google.protobuf
+ protobuf-java
+
+
+ commons-io
+ commons-io
+
+
+ org.apache.hadoop
+ hadoop-mapreduce-client-jobclient
+ test-jar
+ test
+
+
+ org.apache.avro
+ avro
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-compiler-plugin
+
+
+ net.alchim31.maven
+ scala-maven-plugin
+ 3.2.0
+
+ ${project.build.sourceEncoding}
+ ${scala.version}
+
+ -feature
+
+
+
+
+ scala-compile-first
+ process-resources
+
+ add-source
+ compile
+
+
+
+ scala-test-compile
+ process-test-resources
+
+ testCompile
+
+
+
+
+
+ org.scalatest
+ scalatest-maven-plugin
+ 1.0
+
+ ${project.build.directory}/surefire-reports
+ .
+ WDF TestSuite.txt
+ false
+
+ org.apache.hadoop.hbase.shaded.
+
+
+
+
+ test
+ test
+
+ test
+
+
+
+ org.apache.hadoop.hbase.shaded.
+
+ -Xmx1536m -XX:ReservedCodeCacheSize=512m
+
+ false
+
+
+
+
+
+
+ org.codehaus.mojo
+ build-helper-maven-plugin
+
+
+ add-source
+ validate
+
+ add-source
+
+
+
+
+
+
+
+
+ add-test-source
+ validate
+
+ add-test-source
+
+
+
+
+
+
+
+
+
+
+ org.xolstice.maven.plugins
+ protobuf-maven-plugin
+
+
+ compile-protoc
+ generate-sources
+
+ compile
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-enforcer-plugin
+
+
+
+ banned-jsr305
+
+ enforce
+
+
+ false
+
+
+
+
+ banned-scala
+
+ enforce
+
+
+ true
+
+
+
+
+
+
+
+
+
+ skipSparkTests
+
+
+ skipSparkTests
+
+
+
+ true
+ true
+ true
+
+
+
+
diff --git a/hbase-testing-util/pom.xml b/hbase-testing-util/pom.xml
index 1e5f13db847..7c8b0548d1e 100644
--- a/hbase-testing-util/pom.xml
+++ b/hbase-testing-util/pom.xml
@@ -140,6 +140,12 @@
org.apache.hadoop
hadoop-common
compile
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
@@ -184,6 +190,10 @@
hadoop-hdfs
compile
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -196,6 +206,10 @@
test-jar
compile
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -207,6 +221,10 @@
hadoop-minicluster
compile
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -239,11 +257,23 @@
org.apache.hadoop
hadoop-common
compile
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
hadoop-minicluster
compile
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
diff --git a/hbase-thrift/pom.xml b/hbase-thrift/pom.xml
index fd662893c5b..2088ce02f67 100644
--- a/hbase-thrift/pom.xml
+++ b/hbase-thrift/pom.xml
@@ -513,12 +513,22 @@
org.apache.hadoop
hadoop-common
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
hadoop-minicluster
test
+
+ org.apache.htrace
+ htrace-core
+
com.google.guava
guava
@@ -571,10 +581,22 @@
org.apache.hadoop
hadoop-common
+
+
+ org.apache.htrace
+ htrace-core
+
+
org.apache.hadoop
hadoop-minicluster
+
+
+ org.apache.htrace
+ htrace-core
+
+
diff --git a/pom.xml b/pom.xml
index 8562686d783..d3b47a3fba8 100644
--- a/pom.xml
+++ b/pom.xml
@@ -1344,7 +1344,8 @@
9.1.10.0
4.12
1.3
- 3.2.0-incubating
+ 4.2.0-incubating
+ 3.2.0-incubating
1.2.17
2.1.0
@@ -1959,7 +1960,7 @@
org.apache.htrace
- htrace-core
+ htrace-core4
${htrace.version}
@@ -2392,6 +2393,10 @@
org.apache.hadoop
hadoop-hdfs
+
+ org.apache.htrace
+ htrace-core
+
javax.servlet.jsp
jsp-api
@@ -2430,6 +2435,10 @@
test-jar
test
+
+ org.apache.htrace
+ htrace-core
+
javax.servlet.jsp
jsp-api
@@ -2470,6 +2479,10 @@
hadoop-common
${hadoop-two.version}
+
+ org.apache.htrace
+ htrace-core
+
commons-beanutils
commons-beanutils
@@ -2520,10 +2533,14 @@
hadoop-minicluster
${hadoop-two.version}
-
- commons-httpclient
- commons-httpclient
-
+
+ org.apache.htrace
+ htrace-core
+
+
+ commons-httpclient
+ commons-httpclient
+
javax.servlet.jsp
jsp-api
@@ -2630,6 +2647,10 @@
org.apache.hadoop
hadoop-hdfs
+
+ org.apache.htrace
+ htrace-core
+
javax.servlet.jsp
jsp-api
@@ -2664,6 +2685,10 @@
test-jar
test
+
+ org.apache.htrace
+ htrace-core
+
javax.servlet.jsp
jsp-api
@@ -2706,10 +2731,14 @@
hadoop-common
${hadoop-three.version}
-
- commons-beanutils
- commons-beanutils
-
+
+ org.apache.htrace
+ htrace-core
+
+
+ commons-beanutils
+ commons-beanutils
+
javax.servlet.jsp
jsp-api
@@ -2761,10 +2790,14 @@
hadoop-minicluster
${hadoop-three.version}
-
- commons-httpclient
- commons-httpclient
-
+
+ org.apache.htrace
+ htrace-core
+
+
+ commons-httpclient
+ commons-httpclient
+
javax.servlet.jsp
jsp-api
diff --git a/src/main/asciidoc/_chapters/tracing.adoc b/src/main/asciidoc/_chapters/tracing.adoc
index 0cddd8a522f..2982832cfa1 100644
--- a/src/main/asciidoc/_chapters/tracing.adoc
+++ b/src/main/asciidoc/_chapters/tracing.adoc
@@ -57,7 +57,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_ for a `hbase.local-fi
hbase.trace.spanreceiver.classes
- org.apache.htrace.impl.LocalFileSpanReceiver
+ org.apache.htrace.core.LocalFileSpanReceiver
hbase.htrace.local-file-span-receiver.path
@@ -76,7 +76,7 @@ _htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7
hbase.trace.spanreceiver.classes
- org.apache.htrace.impl.ZipkinSpanReceiver
+ org.apache.htrace.core.ZipkinSpanReceiver
hbase.htrace.zipkin.collector-hostname
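
For completeness, the programmatic counterpart of the _hbase-site.xml_ snippets above looks roughly like this. The property keys are the ones shown in this chapter; the class `ReceiverSetupSketch` and the span file path are hypothetical.

```java
package org.apache.hadoop.hbase.trace;

import org.apache.hadoop.conf.Configuration;

// Sketch: SpanReceiverHost reads hbase.trace.spanreceiver.classes from the
// configuration and instantiates each listed receiver class.
public class ReceiverSetupSketch {
  static SpanReceiverHost startReceivers(Configuration conf) {
    conf.set("hbase.trace.spanreceiver.classes",
        "org.apache.htrace.core.LocalFileSpanReceiver");
    conf.set("hbase.htrace.local-file-span-receiver.path", "/tmp/spans.json");
    return SpanReceiverHost.getInstance(conf); // call closeReceivers() on shutdown
  }
}
```
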