HBASE-18601: Update Htrace to 4.2

Updated the HTrace version to 4.2.
Created a TraceUtil class to wrap HTrace methods; trace scopes are now managed with try-with-resources (a minimal sketch of the pattern follows the commit metadata below).

Signed-off-by: Balazs Meszaros <balazs.meszaros@cloudera.com>
Signed-off-by: Michael Stack <stack@apache.org>
Authored by Tamas Penzes on 2017-10-26 23:23:42 +02:00, committed by Michael Stack
parent 8a5273f38c
commit 7a69ebc73e
65 changed files with 1686 additions and 513 deletions
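For orientation, here is the pattern the rest of this diff converts to, shown as a minimal hypothetical caller (the class and span names are illustrative and not part of the change): build the process-wide Tracer once with TraceUtil.initTracer, then open scopes with try-with-resources.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.TraceScope;

public class TraceUtilQuickStart {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    TraceUtil.initTracer(conf);  // builds the shared Tracer; a no-op when tracing is not configured
    // createTrace returns null when there is no Tracer; try-with-resources tolerates a
    // null resource, so call sites need no explicit null check or finally block.
    try (TraceScope scope = TraceUtil.createTrace("QuickStart.work")) {
      System.out.println("doing traced work");
    }
  }
}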


@ -174,6 +174,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@ -287,6 +291,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@ -166,7 +166,7 @@
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>org.jruby.jcodings</groupId>
@ -258,6 +258,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@ -326,6 +330,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.RegionLocations;
import org.apache.hadoop.hbase.RetryImmediatelyException;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
@ -56,7 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.htrace.Trace;
import org.apache.htrace.core.Tracer;
/**
* The context, and return value, for a single submit/submitAll call.
@ -582,7 +583,13 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
asyncProcess.incTaskCounters(multiAction.getRegions(), server);
SingleServerRequestRunnable runnable = createSingleServerRequest(
multiAction, numAttempt, server, callsInProgress);
return Collections.singletonList(Trace.wrap("AsyncProcess.sendMultiAction", runnable));
Tracer tracer = Tracer.curThreadTracer();
if (tracer == null) {
return Collections.singletonList(runnable);
} else {
return Collections.singletonList(tracer.wrap(runnable, "AsyncProcess.sendMultiAction"));
}
}
// group the actions by the amount of delay
@ -618,7 +625,7 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
asyncProcess.connection.getConnectionMetrics().incrNormalRunners();
}
}
runnable = Trace.wrap(traceText, runnable);
runnable = TraceUtil.wrap(runnable, traceText);
toReturn.add(runnable);
}


@ -28,9 +28,9 @@ import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.htrace.Trace;
/**
* A completion service for the RpcRetryingCallerFactory.
@ -168,7 +168,7 @@ public class ResultBoundedCompletionService<V> {
public void submit(RetryingCallable<V> task, int callTimeout, int id) {
QueueingFuture<V> newFuture = new QueueingFuture<>(task, callTimeout, id);
executor.execute(Trace.wrap(newFuture));
executor.execute(TraceUtil.wrap(newFuture, "ResultBoundedCompletionService.submit"));
tasks[id] = newFuture;
}


@ -24,10 +24,6 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.isFatalConnectionException;
import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled;
import static org.apache.hadoop.hbase.ipc.IPCUtil.write;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.DataInputStream;
@ -55,10 +51,15 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback;
import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
import org.apache.hadoop.hbase.security.SaslUtil;
import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
@ -66,17 +67,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHea
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
import org.apache.hadoop.hbase.security.SaslUtil;
import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.ExceptionUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Thread that reads responses and notifies callers. Each connection owns a socket connected to a
@ -574,7 +573,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable {
}
private void tracedWriteRequest(Call call) throws IOException {
try (TraceScope ignored = Trace.startSpan("RpcClientImpl.tracedWriteRequest", call.span)) {
try (TraceScope ignored = TraceUtil.createTrace("RpcClientImpl.tracedWriteRequest",
call.span)) {
writeRequest(call);
}
}


@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellScanner;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.MetricsConnection;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.Tracer;
/** A call waiting for a value. */
@InterfaceAudience.Private
@ -73,7 +73,7 @@ class Call {
this.timeout = timeout;
this.priority = priority;
this.callback = callback;
this.span = Trace.currentSpan();
this.span = Tracer.getCurrentSpan();
}
@Override


@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.ipc.RemoteException;
@ -102,10 +101,11 @@ class IPCUtil {
static RequestHeader buildRequestHeader(Call call, CellBlockMeta cellBlockMeta) {
RequestHeader.Builder builder = RequestHeader.newBuilder();
builder.setCallId(call.id);
if (call.span != null) {
//TODO handle htrace API change, see HBASE-18895
/*if (call.span != null) {
builder.setTraceInfo(RPCTInfo.newBuilder().setParentId(call.span.getSpanId())
.setTraceId(call.span.getTraceId()));
}
.setTraceId(call.span.getTracerId()));
}*/
builder.setMethodName(call.md.getName());
builder.setRequestParam(call.param != null);
if (cellBlockMeta != null) {


@ -33,8 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.RetryCounter;
import org.apache.hadoop.hbase.util.RetryCounterFactory;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.CreateMode;
@ -156,11 +157,8 @@ public class RecoverableZooKeeper {
* This function will not throw NoNodeException if the path does not
* exist.
*/
public void delete(String path, int version)
throws InterruptedException, KeeperException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.delete");
public void delete(String path, int version) throws InterruptedException, KeeperException {
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) {
RetryCounter retryCounter = retryCounterFactory.create();
boolean isRetry = false; // False for first attempt, true for all retries.
while (true) {
@ -197,8 +195,6 @@ public class RecoverableZooKeeper {
retryCounter.sleepUntilNextRetry();
isRetry = true;
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -206,11 +202,8 @@ public class RecoverableZooKeeper {
* exists is an idempotent operation. Retry before throwing exception
* @return A Stat instance
*/
public Stat exists(String path, Watcher watcher)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.exists");
public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException {
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -236,8 +229,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -245,11 +236,8 @@ public class RecoverableZooKeeper {
* exists is an idempotent operation. Retry before throwing exception
* @return A Stat instance
*/
public Stat exists(String path, boolean watch)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.exists");
public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -275,8 +263,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -297,9 +283,7 @@ public class RecoverableZooKeeper {
*/
public List<String> getChildren(String path, Watcher watcher)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -325,8 +309,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -336,9 +318,7 @@ public class RecoverableZooKeeper {
*/
public List<String> getChildren(String path, boolean watch)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -364,8 +344,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -375,9 +353,7 @@ public class RecoverableZooKeeper {
*/
public byte[] getData(String path, Watcher watcher, Stat stat)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.getData");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -403,8 +379,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -414,9 +388,7 @@ public class RecoverableZooKeeper {
*/
public byte[] getData(String path, boolean watch, Stat stat)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.getData");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -442,8 +414,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -455,9 +425,7 @@ public class RecoverableZooKeeper {
*/
public Stat setData(String path, byte[] data, int version)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.setData");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setData")) {
RetryCounter retryCounter = retryCounterFactory.create();
byte[] newData = appendMetaData(id, data);
boolean isRetry = false;
@ -505,8 +473,6 @@ public class RecoverableZooKeeper {
retryCounter.sleepUntilNextRetry();
isRetry = true;
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -516,9 +482,7 @@ public class RecoverableZooKeeper {
*/
public List<ACL> getAcl(String path, Stat stat)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.getAcl");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getAcl")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -544,8 +508,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -555,9 +517,7 @@ public class RecoverableZooKeeper {
*/
public Stat setAcl(String path, List<ACL> acls, int version)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.setAcl");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setAcl")) {
RetryCounter retryCounter = retryCounterFactory.create();
while (true) {
try {
@ -583,8 +543,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -606,9 +564,7 @@ public class RecoverableZooKeeper {
public String create(String path, byte[] data, List<ACL> acl,
CreateMode createMode)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.create");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.create")) {
byte[] newData = appendMetaData(id, data);
switch (createMode) {
case EPHEMERAL:
@ -623,8 +579,6 @@ public class RecoverableZooKeeper {
throw new IllegalArgumentException("Unrecognized CreateMode: " +
createMode);
}
} finally {
if (traceScope != null) traceScope.close();
}
}
@ -753,9 +707,7 @@ public class RecoverableZooKeeper {
*/
public List<OpResult> multi(Iterable<Op> ops)
throws KeeperException, InterruptedException {
TraceScope traceScope = null;
try {
traceScope = Trace.startSpan("RecoverableZookeeper.multi");
try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.multi")) {
RetryCounter retryCounter = retryCounterFactory.create();
Iterable<Op> multiOps = prepareZKMulti(ops);
while (true) {
@ -782,8 +734,6 @@ public class RecoverableZooKeeper {
}
retryCounter.sleepUntilNextRetry();
}
} finally {
if (traceScope != null) traceScope.close();
}
}
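Every RecoverableZooKeeper method above trades the old pattern of a nullable TraceScope closed in a finally block for try-with-resources. The refactor is safe because the language skips close() for a null resource, which is exactly what TraceUtil.createTrace returns when tracing is off. A self-contained sketch of that behavior, with no HBase or HTrace classes involved (names are illustrative):

import java.io.Closeable;

public class NullResourceExample {

  // Stands in for TraceUtil.createTrace(): returns null when "tracing" is disabled.
  static Closeable maybeScope(boolean tracing) {
    if (!tracing) {
      return null;
    }
    return () -> System.out.println("scope closed");
  }

  public static void main(String[] args) throws Exception {
    // Null resource: the body still runs, close() is skipped, and no NullPointerException occurs.
    try (Closeable scope = maybeScope(false)) {
      System.out.println("work ran without a scope");
    }
    // Non-null resource: close() runs automatically when the block exits.
    try (Closeable scope = maybeScope(true)) {
      System.out.println("work ran inside a scope");
    }
  }
}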


@ -244,7 +244,7 @@
<!-- tracing Dependencies -->
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>
@ -344,6 +344,12 @@
<artifactId>hadoop-common</artifactId>
<!--FYI This pulls in hadoop's guava. Its needed for Configuration
at least-->
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
@ -390,6 +396,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>


@ -18,16 +18,15 @@
package org.apache.hadoop.hbase.trace;
import org.apache.hadoop.conf.Configuration;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.htrace.HTraceConfiguration;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.yetus.audience.InterfaceAudience;
@InterfaceAudience.Private
public class HBaseHTraceConfiguration extends HTraceConfiguration {
private static final Log LOG =
LogFactory.getLog(HBaseHTraceConfiguration.class);
private static final Log LOG = LogFactory.getLog(HBaseHTraceConfiguration.class);
public static final String KEY_PREFIX = "hbase.htrace.";
@ -65,7 +64,7 @@ public class HBaseHTraceConfiguration extends HTraceConfiguration {
@Override
public String get(String key) {
return conf.get(KEY_PREFIX +key);
return conf.get(KEY_PREFIX + key);
}
@Override


@ -24,10 +24,8 @@ import java.util.HashSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.htrace.core.SpanReceiver;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.htrace.SpanReceiver;
import org.apache.htrace.SpanReceiverBuilder;
import org.apache.htrace.Trace;
/**
* This class provides functions for reading the names of SpanReceivers from
@ -62,6 +60,16 @@ public class SpanReceiverHost {
}
public static Configuration getConfiguration(){
synchronized (SingletonHolder.INSTANCE.lock) {
if (SingletonHolder.INSTANCE.host == null || SingletonHolder.INSTANCE.host.conf == null) {
return null;
}
return SingletonHolder.INSTANCE.host.conf;
}
}
SpanReceiverHost(Configuration conf) {
receivers = new HashSet<>();
this.conf = conf;
@ -78,18 +86,18 @@ public class SpanReceiverHost {
return;
}
SpanReceiverBuilder builder = new SpanReceiverBuilder(new HBaseHTraceConfiguration(conf));
SpanReceiver.Builder builder = new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf));
for (String className : receiverNames) {
className = className.trim();
SpanReceiver receiver = builder.spanReceiverClass(className).build();
SpanReceiver receiver = builder.className(className).build();
if (receiver != null) {
receivers.add(receiver);
LOG.info("SpanReceiver " + className + " was loaded successfully.");
}
}
for (SpanReceiver rcvr : receivers) {
Trace.addReceiver(rcvr);
TraceUtil.addReceiver(rcvr);
}
}
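loadSpanReceivers above now goes through the htrace 4 SpanReceiver.Builder (className().build()) instead of the htrace 3 SpanReceiverBuilder, and registers receivers via TraceUtil.addReceiver. A hedged sketch of loading a single receiver by name; StandardOutSpanReceiver is assumed to ship with htrace-core4, and the null check mirrors the one in SpanReceiverHost above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.SpanReceiver;

public class ReceiverLoadExample {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    TraceUtil.initTracer(conf);
    // Assumed receiver class; any SpanReceiver implementation on the classpath can be named.
    String className = "org.apache.htrace.core.StandardOutSpanReceiver";
    SpanReceiver receiver =
        new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf)).className(className).build();
    if (receiver != null) {
      TraceUtil.addReceiver(receiver);  // attaches it to the Tracer's pool when tracing is on
    }
  }
}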


@ -0,0 +1,124 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.trace;
import org.apache.hadoop.conf.Configuration;
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanReceiver;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
/**
* This wrapper class provides functions for accessing htrace 4+ functionality in a simplified way.
*/
public final class TraceUtil {
private static HTraceConfiguration conf;
private static Tracer tracer;
private TraceUtil() {
}
public static void initTracer(Configuration c) {
if(c != null) {
conf = new HBaseHTraceConfiguration(c);
}
if (tracer == null && conf != null) {
tracer = new Tracer.Builder("Tracer").conf(conf).build();
}
}
/**
* Wrapper method to create new TraceScope with the given description
* @return TraceScope or null when not tracing
*/
public static TraceScope createTrace(String description) {
return (tracer == null) ? null : tracer.newScope(description);
}
/**
* Wrapper method to create new child TraceScope with the given description
* and parent scope's spanId
* @param span parent span
* @return TraceScope or null when not tracing
*/
public static TraceScope createTrace(String description, Span span) {
if(span == null) return createTrace(description);
return (tracer == null) ? null : tracer.newScope(description, span.getSpanId());
}
/**
* Wrapper method to add new sampler to the default tracer
* @return true if added, false if it was already added
*/
public static boolean addSampler(Sampler sampler) {
if (sampler == null) {
return false;
}
return (tracer == null) ? false : tracer.addSampler(sampler);
}
/**
* Wrapper method to add key-value pair to TraceInfo of actual span
*/
public static void addKVAnnotation(String key, String value){
Span span = Tracer.getCurrentSpan();
if (span != null) {
span.addKVAnnotation(key, value);
}
}
/**
* Wrapper method to add receiver to actual tracerpool
* @return true if successfull, false if it was already added
*/
public static boolean addReceiver(SpanReceiver rcvr) {
return (tracer == null) ? false : tracer.getTracerPool().addReceiver(rcvr);
}
/**
* Wrapper method to remove receiver from actual tracerpool
* @return true if removed, false if doesn't exist
*/
public static boolean removeReceiver(SpanReceiver rcvr) {
return (tracer == null) ? false : tracer.getTracerPool().removeReceiver(rcvr);
}
/**
* Wrapper method to add timeline annotiation to current span with given message
*/
public static void addTimelineAnnotation(String msg) {
Span span = Tracer.getCurrentSpan();
if (span != null) {
span.addTimelineAnnotation(msg);
}
}
/**
* Wrap runnable with current tracer and description
* @param runnable to wrap
* @return wrapped runnable or original runnable when not tracing
*/
public static Runnable wrap(Runnable runnable, String description) {
return (tracer == null) ? runnable : tracer.wrap(runnable, description);
}
}
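A hedged usage sketch for the new class: initialize once, register a sampler, then annotate work from anywhere without holding a Span reference. All names below are illustrative, and with no span receivers configured the sampled spans are simply dropped.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;

public class TraceUtilUsage {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    TraceUtil.initTracer(conf);            // no-op when tracing is not configured
    TraceUtil.addSampler(Sampler.ALWAYS);  // sample every span, as the integration tests below do

    try (TraceScope scope = TraceUtil.createTrace("TraceUtilUsage.demo")) {
      TraceUtil.addTimelineAnnotation("work started");          // annotates the current span
      long rows = countRows();
      TraceUtil.addKVAnnotation("rows", String.valueOf(rows));  // string key/value pair
    }
  }

  private static long countRows() {
    return 42L;  // placeholder for real work
  }
}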


@ -254,6 +254,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -285,6 +291,10 @@
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -296,6 +306,10 @@
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -330,11 +344,21 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>


@ -245,6 +245,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>
@ -290,10 +296,22 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>


@ -173,7 +173,7 @@
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>junit</groupId>
@ -244,6 +244,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -296,6 +300,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>


@ -1,3 +1,4 @@
/**
* Copyright The Apache Software Foundation
*
@ -19,24 +20,6 @@
package org.apache.hadoop.hbase.io.hfile;
import net.spy.memcached.CachedData;
import net.spy.memcached.ConnectionFactoryBuilder;
import net.spy.memcached.FailureMode;
import net.spy.memcached.MemcachedClient;
import net.spy.memcached.transcoders.Transcoder;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
@ -46,6 +29,24 @@ import java.util.List;
import java.util.NoSuchElementException;
import java.util.concurrent.ExecutionException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
import org.apache.hadoop.hbase.nio.ByteBuff;
import org.apache.hadoop.hbase.nio.SingleByteBuff;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import net.spy.memcached.CachedData;
import net.spy.memcached.ConnectionFactoryBuilder;
import net.spy.memcached.FailureMode;
import net.spy.memcached.MemcachedClient;
import net.spy.memcached.transcoders.Transcoder;
/**
* Class to store blocks into memcached.
* This should only be used on a cluster of Memcached daemons that are tuned well and have a
@ -134,7 +135,7 @@ public class MemcachedBlockCache implements BlockCache {
// Assume that nothing is the block cache
HFileBlock result = null;
try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
try (TraceScope traceScope = TraceUtil.createTrace("MemcachedBlockCache.getBlock")) {
result = client.get(cacheKey.toString(), tc);
} catch (Exception e) {
// Catch a pretty broad set of exceptions to limit any changes in the memecache client


@ -170,6 +170,12 @@ limitations under the License.
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop-two.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.commons</groupId>


@ -266,7 +266,7 @@
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>javax.ws.rs</groupId>
@ -350,6 +350,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -400,10 +406,22 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>


@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.NamespaceNotFoundException;
import org.apache.hadoop.hbase.TableExistsException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.chaos.actions.Action;
import org.apache.hadoop.hbase.chaos.actions.MoveRegionsOfTableAction;
import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction;
@ -62,20 +61,19 @@ import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
import org.apache.hadoop.hbase.ipc.FatalConnectionException;
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.LoadTestTool;
import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.impl.AlwaysSampler;
import org.apache.htrace.core.AlwaysSampler;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
/**
* Integration test that should benchmark how fast HBase can recover from failures. This test starts
* different threads:
@ -268,7 +266,7 @@ public class IntegrationTestMTTR {
loadTool = null;
}
private static boolean tablesOnMaster() {
boolean ret = true;
String value = util.getConfiguration().get("hbase.balancer.tablesOnMaster");
@ -369,7 +367,7 @@ public class IntegrationTestMTTR {
*/
private static class TimingResult {
DescriptiveStatistics stats = new DescriptiveStatistics();
ArrayList<Long> traces = new ArrayList<>(10);
ArrayList<String> traces = new ArrayList<>(10);
/**
* Add a result to this aggregate result.
@ -377,9 +375,12 @@ public class IntegrationTestMTTR {
* @param span Span. To be kept if the time taken was over 1 second
*/
public void addResult(long time, Span span) {
if (span == null) {
return;
}
stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS));
if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) {
traces.add(span.getTraceId());
traces.add(span.getTracerId());
}
}
@ -419,12 +420,15 @@ public class IntegrationTestMTTR {
final int maxIterations = 10;
int numAfterDone = 0;
int resetCount = 0;
TraceUtil.addSampler(AlwaysSampler.INSTANCE);
// Keep trying until the rs is back up and we've gotten a put through
while (numAfterDone < maxIterations) {
long start = System.nanoTime();
TraceScope scope = null;
try {
scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE);
Span span = null;
try (TraceScope scope = TraceUtil.createTrace(getSpanName())) {
if (scope != null) {
span = scope.getSpan();
}
boolean actionResult = doAction();
if (actionResult && future.isDone()) {
numAfterDone++;
@ -470,12 +474,8 @@ public class IntegrationTestMTTR {
LOG.info("Too many unexpected Exceptions. Aborting.", e);
throw e;
}
} finally {
if (scope != null) {
scope.close();
}
}
result.addResult(System.nanoTime() - start, scope.getSpan());
result.addResult(System.nanoTime() - start, span);
}
return result;
}
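Worth noting in the rewritten loop above: the Span has to be pulled out of the TraceScope while the scope is open, and only when the scope is non-null, because the span is what later reports getTracerId() for slow iterations. A hedged standalone sketch of that timing pattern (class and span names are illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.AlwaysSampler;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;

public class TimedTraceExample {
  public static void main(String[] args) {
    TraceUtil.initTracer(HBaseConfiguration.create());
    TraceUtil.addSampler(AlwaysSampler.INSTANCE);  // sample every span, as the test does
    long start = System.nanoTime();
    Span span = null;
    try (TraceScope scope = TraceUtil.createTrace("TimedTraceExample.iteration")) {
      if (scope != null) {
        span = scope.getSpan();  // capture before the scope closes
      }
      // ... the action being timed goes here ...
    }
    long elapsed = System.nanoTime() - start;
    if (span != null) {  // tracing may be disabled entirely, leaving span null
      System.out.println("took " + elapsed + " ns, tracer=" + span.getTracerId());
    }
  }
}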


@ -35,9 +35,8 @@ import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;
import org.junit.Test;
import org.junit.experimental.categories.Category;
@ -117,13 +116,12 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
for (int i = 0; i < 100; i++) {
Runnable runnable = new Runnable() {
private TraceScope innerScope = null;
private final LinkedBlockingQueue<Long> rowKeyQueue = rks;
@Override
public void run() {
ResultScanner rs = null;
try {
innerScope = Trace.startSpan("Scan", Sampler.ALWAYS);
TraceUtil.addSampler(Sampler.ALWAYS);
try (TraceScope scope = TraceUtil.createTrace("Scan")){
Table ht = util.getConnection().getTable(tableName);
Scan s = new Scan();
s.setStartRow(Bytes.toBytes(rowKeyQueue.take()));
@ -137,20 +135,15 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
accum |= Bytes.toLong(r.getRow());
}
innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum);
TraceUtil.addTimelineAnnotation("Accum result = " + accum);
ht.close();
ht = null;
} catch (IOException e) {
e.printStackTrace();
innerScope.getSpan().addKVAnnotation(
Bytes.toBytes("exception"),
Bytes.toBytes(e.getClass().getSimpleName()));
TraceUtil.addKVAnnotation("exception", e.getClass().getSimpleName());
} catch (Exception e) {
} finally {
if (innerScope != null) innerScope.close();
if (rs != null) rs.close();
}
@ -165,7 +158,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
throws IOException {
for (int i = 0; i < 100; i++) {
Runnable runnable = new Runnable() {
private TraceScope innerScope = null;
private final LinkedBlockingQueue<Long> rowKeyQueue = rowKeys;
@Override
@ -180,9 +172,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
}
long accum = 0;
TraceUtil.addSampler(Sampler.ALWAYS);
for (int x = 0; x < 5; x++) {
try {
innerScope = Trace.startSpan("gets", Sampler.ALWAYS);
try (TraceScope scope = TraceUtil.createTrace("gets")) {
long rk = rowKeyQueue.take();
Result r1 = ht.get(new Get(Bytes.toBytes(rk)));
if (r1 != null) {
@ -192,14 +184,10 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
if (r2 != null) {
accum |= Bytes.toLong(r2.getRow());
}
innerScope.getSpan().addTimelineAnnotation("Accum = " + accum);
TraceUtil.addTimelineAnnotation("Accum = " + accum);
} catch (IOException e) {
} catch (IOException|InterruptedException ie) {
// IGNORED
} catch (InterruptedException ie) {
// IGNORED
} finally {
if (innerScope != null) innerScope.close();
}
}
@ -210,25 +198,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
}
private void createTable() throws IOException {
TraceScope createScope = null;
try {
createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
TraceUtil.addSampler(Sampler.ALWAYS);
try (TraceScope scope = TraceUtil.createTrace("createTable")) {
util.createTable(tableName, familyName);
} finally {
if (createScope != null) createScope.close();
}
}
private void deleteTable() throws IOException {
TraceScope deleteScope = null;
try {
TraceUtil.addSampler(Sampler.ALWAYS);
try (TraceScope scope = TraceUtil.createTrace("deleteTable")) {
if (admin.tableExists(tableName)) {
deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
util.deleteTable(tableName);
}
} finally {
if (deleteScope != null) deleteScope.close();
}
}
@ -236,9 +217,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
byte[] value = new byte[300];
TraceUtil.addSampler(Sampler.ALWAYS);
for (int x = 0; x < 5000; x++) {
TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
try {
try (TraceScope traceScope = TraceUtil.createTrace("insertData")) {
for (int i = 0; i < 5; i++) {
long rk = random.nextLong();
rowKeys.add(rk);
@ -252,8 +233,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
if ((x % 1000) == 0) {
admin.flush(tableName);
}
} finally {
traceScope.close();
}
}
admin.flush(tableName);
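One API shift visible throughout this test: htrace 3 took a sampler on every Trace.startSpan(name, Sampler.ALWAYS) call, while htrace 4 registers samplers once on the Tracer, so TraceUtil.addSampler is hoisted out of the loops and each iteration only opens a scope. A hedged sketch of that shape (batch contents omitted; names are illustrative):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;

public class LoopTraceExample {
  public static void main(String[] args) {
    TraceUtil.initTracer(HBaseConfiguration.create());
    TraceUtil.addSampler(Sampler.ALWAYS);  // registered once, not per span
    for (int batch = 0; batch < 5; batch++) {
      try (TraceScope scope = TraceUtil.createTrace("insertData")) {
        // ... build and flush one batch of mutations here ...
        System.out.println("batch " + batch + " sent");
      }
    }
  }
}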


@ -181,7 +181,7 @@
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
@ -246,6 +246,10 @@
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -331,6 +335,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@ -377,6 +385,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -415,6 +427,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
@ -439,11 +457,23 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<!--maven dependency:analyze says not needed but tests fail w/o-->
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@ -818,7 +818,7 @@ public class TableMapReduceUtil {
com.google.protobuf.Message.class,
org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.class,
org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class,
org.apache.htrace.Trace.class,
org.apache.htrace.core.Tracer.class,
com.codahale.metrics.MetricRegistry.class,
org.apache.commons.lang3.ArrayUtils.class,
com.fasterxml.jackson.databind.ObjectMapper.class,


@ -31,10 +31,10 @@ import java.util.Date;
import java.util.LinkedList;
import java.util.Locale;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Queue;
import java.util.Random;
import java.util.TreeMap;
import java.util.NoSuchElementException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
@ -48,7 +48,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.AsyncConnection;
@ -81,9 +80,17 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.util.*;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.ByteArrayHashKey;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.Hash;
import org.apache.hadoop.hbase.util.MurmurHash;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.YammerHistogramUtils;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
@ -93,17 +100,15 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.impl.ProbabilitySampler;
import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.htrace.core.ProbabilitySampler;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import com.codahale.metrics.Histogram;
import com.codahale.metrics.UniformReservoir;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.MapperFeature;
import com.fasterxml.jackson.databind.ObjectMapper;
/**
* Script used evaluating HBase performance and scalability. Runs a HBase
@ -1034,7 +1039,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
protected final TestOptions opts;
private final Status status;
private final Sampler<?> traceSampler;
private final Sampler traceSampler;
private final SpanReceiverHost receiverHost;
private String testName;
@ -1182,17 +1187,15 @@ public class PerformanceEvaluation extends Configured implements Tool {
void testTimed() throws IOException, InterruptedException {
int startRow = getStartRow();
int lastRow = getLastRow();
TraceUtil.addSampler(traceSampler);
// Report on completion of 1/10th of total.
for (int ii = 0; ii < opts.cycles; ii++) {
if (opts.cycles > 1) LOG.info("Cycle=" + ii + " of " + opts.cycles);
for (int i = startRow; i < lastRow; i++) {
if (i % everyN != 0) continue;
long startTime = System.nanoTime();
TraceScope scope = Trace.startSpan("test row", traceSampler);
try {
try (TraceScope scope = TraceUtil.createTrace("test row");){
testRow(i);
} finally {
scope.close();
}
if ( (i - startRow) > opts.measureAfter) {
// If multiget is enabled, say set to 10, testRow() returns immediately first 9 times


@ -164,6 +164,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@ -230,6 +230,10 @@
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core4</artifactId>
</dependency>
</dependencies>
<profiles>
<!-- Skip the tests in this module -->


@ -165,6 +165,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@ -229,6 +233,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@ -202,6 +202,10 @@
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop-compat</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
@ -387,6 +391,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -426,6 +436,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>


@ -202,6 +202,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>net.java.dev.jets3t</groupId>
<artifactId>jets3t</artifactId>
@ -265,6 +269,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
</profile>


@ -538,9 +538,14 @@
</dependency>
<!-- tracing Dependencies -->
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core4</artifactId>
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<version>${htrace-hadoop.version}</version>
</dependency>
<dependency>
<groupId>com.lmax</groupId>


@ -23,11 +23,12 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.Server;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.apache.yetus.audience.InterfaceAudience;
/**
* Abstract base class for all HBase event handlers. Subclasses should
@ -74,7 +75,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
* Default base class constructor.
*/
public EventHandler(Server server, EventType eventType) {
this.parent = Trace.currentSpan();
this.parent = Tracer.getCurrentSpan();
this.server = server;
this.eventType = eventType;
seqid = seqids.incrementAndGet();
@ -99,13 +100,10 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
@Override
public void run() {
TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
try {
try (TraceScope scope = TraceUtil.createTrace(this.getClass().getSimpleName(), parent)) {
process();
} catch(Throwable t) {
handleException(t);
} finally {
chunk.close();
}
}
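EventHandler shows the cross-thread hand-off: the constructor captures Tracer.getCurrentSpan() on the submitting thread, and run() opens a child scope on the worker thread through the two-argument TraceUtil.createTrace. A hedged standalone sketch of that pattern (HandlerExample and its output are illustrative):

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class HandlerExample implements Runnable {
  private final Span parent;

  public HandlerExample() {
    // Captured on the submitting thread; may be null when tracing is off.
    this.parent = Tracer.getCurrentSpan();
  }

  @Override
  public void run() {
    // Opened on the worker thread and linked back to the submitter's span when one exists.
    try (TraceScope scope = TraceUtil.createTrace(getClass().getSimpleName(), parent)) {
      System.out.println("handling event");
    }
  }

  public static void main(String[] args) {
    ExecutorService pool = Executors.newSingleThreadExecutor();
    pool.execute(new HandlerExample());
    pool.shutdown();
  }
}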


@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ByteBufferKeyValue;
import org.apache.hadoop.hbase.SizeCachedKeyValue;
import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.fs.HFileSystem;
import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
@ -59,8 +60,7 @@ import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.IdLock;
import org.apache.hadoop.hbase.util.ObjectIntPair;
import org.apache.hadoop.io.WritableUtils;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@ -255,6 +255,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// Prefetch file blocks upon open if requested
if (cacheConf.shouldPrefetchOnOpen()) {
PrefetchExecutor.request(path, new Runnable() {
@Override
public void run() {
long offset = 0;
long end = 0;
@ -436,6 +437,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
* @return the total heap size of data and meta block indexes in bytes. Does
* not take into account non-root blocks of a multilevel data index.
*/
@Override
public long indexSize() {
return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
+ ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
@ -1239,6 +1241,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
}
}
@Override
public Path getPath() {
return path;
}
@ -1276,10 +1279,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
protected boolean decodeMemstoreTS = false;
@Override
public boolean isDecodeMemStoreTS() {
return this.decodeMemstoreTS;
}
@Override
public boolean shouldIncludeMemStoreTS() {
return includesMemstoreTS;
}
@ -1437,8 +1442,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
boolean useLock = false;
IdLock.Entry lockEntry = null;
TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock");
try {
try (TraceScope traceScope = TraceUtil.createTrace("HFileReaderImpl.readBlock")) {
while (true) {
// Check cache for block. If found return.
if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
@ -1453,9 +1457,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
if (LOG.isTraceEnabled()) {
LOG.trace("From Cache " + cachedBlock);
}
if (Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
}
TraceUtil.addTimelineAnnotation("blockCacheHit");
assert cachedBlock.isUnpacked() : "Packed block leak.";
if (cachedBlock.getBlockType().isData()) {
if (updateCacheMetrics) {
@ -1481,9 +1483,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
// Carry on, please load.
}
if (Trace.isTracing()) {
traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
}
TraceUtil.addTimelineAnnotation("blockCacheMiss");
// Load block from filesystem.
HFileBlock hfileBlock =
fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction);
@ -1505,7 +1505,6 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return unpacked;
}
} finally {
traceScope.close();
if (lockEntry != null) {
offsetLock.releaseLockEntry(lockEntry);
}
@ -1568,6 +1567,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
close(cacheConf.shouldEvictOnClose());
}
@Override
public void close(boolean evictOnClose) throws IOException {
PrefetchExecutor.cancel(path);
if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
@ -1580,11 +1580,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
fsBlockReader.closeStreams();
}
@Override
public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
return dataBlockEncoder.getEffectiveEncodingInCache(isCompaction);
}
/** For testing */
@Override
public HFileBlock.FSReader getUncachedBlockReader() {
return fsBlockReader;
}
@ -1612,6 +1614,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return curBlock != null;
}
@Override
public void setNonSeekedState() {
reset();
}
@ -1713,6 +1716,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
}
}
@Override
protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) {
return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock));
}
@ -1730,6 +1734,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
return seeker.seekToKeyInBlock(key, seekBefore);
}
@Override
public int compareKey(CellComparator comparator, Cell key) {
return seeker.compareKey(comparator, key);
}
@ -1776,6 +1781,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
* Returns false if block prefetching was requested for this file and has
* not completed, true otherwise
*/
@Override
@VisibleForTesting
public boolean prefetchComplete() {
return PrefetchExecutor.isCompleted(path);
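A side effect of routing annotations through TraceUtil in readBlock above: the old Trace.isTracing() guards disappear, because the helper checks Tracer.getCurrentSpan() itself and quietly does nothing when no span is active. A trivial hedged demonstration (assuming the null-check behavior shown in TraceUtil earlier in this diff):

import org.apache.hadoop.hbase.trace.TraceUtil;

public class NullSafeAnnotationExample {
  public static void main(String[] args) {
    // No Tracer initialized and no scope open: the current span is null, so the
    // helper returns without doing anything instead of requiring an isTracing() check.
    TraceUtil.addTimelineAnnotation("blockCacheMiss");
    System.out.println("no exception thrown");
  }
}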


@ -24,6 +24,7 @@ import java.util.Optional;
import org.apache.hadoop.hbase.CallDroppedException;
import org.apache.hadoop.hbase.CellScanner;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.yetus.audience.InterfaceStability;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@ -32,8 +33,6 @@ import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
/**
* The request processing logic, which is usually executed in thread pools provided by an
@ -116,20 +115,17 @@ public class CallRunner {
String error = null;
Pair<Message, CellScanner> resultPair = null;
RpcServer.CurCall.set(call);
TraceScope traceScope = null;
try {
if (!this.rpcServer.isStarted()) {
InetSocketAddress address = rpcServer.getListenerAddress();
throw new ServerNotRunningYetException("Server " +
(address != null ? address : "(channel closed)") + " is not running yet");
}
if (call.getTraceInfo() != null) {
String serviceName =
call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
String traceString = serviceName + "." + methodName;
traceScope = Trace.startSpan(traceString, call.getTraceInfo());
}
String serviceName =
call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
String traceString = serviceName + "." + methodName;
TraceUtil.createTrace(traceString);
// make the call
resultPair = this.rpcServer.call(call, this.status);
} catch (TimeoutIOException e){
@ -150,9 +146,6 @@ public class CallRunner {
throw (Error)e;
}
} finally {
if (traceScope != null) {
traceScope.close();
}
RpcServer.CurCall.set(null);
if (resultPair != null) {
this.rpcServer.addCallSize(call.getSize() * -1);
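The CallRunner hunk above collapses the old TraceScope bookkeeping into a single TraceUtil.createTrace call whose description is built from the RPC service and method names. A minimal sketch of that naming and scoping pattern, assuming only the TraceUtil facade introduced by this patch (the RpcTraceSketch class and its runTraced helper are hypothetical illustrations, not part of the patch):

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.TraceScope;

final class RpcTraceSketch {
  // Builds the "Service.method" description used by CallRunner above.
  static String traceName(String serviceName, String methodName) {
    return (serviceName == null ? "" : serviceName) + "."
        + (methodName == null ? "" : methodName);
  }

  // Opens a scope around the body; try-with-resources tolerates a null scope
  // when tracing is disabled, so no explicit null check is needed here.
  static void runTraced(String serviceName, String methodName, Runnable body) {
    try (TraceScope scope = TraceUtil.createTrace(traceName(serviceName, methodName))) {
      body.run();
    }
  }
}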

View File

@ -181,7 +181,7 @@ public class NettyRpcServer extends RpcServer {
Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
long startTime, int timeout) throws IOException {
NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null,
-1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
-1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
return call(fakeCall, status);
}
}

View File

@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.htrace.TraceInfo;
/**
* Datastructure that holds all necessary to a method invocation and then afterward, carries the
@ -40,9 +39,9 @@ class NettyServerCall extends ServerCall<NettyServerRpcConnection> {
NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size,
TraceInfo tinfo, InetAddress remoteAddress, long receiveTime, int timeout,
InetAddress remoteAddress, long receiveTime, int timeout,
ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
}

View File

@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.htrace.TraceInfo;
/**
* RpcConnection implementation for netty rpc server.
@ -119,9 +118,9 @@ class NettyServerRpcConnection extends ServerRpcConnection {
@Override
public NettyServerCall createCall(int id, final BlockingService service,
final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner,
long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout,
long size, final InetAddress remoteAddress, int timeout,
CallCleanup reqCleanup) {
return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
this.rpcServer.cellBlockBuilder, reqCleanup);
}

View File

@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.htrace.TraceInfo;
/**
* Interface of all necessary to carry out a RPC method invocation on the server.
@ -133,9 +132,4 @@ public interface RpcCall extends RpcCallContext {
* @return A short string format of this call without possibly lengthy params
*/
String toShortString();
/**
* @return TraceInfo attached to this call.
*/
TraceInfo getTraceInfo();
}

View File

@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeade
import org.apache.hadoop.hbase.util.ByteBufferUtils;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.TraceInfo;
/**
* Datastructure that holds all necessary to a method invocation and then afterward, carries
@ -79,7 +78,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
protected final long size; // size of current call
protected boolean isError;
protected final TraceInfo tinfo;
protected ByteBufferListOutputStream cellBlockStream = null;
protected CallCleanup reqCleanup = null;
@ -96,7 +94,7 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
justification="Can't figure why this complaint is happening... see below")
ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
Message param, CellScanner cellScanner, T connection, long size, TraceInfo tinfo,
Message param, CellScanner cellScanner, T connection, long size,
InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
this.id = id;
@ -110,7 +108,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
this.response = null;
this.isError = false;
this.size = size;
this.tinfo = tinfo;
if (connection != null) {
this.user = connection.user;
this.retryImmediatelySupported = connection.retryImmediatelySupported;
@ -506,11 +503,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
return connection.getRemotePort();
}
@Override
public TraceInfo getTraceInfo() {
return tinfo;
}
@Override
public synchronized BufferChain getResponse() {
return response;

View File

@ -77,7 +77,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.htrace.TraceInfo;
/** Reads calls from a connection and queues them for handling. */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(
@ -632,7 +631,7 @@ abstract class ServerRpcConnection implements Closeable {
if ((totalRequestSize +
this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) {
final ServerCall<?> callTooBig = createCall(id, this.service, null, null, null, null,
totalRequestSize, null, null, 0, this.callCleanup);
totalRequestSize, null, 0, this.callCleanup);
this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION,
"Call queue is full on " + this.rpcServer.server.getServerName() +
@ -694,21 +693,18 @@ abstract class ServerRpcConnection implements Closeable {
}
ServerCall<?> readParamsFailedCall = createCall(id, this.service, null, null, null, null,
totalRequestSize, null, null, 0, this.callCleanup);
totalRequestSize, null, 0, this.callCleanup);
readParamsFailedCall.setResponse(null, null, t, msg + "; " + t.getMessage());
readParamsFailedCall.sendResponseIfReady();
return;
}
TraceInfo traceInfo = header.hasTraceInfo() ? new TraceInfo(header
.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
: null;
int timeout = 0;
if (header.hasTimeout() && header.getTimeout() > 0) {
timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
}
ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize,
traceInfo, this.addr, timeout, this.callCleanup);
this.addr, timeout, this.callCleanup);
if (!this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize());
@ -790,7 +786,7 @@ abstract class ServerRpcConnection implements Closeable {
public abstract boolean isConnectionOpen();
public abstract ServerCall<?> createCall(int id, BlockingService service, MethodDescriptor md,
RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
RequestHeader header, Message param, CellScanner cellScanner, long size,
InetAddress remoteAddress, int timeout, CallCleanup reqCleanup);
private static class ByteBuffByteInput extends ByteInput {

View File

@ -489,7 +489,7 @@ public class SimpleRpcServer extends RpcServer {
Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
long startTime, int timeout) throws IOException {
SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner,
null, -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
null, -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
return call(fakeCall, status);
}

View File

@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.htrace.TraceInfo;
/**
* Datastructure that holds all necessary to a method invocation and then afterward, carries the
@ -43,10 +42,10 @@ class SimpleServerCall extends ServerCall<SimpleServerRpcConnection> {
justification = "Can't figure why this complaint is happening... see below")
SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md,
RequestHeader header, Message param, CellScanner cellScanner,
SimpleServerRpcConnection connection, long size, TraceInfo tinfo,
SimpleServerRpcConnection connection, long size,
final InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) {
super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
this.responder = responder;
}

View File

@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.htrace.TraceInfo;
/** Reads calls from a connection and queues them for handling. */
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT",
@ -212,7 +211,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
// Notify the client about the offending request
SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
null, null, null, this, 0, null, this.addr, System.currentTimeMillis(), 0,
null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, null, responder);
this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
// Make sure the client recognizes the underlying exception
@ -343,9 +342,9 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
@Override
public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md,
RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
RequestHeader header, Message param, CellScanner cellScanner, long size,
InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
}

View File

@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
import org.apache.hadoop.hbase.replication.regionserver.Replication;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
@ -470,6 +471,7 @@ public class HMaster extends HRegionServer implements MasterServices {
public HMaster(final Configuration conf)
throws IOException, KeeperException {
super(conf);
TraceUtil.initTracer(conf);
try {
this.rsFatals = new MemoryBoundedLogMessageBuffer(
conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
import org.apache.hadoop.hbase.MasterNotRunningException;
import org.apache.hadoop.hbase.ZNodeClearer;
import org.apache.hadoop.hbase.ZooKeeperConnectionException;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
@ -147,6 +148,8 @@ public class HMasterCommandLine extends ServerCommandLine {
private int startMaster() {
Configuration conf = getConf();
TraceUtil.initTracer(conf);
try {
// If 'local', defer to LocalHBaseCluster instance. Starts master
// and regionserver both in the one JVM.

View File

@ -91,11 +91,11 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.KeyValueUtil;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.PrivateCellUtil;
import org.apache.hadoop.hbase.RegionTooBusyException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Tag;
@ -149,33 +149,6 @@ import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController
import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HashedBytes;
import org.apache.hadoop.hbase.util.NonceKey;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
@ -198,6 +171,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescript
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CancelableProgressable;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.CompressionTest;
import org.apache.hadoop.hbase.util.EncryptionTest;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HashedBytes;
import org.apache.hadoop.hbase.util.NonceKey;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALFactory;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
import org.apache.hadoop.io.MultipleIOException;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import edu.umd.cs.findbugs.annotations.Nullable;
@ -3727,6 +3726,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
}
@Override
public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
}
@ -5560,16 +5560,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
RowLockContext rowLockContext = null;
RowLockImpl result = null;
TraceScope traceScope = null;
// If we're tracing start a span to show how long this took.
if (Trace.isTracing()) {
traceScope = Trace.startSpan("HRegion.getRowLock");
traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
}
boolean success = false;
try {
try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
TraceUtil.addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
// Keep trying until we have a lock or error out.
// TODO: do we need to add a time component here?
while (result == null) {
@ -5598,9 +5592,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
if (traceScope != null) {
traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
}
TraceUtil.addTimelineAnnotation("Failed to get row lock");
result = null;
String message = "Timed out waiting for lock for row: " + rowKey + " in region "
+ getRegionInfo().getEncodedName();
@ -5618,9 +5610,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
InterruptedIOException iie = new InterruptedIOException();
iie.initCause(ie);
if (traceScope != null) {
traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
}
TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock");
Thread.currentThread().interrupt();
throw iie;
} finally {
@ -5628,9 +5618,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
if (!success && rowLockContext != null) {
rowLockContext.cleanUp();
}
if (traceScope != null) {
traceScope.close();
}
}
}
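The getRowLock change above is the shape this patch applies throughout: the old isTracing()/startSpan()/close() sequence becomes a try-with-resources block, and per-span notes go through TraceUtil.addTimelineAnnotation, which the call sites here invoke without first checking for an active span. A minimal sketch of the pattern with the lock-acquisition loop elided (tryAcquire is a hypothetical stand-in; readLock and timeout are the locals from the hunk above):

// Sketch of the try-with-resources tracing pattern used in getRowLock above.
// Assumes org.apache.hadoop.hbase.trace.TraceUtil and org.apache.htrace.core.TraceScope.
try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
  TraceUtil.addTimelineAnnotation("Getting a " + (readLock ? "readLock" : "writeLock"));
  boolean acquired = tryAcquire(timeout);   // hypothetical placeholder for the retry loop
  if (!acquired) {
    TraceUtil.addTimelineAnnotation("Failed to get row lock");
  }
}  // the scope, if one was created, is closed here even when an exception escapes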

View File

@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CompressionTest;
@ -526,6 +527,7 @@ public class HRegionServer extends HasThread implements
// Defer till after we register with the Master as much as possible. See #startServices.
public HRegionServer(Configuration conf) throws IOException {
super("RegionServer"); // thread name
TraceUtil.initTracer(conf);
try {
this.startcode = System.currentTimeMillis();
this.conf = conf;

View File

@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
@ -50,6 +51,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
private int start() throws Exception {
Configuration conf = getConf();
TraceUtil.initTracer(conf);
try {
// If 'local', don't start a region server here. Defer to
// LocalHBaseCluster. It manages 'local' clusters.

View File

@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionReplicaUtil;
import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.HasThread;
@ -51,12 +53,9 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
/**
* Thread that flushes cache on request
*
@ -447,7 +446,7 @@ class MemStoreFlusher implements FlushRequester {
"store files; delaying flush up to " + this.blockingWaitTime + "ms");
if (!this.server.compactSplitThread.requestSplit(region)) {
try {
this.server.compactSplitThread.requestSystemCompaction((HRegion) region,
this.server.compactSplitThread.requestSystemCompaction(region,
Thread.currentThread().getName());
} catch (IOException e) {
e = e instanceof RemoteException ?
@ -572,12 +571,10 @@ class MemStoreFlusher implements FlushRequester {
* amount of memstore consumption.
*/
public void reclaimMemStoreMemory() {
TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
TraceScope scope = TraceUtil.createTrace("MemStoreFluser.reclaimMemStoreMemory");
FlushType flushType = isAboveHighWaterMark();
if (flushType != FlushType.NORMAL) {
if (Trace.isTracing()) {
scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
}
TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
long start = EnvironmentEdgeManager.currentTime();
synchronized (this.blockSignal) {
boolean blocked = false;
@ -640,7 +637,9 @@ class MemStoreFlusher implements FlushRequester {
} else if (isAboveLowWaterMark() != FlushType.NORMAL) {
wakeupFlushThread();
}
scope.close();
if (scope != null) {
scope.close();
}
}
private void logMsg(String string1, long val, long max) {
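reclaimMemStoreMemory above keeps an explicit scope reference instead of try-with-resources because the scope has to stay open across the blocking wait later in the method; the guarded close at the end suggests TraceUtil.createTrace may return null when no tracer or sampler is active. A minimal sketch of that shape, with the flush and wait logic elided (isAboveHighWaterMark and FlushType.NORMAL are the names from the hunk above):

// Sketch: explicit scope with a guarded close, mirroring reclaimMemStoreMemory above.
TraceScope scope = TraceUtil.createTrace("MemStoreFlusher.reclaimMemStoreMemory");
try {
  if (isAboveHighWaterMark() != FlushType.NORMAL) {
    TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
    // ... block until enough memstore memory has been reclaimed ...
  }
} finally {
  if (scope != null) {
    scope.close();
  }
}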

View File

@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.util.CommonFSUtils;
@ -72,14 +74,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.util.StringUtils;
import org.apache.htrace.NullScope;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
import com.lmax.disruptor.RingBuffer;
/**
@ -681,8 +679,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
* @throws IOException if there is a problem flushing or closing the underlying FS
*/
Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
TraceScope scope = Trace.startSpan("FSHFile.replaceWriter");
try {
try (TraceScope scope = TraceUtil.createTrace("FSHFile.replaceWriter")) {
long oldFileLen = doReplaceWriter(oldPath, newPath, nextWriter);
int oldNumEntries = this.numEntries.getAndSet(0);
final String newPathString = (null == newPath ? null : CommonFSUtils.getPath(newPath));
@ -696,16 +693,16 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
LOG.info("New WAL " + newPathString);
}
return newPath;
} finally {
scope.close();
}
}
protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
// Now we have published the ringbuffer, halt the current thread until we get an answer back.
try {
syncFuture.get(walSyncTimeoutNs);
return syncFuture.getSpan();
if (syncFuture != null) {
syncFuture.get(walSyncTimeoutNs);
}
return (syncFuture == null) ? null : syncFuture.getSpan();
} catch (TimeoutIOException tioe) {
// SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
// still refer to it, so if this thread use it next time may get a wrong
@ -748,8 +745,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
LOG.debug("WAL closing. Skipping rolling of writer");
return regionsToFlush;
}
TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
try {
try (TraceScope scope = TraceUtil.createTrace("FSHLog.rollWriter")) {
Path oldPath = getOldPath();
Path newPath = getNewPath();
// Any exception from here on is catastrophic, non-recoverable so we currently abort.
@ -774,8 +770,6 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
"for details.", exception);
} finally {
closeBarrier.endOp();
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close();
}
return regionsToFlush;
} finally {
@ -950,7 +944,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
if (timeInNanos > this.slowSyncNs) {
String msg = new StringBuilder().append("Slow sync cost: ").append(timeInNanos / 1000000)
.append(" ms, current pipeline: ").append(Arrays.toString(getPipeline())).toString();
Trace.addTimelineAnnotation(msg);
TraceUtil.addTimelineAnnotation(msg);
LOG.info(msg);
}
if (!listeners.isEmpty()) {
@ -966,16 +960,20 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
if (this.closed) {
throw new IOException("Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
}
TraceScope scope = Trace.startSpan(implClassName + ".append");
MutableLong txidHolder = new MutableLong();
MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
txidHolder.setValue(ringBuffer.next());
});
long txid = txidHolder.longValue();
try {
try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
entry.stampRegionSequenceId(we);
ringBuffer.get(txid).load(entry, scope.detach());
if (scope != null) {
ringBuffer.get(txid).load(entry, scope.getSpan());
} else {
ringBuffer.get(txid).load(entry, null);
}
} finally {
ringBuffer.publish(txid);
}
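In the append path above, the htrace-3 idiom of detaching the scope and re-attaching it on the consumer thread is gone; the patch instead hands the raw Span obtained from scope.getSpan(), or null when tracing is off, through the ring buffer truck. A condensed sketch of that handoff, using the fields and locals from the hunk above:

// Sketch of the span handoff in AbstractFSWAL.append above; the ternary is
// equivalent to the if/else in the hunk.
try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
  FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
  entry.stampRegionSequenceId(we);
  // Pass the span (or null) to the consumer thread that performs the write.
  ringBuffer.get(txid).load(entry, scope != null ? scope.getSpan() : null);
} finally {
  ringBuffer.publish(txid);
}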

View File

@ -44,6 +44,8 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseInterfaceAudience;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.ConnectionUtils;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
@ -52,18 +54,14 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.htrace.NullScope;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.SingleThreadEventExecutor;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import com.lmax.disruptor.RingBuffer;
import com.lmax.disruptor.Sequence;
@ -342,9 +340,9 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
}
private void addTimeAnnotation(SyncFuture future, String annotation) {
TraceScope scope = Trace.continueSpan(future.getSpan());
Trace.addTimelineAnnotation(annotation);
future.setSpan(scope.detach());
TraceUtil.addTimelineAnnotation(annotation);
//TODO handle htrace API change, see HBASE-18895
//future.setSpan(scope.getSpan());
}
private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) {
@ -415,14 +413,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
Span span = entry.detachSpan();
// the span maybe null if this is a retry after rolling.
if (span != null) {
TraceScope scope = Trace.continueSpan(span);
//TODO handle htrace API change, see HBASE-18895
//TraceScope scope = Trace.continueSpan(span);
try {
appended = append(writer, entry);
} catch (IOException e) {
throw new AssertionError("should not happen", e);
} finally {
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close(); // append scope is complete
//TODO handle htrace API change, see HBASE-18895
//assert scope == NullScope.INSTANCE || !scope.isDetached();
//scope.close(); // append scope is complete
}
} else {
try {
@ -559,24 +559,26 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
@Override
public void sync() throws IOException {
TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
try {
try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) {
long txid = waitingConsumePayloads.next();
SyncFuture future;
SyncFuture future = null;
try {
future = getSyncFuture(txid, scope.detach());
RingBufferTruck truck = waitingConsumePayloads.get(txid);
truck.load(future);
if (scope != null) {
future = getSyncFuture(txid, scope.getSpan());
RingBufferTruck truck = waitingConsumePayloads.get(txid);
truck.load(future);
}
} finally {
waitingConsumePayloads.publish(txid);
}
if (shouldScheduleConsumer()) {
eventLoop.execute(consumer);
}
scope = Trace.continueSpan(blockOnSync(future));
} finally {
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close();
//TODO handle htrace API change, see HBASE-18895
//scope = Trace.continueSpan(blockOnSync(future));
if (future != null) {
blockOnSync(future);
}
}
}
@ -585,25 +587,27 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
if (highestSyncedTxid.get() >= txid) {
return;
}
TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
try {
try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) {
// here we do not use ring buffer sequence as txid
long sequence = waitingConsumePayloads.next();
SyncFuture future;
SyncFuture future = null;
try {
future = getSyncFuture(txid, scope.detach());
RingBufferTruck truck = waitingConsumePayloads.get(sequence);
truck.load(future);
if (scope != null) {
future = getSyncFuture(txid, scope.getSpan());
RingBufferTruck truck = waitingConsumePayloads.get(sequence);
truck.load(future);
}
} finally {
waitingConsumePayloads.publish(sequence);
}
if (shouldScheduleConsumer()) {
eventLoop.execute(consumer);
}
scope = Trace.continueSpan(blockOnSync(future));
} finally {
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close();
//TODO handle htrace API change, see HBASE-18895
//scope = Trace.continueSpan(blockOnSync(future));
if (future != null) {
blockOnSync(future);
}
}
}

View File

@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@ -54,10 +55,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
import org.apache.hadoop.hdfs.DFSOutputStream;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.htrace.NullScope;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@ -345,7 +344,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
// use assert to make sure no change breaks the logic that
// sequence and zigzagLatch will be set together
assert sequence > 0L : "Failed to get sequence from ring buffer";
Trace.addTimelineAnnotation("awaiting safepoint");
TraceUtil.addTimelineAnnotation("awaiting safepoint");
syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence));
}
} catch (FailedSyncBeforeLogCloseException e) {
@ -361,9 +360,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
if (this.writer != null) {
oldFileLen = this.writer.getLength();
try {
Trace.addTimelineAnnotation("closing writer");
TraceUtil.addTimelineAnnotation("closing writer");
this.writer.close();
Trace.addTimelineAnnotation("writer closed");
TraceUtil.addTimelineAnnotation("writer closed");
this.closeErrorCount.set(0);
} catch (IOException ioe) {
int errors = closeErrorCount.incrementAndGet();
@ -595,13 +594,14 @@ public class FSHLog extends AbstractFSWAL<Writer> {
}
// I got something. Lets run. Save off current sequence number in case it changes
// while we run.
TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
//TODO handle htrace API change, see HBASE-18895
//TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
long start = System.nanoTime();
Throwable lastException = null;
try {
Trace.addTimelineAnnotation("syncing writer");
TraceUtil.addTimelineAnnotation("syncing writer");
writer.sync();
Trace.addTimelineAnnotation("writer synced");
TraceUtil.addTimelineAnnotation("writer synced");
currentSequence = updateHighestSyncedSequence(currentSequence);
} catch (IOException e) {
LOG.error("Error syncing, request close of WAL", e);
@ -611,7 +611,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
lastException = e;
} finally {
// reattach the span to the future before releasing.
takeSyncFuture.setSpan(scope.detach());
//TODO handle htrace API change, see HBASE-18895
// takeSyncFuture.setSpan(scope.getSpan());
// First release what we 'took' from the queue.
syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException);
// Can we release other syncs?
@ -727,8 +728,15 @@ public class FSHLog extends AbstractFSWAL<Writer> {
}
// Sync all known transactions
private Span publishSyncThenBlockOnCompletion(Span span) throws IOException {
return blockOnSync(publishSyncOnRingBuffer(span));
private void publishSyncThenBlockOnCompletion(TraceScope scope) throws IOException {
if (scope != null) {
SyncFuture syncFuture = publishSyncOnRingBuffer(scope.getSpan());
blockOnSync(syncFuture);
} else {
SyncFuture syncFuture = publishSyncOnRingBuffer(null);
blockOnSync(syncFuture);
}
}
/**
@ -754,12 +762,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
@Override
public void sync() throws IOException {
TraceScope scope = Trace.startSpan("FSHLog.sync");
try {
scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
} finally {
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close();
try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
publishSyncThenBlockOnCompletion(scope);
}
}
@ -769,12 +773,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
// Already sync'd.
return;
}
TraceScope scope = Trace.startSpan("FSHLog.sync");
try {
scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
} finally {
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close();
try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
publishSyncThenBlockOnCompletion(scope);
}
}
@ -996,7 +996,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
}
} else if (truck.type() == RingBufferTruck.Type.APPEND) {
FSWALEntry entry = truck.unloadAppend();
TraceScope scope = Trace.continueSpan(entry.detachSpan());
//TODO handle htrace API change, see HBASE-18895
//TraceScope scope = Trace.continueSpan(entry.detachSpan());
try {
if (this.exception != null) {
@ -1015,9 +1016,6 @@ public class FSHLog extends AbstractFSWAL<Writer> {
: new DamagedWALException("On sync", this.exception));
// Return to keep processing events coming off the ringbuffer
return;
} finally {
assert scope == NullScope.INSTANCE || !scope.isDetached();
scope.close(); // append scope is complete
}
} else {
// What is this if not an append or sync. Fail all up to this!!!

View File

@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.CollectionUtils;
import org.apache.hadoop.hbase.wal.WAL.Entry;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.hadoop.hbase.wal.WALKey;
import org.apache.htrace.Span;
import org.apache.htrace.core.Span;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;

View File

@ -18,8 +18,8 @@
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.htrace.core.Span;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.htrace.Span;
/**
* A 'truck' to carry a payload across the ring buffer from Handler to WAL. Has EITHER a

View File

@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
import org.apache.htrace.Span;
import org.apache.htrace.core.Span;
/**
* A Future on a filesystem sync call. It is given to a client or 'Handler' for it to wait on till the

View File

@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate;
import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.RegionInfoBuilder;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.yetus.audience.InterfaceAudience;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.BufferedMutator;
@ -657,6 +658,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
setLevel(org.apache.log4j.Level.ERROR);
TraceUtil.initTracer(conf);
this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
true, null, null, hosts, null);
@ -1125,6 +1127,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
}
Configuration c = new Configuration(this.conf);
TraceUtil.initTracer(c);
this.hbaseCluster =
new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
// Don't leave here till we've done a successful scan of the hbase:meta

View File

@ -563,7 +563,7 @@ public class TestSimpleRpcScheduler {
ServerCall putCall = new ServerCall(1, null, null,
RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(),
RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))),
null, null, 9, null, null, timestamp, 0, null, null, null) {
null, null, 9, null, timestamp, 0, null, null, null) {
@Override
public void sendResponseIfReady() throws IOException {

View File

@ -18,25 +18,21 @@
package org.apache.hadoop.hbase.trace;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertTrue;
import java.lang.reflect.Method;
import java.util.Collection;
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.Waiter;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
import org.apache.hadoop.hbase.testclassification.MediumTests;
import org.apache.hadoop.hbase.testclassification.MiscTests;
import org.apache.htrace.Sampler;
import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.TraceTree;
import org.apache.htrace.impl.POJOSpanReceiver;
import org.apache.htrace.core.POJOSpanReceiver;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;
import org.apache.htrace.core.TraceScope;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Rule;
@ -44,103 +40,84 @@ import org.junit.Test;
import org.junit.experimental.categories.Category;
import org.junit.rules.TestName;
import java.util.Collection;
import java.util.LinkedList;
import java.util.List;
@Category({MiscTests.class, MediumTests.class})
public class TestHTraceHooks {
private static final byte[] FAMILY_BYTES = "family".getBytes();
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
private static POJOSpanReceiver rcvr;
private static long ROOT_SPAN_ID = 0;
private static SpanId ROOT_SPAN_ID = new SpanId(0, 0);
@Rule
public TestName name = new TestName();
@BeforeClass
public static void before() throws Exception {
// Find out what the right value to use for SPAN_ROOT_ID is after HTRACE-111. We use HTRACE-32
// to detect whether we are using HTrace 3.2 or not.
try {
Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class);
} catch (NoSuchMethodException e) {
ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2
}
TEST_UTIL.startMiniCluster(2, 3);
rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
Trace.addReceiver(rcvr);
TraceUtil.addReceiver(rcvr);
TraceUtil.addSampler(new Sampler() {
@Override
public boolean next() {
return true;
}
});
}
@AfterClass
public static void after() throws Exception {
TEST_UTIL.shutdownMiniCluster();
Trace.removeReceiver(rcvr);
TraceUtil.removeReceiver(rcvr);
rcvr = null;
}
@Test
public void testTraceCreateTable() throws Exception {
TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
Table table;
try {
Span createTableSpan;
try (TraceScope scope = TraceUtil.createTrace("creating table")) {
createTableSpan = scope.getSpan();
table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES);
} finally {
tableCreationSpan.close();
}
// Some table creation is async. Need to make sure that everything is full in before
// checking to see if the spans are there.
TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
@Override
public boolean evaluate() throws Exception {
return rcvr.getSpans().size() >= 5;
TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
@Override public boolean evaluate() throws Exception {
return (rcvr == null) ? true : rcvr.getSpans().size() >= 5;
}
});
Collection<Span> spans = rcvr.getSpans();
Collection<Span> spans = Sets.newHashSet(rcvr.getSpans());
List<Span> roots = new LinkedList<>();
TraceTree traceTree = new TraceTree(spans);
Collection<Span> roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
roots.addAll(traceTree.getSpansByParent().find(createTableSpan.getSpanId()));
assertEquals(1, roots.size());
Span createTableRoot = roots.iterator().next();
assertEquals(3, roots.size());
assertEquals("creating table", createTableSpan.getDescription());
assertEquals("creating table", createTableRoot.getDescription());
int createTableCount = 0;
for (Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) {
if (s.getDescription().startsWith("MasterService.CreateTable")) {
createTableCount++;
}
if (spans != null) {
assertTrue(spans.size() > 5);
}
assertTrue(createTableCount >= 1);
assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
assertTrue(spans.size() > 5);
Put put = new Put("row".getBytes());
put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
try {
Span putSpan;
try (TraceScope scope = TraceUtil.createTrace("doing put")) {
putSpan = scope.getSpan();
table.put(put);
} finally {
putSpan.close();
}
spans = rcvr.getSpans();
traceTree = new TraceTree(spans);
roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
assertEquals(2, roots.size());
Span putRoot = null;
for (Span root : roots) {
if (root.getDescription().equals("doing put")) {
putRoot = root;
}
}
assertNotNull(putRoot);
roots.clear();
roots.addAll(traceTree.getSpansByParent().find(putSpan.getSpanId()));
assertEquals(1, roots.size());
}
}

View File

@ -0,0 +1,134 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.trace;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;
/**
* Used to create the graph formed by spans.
*/
public class TraceTree {
public static class SpansByParent {
private final Set<Span> set;
private final HashMap<SpanId, LinkedList<Span>> parentToSpans;
SpansByParent(Collection<Span> spans) {
set = new LinkedHashSet<Span>();
parentToSpans = new HashMap<SpanId, LinkedList<Span>>();
if (spans == null) {
return;
}
for (Span span : spans) {
set.add(span);
for (SpanId parent : span.getParents()) {
LinkedList<Span> list = parentToSpans.get(parent);
if (list == null) {
list = new LinkedList<Span>();
parentToSpans.put(parent, list);
}
list.add(span);
}
if (span.getParents().length == 0) {
LinkedList<Span> list = parentToSpans.get(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE));
if (list == null) {
list = new LinkedList<Span>();
parentToSpans.put(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE), list);
}
list.add(span);
}
}
}
public List<Span> find(SpanId parentId) {
LinkedList<Span> spans = parentToSpans.get(parentId);
if (spans == null) {
return new LinkedList<Span>();
}
return spans;
}
public Iterator<Span> iterator() {
return Collections.unmodifiableSet(set).iterator();
}
}
public static class SpansByProcessId {
private final Set<Span> set;
SpansByProcessId(Collection<Span> spans) {
set = new LinkedHashSet<Span>();
if (spans == null) {
return;
}
for (Span span : spans) {
set.add(span);
}
}
public Iterator<Span> iterator() {
return Collections.unmodifiableSet(set).iterator();
}
}
private final SpansByParent spansByParent;
private final SpansByProcessId spansByProcessId;
/**
* Create a new TraceTree
*
* @param spans The collection of spans to use to create this TraceTree. Should
* have at least one root span.
*/
public TraceTree(Collection<Span> spans) {
this.spansByParent = new SpansByParent(spans);
this.spansByProcessId = new SpansByProcessId(spans);
}
public SpansByParent getSpansByParent() {
return spansByParent;
}
public SpansByProcessId getSpansByProcessId() {
return spansByProcessId;
}
@Override
public String toString() {
StringBuilder bld = new StringBuilder();
String prefix = "";
for (Iterator<Span> iter = spansByParent.iterator(); iter.hasNext();) {
Span span = iter.next();
bld.append(prefix).append(span.toString());
prefix = "\n";
}
return bld.toString();
}
}
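TraceTree above is a small test-side helper rewritten against the htrace-core4 types: it indexes a collection of Spans by parent SpanId so tests can walk the span graph. A minimal usage sketch, assuming the spans gathered by the POJOSpanReceiver in TestHTraceHooks (rcvr and createTableSpan are that test's receiver and span; imports of Span, Collection and List assumed):

// Sketch: walking the span graph collected during a traced operation.
Collection<Span> spans = Sets.newHashSet(rcvr.getSpans());
TraceTree tree = new TraceTree(spans);
// Direct children of the "creating table" span created by the test.
List<Span> children = tree.getSpansByParent().find(createTableSpan.getSpanId());
for (Span child : children) {
  System.out.println(child.getDescription());
}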

View File

@ -58,16 +58,17 @@ import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.htrace.Sampler;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;
import org.apache.htrace.impl.ProbabilitySampler;
import org.apache.htrace.core.ProbabilitySampler;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;
import org.apache.yetus.audience.InterfaceAudience;
import com.codahale.metrics.ConsoleReporter;
@ -172,15 +173,13 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
Random rand = new Random(Thread.currentThread().getId());
WAL wal = region.getWAL();
TraceScope threadScope =
Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
try {
try (TraceScope threadScope = TraceUtil.createTrace("WALPerfEval." + Thread.currentThread().getName())) {
long startTime = System.currentTimeMillis();
int lastSync = 0;
TraceUtil.addSampler(loopSampler);
for (int i = 0; i < numIterations; ++i) {
assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
try {
assert Tracer.getCurrentSpan() == threadScope.getSpan() : "Span leak detected.";
try (TraceScope loopScope = TraceUtil.createTrace("runLoopIter" + i)) {
long now = System.nanoTime();
Put put = setupPut(rand, key, value, numFamilies);
WALEdit walEdit = new WALEdit();
@ -196,16 +195,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
}
}
latencyHistogram.update(System.nanoTime() - now);
} finally {
loopScope.close();
}
}
long totalTime = (System.currentTimeMillis() - startTime);
logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
} catch (Exception e) {
LOG.error(getClass().getSimpleName() + " Thread failed", e);
} finally {
threadScope.close();
}
}
}
@ -315,8 +310,9 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
LOG.info("FileSystem: " + fs);
SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
final Sampler<?> sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
TraceScope scope = Trace.startSpan("WALPerfEval", sampler);
final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
TraceUtil.addSampler(sampler);
TraceScope scope = TraceUtil.createTrace("WALPerfEval");
try {
if (rootRegionDir == null) {
@ -338,8 +334,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
// a table per desired region means we can avoid carving up the key space
final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
syncInterval, traceFreq));
benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
syncInterval, traceFreq), "");
}
ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
@ -389,9 +385,15 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
}
} finally {
// We may be called inside a test that wants to keep on using the fs.
if (!noclosefs) fs.close();
scope.close();
if (receiverHost != null) receiverHost.closeReceivers();
if (!noclosefs) {
fs.close();
}
if (scope != null) {
scope.close();
}
if (receiverHost != null) {
receiverHost.closeReceivers();
}
}
return(0);
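WALPerformanceEvaluation above exercises the remaining pieces of the TraceUtil facade: samplers are registered globally via TraceUtil.addSampler instead of being passed to startSpan, and the benchmark Runnables are wrapped with TraceUtil.wrap(runnable, description). A minimal sketch of wrapping and running a traced task, assuming those two methods as used above (the description string is illustrative):

// Sketch: register a sampler, then wrap a Runnable so it runs inside a span.
TraceUtil.addSampler(Sampler.ALWAYS);
Runnable traced = TraceUtil.wrap(new Runnable() {
  @Override
  public void run() {
    // benchmark body elided; executes inside a span when tracing is enabled
  }
}, "WALPerfEval.sketch");
traced.run();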

View File

@ -305,6 +305,12 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -352,6 +358,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -364,6 +374,10 @@
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -375,6 +389,10 @@
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -429,11 +447,21 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>

View File

@ -16,8 +16,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
#
HTrace = org.apache.htrace.Trace
java_import org.apache.htrace.Sampler
HTrace = org.apache.htrace.core.Tracer
java_import org.apache.htrace.core.Sampler
java_import org.apache.hadoop.hbase.trace.SpanReceiverHost
module Shell

716
hbase-spark/pom.xml Normal file
View File

@ -0,0 +1,716 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
-->
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
<modelVersion>4.0.0</modelVersion>
<parent>
<artifactId>hbase-build-configuration</artifactId>
<groupId>org.apache.hbase</groupId>
<version>3.0.0-SNAPSHOT</version>
<relativePath>../hbase-build-configuration</relativePath>
</parent>
<artifactId>hbase-spark</artifactId>
<name>Apache HBase - Spark</name>
<properties>
<spark.version>1.6.0</spark.version>
<scala.version>2.10.4</scala.version>
<scala.binary.version>2.10</scala.binary.version>
<top.dir>${project.basedir}/..</top.dir>
</properties>
<dependencies>
<dependency>
<groupId>org.apache.hbase.thirdparty</groupId>
<artifactId>hbase-shaded-miscellaneous</artifactId>
</dependency>
<!-- Force import of Spark's servlet API for unit tests -->
<dependency>
<groupId>javax.servlet</groupId>
<artifactId>javax.servlet-api</artifactId>
<scope>test</scope>
</dependency>
<!-- Mark Spark / Scala as provided -->
<dependency>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
<version>${scala.version}</version>
<scope>provided</scope>
</dependency>
<!-- we exclude jsr305 below and then expressly relist it as
provided / optional to avoid dependency resolution possibly
bringing it back into runtime scope.
-->
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-core_${scala.binary.version}</artifactId>
<version>${spark.version}</version>
<scope>provided</scope>
<exclusions>
<exclusion>
<!-- make sure wrong scala version is not pulled in -->
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</exclusion>
<exclusion>
<!-- make sure wrong scala version is not pulled in -->
<groupId>org.scala-lang</groupId>
<artifactId>scalap</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
<version>1.3.9</version>
<scope>provided</scope>
<optional>true</optional>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-sql_${scala.binary.version}</artifactId>
<version>${spark.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_${scala.binary.version}</artifactId>
<version>${spark.version}</version>
<scope>provided</scope>
</dependency>
<dependency>
<groupId>org.apache.spark</groupId>
<artifactId>spark-streaming_${scala.binary.version}</artifactId>
<version>${spark.version}</version>
<type>test-jar</type>
<classifier>tests</classifier>
<scope>test</scope>
</dependency>
<dependency>
<groupId>junit</groupId>
<artifactId>junit</artifactId>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.scalatest</groupId>
<artifactId>scalatest_${scala.binary.version}</artifactId>
<version>2.2.4</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.scalamock</groupId>
<artifactId>scalamock-scalatest-support_${scala.binary.version}</artifactId>
<version>3.1.4</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.fasterxml.jackson.module</groupId>
<artifactId>jackson-module-scala_${scala.binary.version}</artifactId>
<version>${jackson.version}</version>
<exclusions>
<exclusion>
<groupId>org.scala-lang</groupId>
<artifactId>scala-library</artifactId>
</exclusion>
<exclusion>
<groupId>org.scala-lang</groupId>
<artifactId>scala-reflect</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-client</artifactId>
<version>${hadoop-two.version}</version>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.jruby</groupId>
<artifactId>jruby-complete</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop-two.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.jruby</groupId>
<artifactId>jruby-complete</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<version>${hadoop-two.version}</version>
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.jruby</groupId>
<artifactId>jruby-complete</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.code.findbugs</groupId>
<artifactId>jsr305</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<version>${hadoop-two.version}</version>
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet</groupId>
<artifactId>servlet-api</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
</exclusion>
<exclusion>
<groupId>org.jruby</groupId>
<artifactId>jruby-complete</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>xerces</groupId>
<artifactId>xercesImpl</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-client</artifactId>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.thrift</groupId>
<artifactId>thrift</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-protocol-shaded</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-common</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-annotations</artifactId>
<version>${project.version}</version>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop-compat</artifactId>
<version>${project.version}</version>
<scope>test</scope>
<type>test-jar</type>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.thrift</groupId>
<artifactId>thrift</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-hadoop2-compat</artifactId>
<version>${project.version}</version>
<scope>test</scope>
<type>test-jar</type>
<exclusions>
<exclusion>
<groupId>log4j</groupId>
<artifactId>log4j</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.thrift</groupId>
<artifactId>thrift</artifactId>
</exclusion>
<exclusion>
<groupId>org.slf4j</groupId>
<artifactId>slf4j-log4j12</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jsp-api-2.1</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>servlet-api-2.5</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
</exclusion>
<exclusion>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty</artifactId>
</exclusion>
<exclusion>
<groupId>org.mortbay.jetty</groupId>
<artifactId>jetty-util</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-runtime</artifactId>
</exclusion>
<exclusion>
<groupId>tomcat</groupId>
<artifactId>jasper-compiler</artifactId>
</exclusion>
<exclusion>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
<exclusion>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${project.version}</version>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-server</artifactId>
<version>${project.version}</version>
<scope>test</scope>
<type>test-jar</type>
</dependency>
<dependency>
<groupId>org.apache.hbase</groupId>
<artifactId>hbase-mapreduce</artifactId>
</dependency>
<dependency>
<groupId>com.google.protobuf</groupId>
<artifactId>protobuf-java</artifactId>
</dependency>
<dependency>
<groupId>commons-io</groupId>
<artifactId>commons-io</artifactId>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
<type>test-jar</type>
<scope>test</scope>
</dependency>
<dependency>
<groupId>org.apache.avro</groupId>
<artifactId>avro</artifactId>
</dependency>
</dependencies>
<build>
<plugins>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-compiler-plugin</artifactId>
</plugin>
<plugin>
<groupId>net.alchim31.maven</groupId>
<artifactId>scala-maven-plugin</artifactId>
<version>3.2.0</version>
<configuration>
<charset>${project.build.sourceEncoding}</charset>
<scalaVersion>${scala.version}</scalaVersion>
<args>
<arg>-feature</arg>
</args>
</configuration>
<executions>
<execution>
<id>scala-compile-first</id>
<phase>process-resources</phase>
<goals>
<goal>add-source</goal>
<goal>compile</goal>
</goals>
</execution>
<execution>
<id>scala-test-compile</id>
<phase>process-test-resources</phase>
<goals>
<goal>testCompile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.scalatest</groupId>
<artifactId>scalatest-maven-plugin</artifactId>
<version>1.0</version>
<configuration>
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
<junitxml>.</junitxml>
<filereports>WDF TestSuite.txt</filereports>
<parallel>false</parallel>
<systemProperties>
<org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>org.apache.hadoop.hbase.shaded.</org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>
</systemProperties>
</configuration>
<executions>
<execution>
<id>test</id>
<phase>test</phase>
<goals>
<goal>test</goal>
</goals>
<configuration>
<systemProperties>
<org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>org.apache.hadoop.hbase.shaded.</org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>
</systemProperties>
<argLine> -Xmx1536m -XX:ReservedCodeCacheSize=512m
</argLine>
<parallel>false</parallel>
</configuration>
</execution>
</executions>
</plugin>
<!-- clover fails due to scala/java cross compile. This guarantees that the scala is
compiled before the java that will be evaluated by code coverage (scala will not be).
https://confluence.atlassian.com/display/CLOVERKB/Java-+Scala+cross-compilation+error+-+cannot+find+symbol
-->
<plugin>
<groupId>org.codehaus.mojo</groupId>
<artifactId>build-helper-maven-plugin</artifactId>
<executions>
<execution>
<id>add-source</id>
<phase>validate</phase>
<goals>
<goal>add-source</goal>
</goals>
<configuration>
<sources>
<source>src/main/scala</source>
</sources>
</configuration>
</execution>
<execution>
<id>add-test-source</id>
<phase>validate</phase>
<goals>
<goal>add-test-source</goal>
</goals>
<configuration>
<sources>
<source>src/test/scala</source>
</sources>
</configuration>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.xolstice.maven.plugins</groupId>
<artifactId>protobuf-maven-plugin</artifactId>
<executions>
<execution>
<id>compile-protoc</id>
<phase>generate-sources</phase>
<goals>
<goal>compile</goal>
</goals>
</execution>
</executions>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-enforcer-plugin</artifactId>
<executions>
<!-- purposefully have jsr 305 exclusion only warn in this module -->
<execution>
<id>banned-jsr305</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<fail>false</fail>
</configuration>
</execution>
<!-- scala is ok in the spark modules -->
<execution>
<id>banned-scala</id>
<goals>
<goal>enforce</goal>
</goals>
<configuration>
<skip>true</skip>
</configuration>
</execution>
</executions>
</plugin>
</plugins>
</build>
<profiles>
<!-- Skip the tests in this module -->
<profile>
<id>skipSparkTests</id>
<activation>
<property>
<name>skipSparkTests</name>
</property>
</activation>
<properties>
<surefire.skipFirstPart>true</surefire.skipFirstPart>
<surefire.skipSecondPart>true</surefire.skipSecondPart>
<skipTests>true</skipTests>
</properties>
</profile>
</profiles>
</project>

View File

@ -140,6 +140,12 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
@ -184,6 +190,10 @@
<artifactId>hadoop-hdfs</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -196,6 +206,10 @@
<type>test-jar</type>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -207,6 +221,10 @@
<artifactId>hadoop-minicluster</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -239,11 +257,23 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<scope>compile</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>

View File

@ -513,12 +513,22 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
@ -571,10 +581,22 @@
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-common</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
<dependency>
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-minicluster</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
</exclusions>
</dependency>
</dependencies>
<build>

pom.xml (61 changed lines)
View File

@ -1344,7 +1344,8 @@
<jruby.version>9.1.10.0</jruby.version>
<junit.version>4.12</junit.version>
<hamcrest.version>1.3</hamcrest.version>
<htrace.version>3.2.0-incubating</htrace.version>
<htrace.version>4.2.0-incubating</htrace.version>
<htrace-hadoop.version>3.2.0-incubating</htrace-hadoop.version>
<log4j.version>1.2.17</log4j.version>
<mockito-core.version>2.1.0</mockito-core.version>
<!--Internally we use a different version of protobuf. See hbase-protocol-shaded-->
@ -1959,7 +1960,7 @@
</dependency>
<dependency>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
<artifactId>htrace-core4</artifactId>
<version>${htrace.version}</version>
</dependency>
<dependency>
@ -2392,6 +2393,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -2430,6 +2435,10 @@
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -2470,6 +2479,10 @@
<artifactId>hadoop-common</artifactId>
<version>${hadoop-two.version}</version>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
@ -2520,10 +2533,14 @@
<artifactId>hadoop-minicluster</artifactId>
<version>${hadoop-two.version}</version>
<exclusions>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -2630,6 +2647,10 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -2664,6 +2685,10 @@
<type>test-jar</type>
<scope>test</scope>
<exclusions>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -2706,10 +2731,14 @@
<artifactId>hadoop-common</artifactId>
<version>${hadoop-three.version}</version>
<exclusions>
<exclusion>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-beanutils</groupId>
<artifactId>commons-beanutils</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>
@ -2761,10 +2790,14 @@
<artifactId>hadoop-minicluster</artifactId>
<version>${hadoop-three.version}</version>
<exclusions>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>org.apache.htrace</groupId>
<artifactId>htrace-core</artifactId>
</exclusion>
<exclusion>
<groupId>commons-httpclient</groupId>
<artifactId>commons-httpclient</artifactId>
</exclusion>
<exclusion>
<groupId>javax.servlet.jsp</groupId>
<artifactId>jsp-api</artifactId>

View File

@ -57,7 +57,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_ for a `hbase.local-fi
<property>
<name>hbase.trace.spanreceiver.classes</name>
<value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
<value>org.apache.htrace.core.LocalFileSpanReceiver</value>
</property>
<property>
<name>hbase.htrace.local-file-span-receiver.path</name>
@ -76,7 +76,7 @@ _htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7
<property>
<name>hbase.trace.spanreceiver.classes</name>
<value>org.apache.htrace.impl.ZipkinSpanReceiver</value>
<value>org.apache.htrace.core.ZipkinSpanReceiver</value>
</property>
<property>
<name>hbase.htrace.zipkin.collector-hostname</name>
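
The receivers configured above through hbase-site.xml can also be wired up directly against the HTrace 4 API, which can be handy for experimenting outside HBase. A hedged sketch; the span.receiver.classes, sampler.classes, and local.file.span.receiver.path keys are assumptions based on htrace-core4 defaults, not the hbase.htrace.* property names shown above:

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class SpanReceiverSketch {
  public static void main(String[] args) {
    // Attach LocalFileSpanReceiver so finished spans are written to a local file.
    Tracer tracer = new Tracer.Builder("demo")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "span.receiver.classes", "org.apache.htrace.core.LocalFileSpanReceiver",
            "local.file.span.receiver.path", "/tmp/htrace.out", // assumed key name
            "sampler.classes", "AlwaysSampler"))
        .build();
    try (TraceScope scope = tracer.newScope("demo-span")) {
      // spans created here are delivered to the configured receiver when closed
    }
    tracer.close();
  }
}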