HBASE-18601: Update Htrace to 4.2
Updated HTrace version to 4.2. Created TraceUtil class to wrap htrace methods. Uses try-with-resources.

Signed-off-by: Balazs Meszaros <balazs.meszaros@cloudera.com>
Signed-off-by: Michael Stack <stack@apache.org>

This commit is contained in:
parent 8a5273f38c
commit 7a69ebc73e
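The change this commit applies over and over is the same small pattern: the htrace 3 idiom of `Trace.startSpan(...)` with a manually closed `TraceScope` becomes a `TraceUtil.createTrace(...)` scope opened in try-with-resources. Below is a minimal sketch of the resulting call-site shape, assuming the `TraceUtil` class introduced later in this commit; the wrapper class and span name are illustrative, not part of the patch.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.TraceScope;

public class TraceMigrationSketch {
  public void tracedOperation(Configuration conf) {
    // Build the shared Tracer from the HBase configuration (no-op when it is
    // already built or when no configuration is available).
    TraceUtil.initTracer(conf);

    // createTrace returns null when tracing is disabled; try-with-resources
    // tolerates a null resource, so no explicit finally/close() is needed,
    // unlike the htrace 3 code this patch removes.
    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) {
      // ... the work to be timed under the span ...
    }
  }
}
```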
@@ -174,6 +174,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>net.java.dev.jets3t</groupId>
           <artifactId>jets3t</artifactId>
@@ -287,6 +291,12 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
   </dependencies>
 </profile>
@@ -166,7 +166,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.jruby.jcodings</groupId>
@@ -258,6 +258,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>net.java.dev.jets3t</groupId>
           <artifactId>jets3t</artifactId>
@@ -326,6 +330,12 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
   </dependencies>
 </profile>
@@ -48,6 +48,7 @@ import org.apache.hadoop.hbase.RegionLocations;
 import org.apache.hadoop.hbase.RetryImmediatelyException;
 import org.apache.hadoop.hbase.ServerName;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.backoff.ServerStatistics;
 import org.apache.hadoop.hbase.client.coprocessor.Batch;
@@ -56,7 +57,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Tracer;
 
 /**
  * The context, and return value, for a single submit/submitAll call.
@@ -582,7 +583,13 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
       asyncProcess.incTaskCounters(multiAction.getRegions(), server);
       SingleServerRequestRunnable runnable = createSingleServerRequest(
               multiAction, numAttempt, server, callsInProgress);
-      return Collections.singletonList(Trace.wrap("AsyncProcess.sendMultiAction", runnable));
+      Tracer tracer = Tracer.curThreadTracer();
+
+      if (tracer == null) {
+        return Collections.singletonList(runnable);
+      } else {
+        return Collections.singletonList(tracer.wrap(runnable, "AsyncProcess.sendMultiAction"));
+      }
     }
 
     // group the actions by the amount of delay
@@ -618,7 +625,7 @@ class AsyncRequestFutureImpl<CResult> implements AsyncRequestFuture {
             asyncProcess.connection.getConnectionMetrics().incrNormalRunners();
           }
         }
-        runnable = Trace.wrap(traceText, runnable);
+        runnable = TraceUtil.wrap(runnable, traceText);
         toReturn.add(runnable);
 
       }
@@ -28,9 +28,9 @@ import java.util.concurrent.TimeoutException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Trace;
 
 /**
  * A completion service for the RpcRetryingCallerFactory.
@@ -168,7 +168,7 @@ public class ResultBoundedCompletionService<V> {
 
   public void submit(RetryingCallable<V> task, int callTimeout, int id) {
     QueueingFuture<V> newFuture = new QueueingFuture<>(task, callTimeout, id);
-    executor.execute(Trace.wrap(newFuture));
+    executor.execute(TraceUtil.wrap(newFuture, "ResultBoundedCompletionService.submit"));
     tasks[id] = newFuture;
   }
 
@@ -24,10 +24,6 @@ import static org.apache.hadoop.hbase.ipc.IPCUtil.isFatalConnectionException;
 import static org.apache.hadoop.hbase.ipc.IPCUtil.setCancelled;
 import static org.apache.hadoop.hbase.ipc.IPCUtil.write;
 
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
-import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
-
 import java.io.BufferedInputStream;
 import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
@@ -55,10 +51,15 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.DoNotRetryIOException;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
 import org.apache.hadoop.hbase.io.ByteArrayOutputStream;
 import org.apache.hadoop.hbase.ipc.HBaseRpcController.CancellationCallback;
+import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
+import org.apache.hadoop.hbase.security.SaslUtil;
+import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message.Builder;
+import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcCallback;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
@@ -66,17 +67,15 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ConnectionHea
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeader;
-import org.apache.hadoop.hbase.security.HBaseSaslRpcClient;
-import org.apache.hadoop.hbase.security.SaslUtil;
-import org.apache.hadoop.hbase.security.SaslUtil.QualityOfProtection;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.ExceptionUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
 
 /**
  * Thread that reads responses and notifies callers. Each connection owns a socket connected to a
@@ -574,7 +573,8 @@ class BlockingRpcConnection extends RpcConnection implements Runnable {
   }
 
   private void tracedWriteRequest(Call call) throws IOException {
-    try (TraceScope ignored = Trace.startSpan("RpcClientImpl.tracedWriteRequest", call.span)) {
+    try (TraceScope ignored = TraceUtil.createTrace("RpcClientImpl.tracedWriteRequest",
+          call.span)) {
       writeRequest(call);
     }
   }
@@ -30,8 +30,8 @@ import org.apache.hadoop.hbase.CellScanner;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.MetricsConnection;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.Tracer;
 
 /** A call waiting for a value. */
 @InterfaceAudience.Private
@@ -73,7 +73,7 @@ class Call {
     this.timeout = timeout;
     this.priority = priority;
     this.callback = callback;
-    this.span = Trace.currentSpan();
+    this.span = Tracer.getCurrentSpan();
   }
 
   @Override
@@ -35,7 +35,6 @@ import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.CellBlockMeta;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ExceptionResponse;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.hadoop.hbase.shaded.protobuf.generated.TracingProtos.RPCTInfo;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.ipc.RemoteException;
@@ -102,10 +101,11 @@ class IPCUtil {
   static RequestHeader buildRequestHeader(Call call, CellBlockMeta cellBlockMeta) {
     RequestHeader.Builder builder = RequestHeader.newBuilder();
     builder.setCallId(call.id);
-    if (call.span != null) {
+    //TODO handle htrace API change, see HBASE-18895
+    /*if (call.span != null) {
       builder.setTraceInfo(RPCTInfo.newBuilder().setParentId(call.span.getSpanId())
-          .setTraceId(call.span.getTraceId()));
-    }
+          .setTraceId(call.span.getTracerId()));
+    }*/
     builder.setMethodName(call.md.getName());
     builder.setRequestParam(call.param != null);
     if (cellBlockMeta != null) {
@@ -33,8 +33,9 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.RetryCounter;
 import org.apache.hadoop.hbase.util.RetryCounterFactory;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.zookeeper.AsyncCallback;
 import org.apache.zookeeper.CreateMode;
@@ -156,11 +157,8 @@ public class RecoverableZooKeeper {
    * This function will not throw NoNodeException if the path does not
    * exist.
    */
-  public void delete(String path, int version)
-  throws InterruptedException, KeeperException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.delete");
+  public void delete(String path, int version) throws InterruptedException, KeeperException {
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.delete")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       boolean isRetry = false; // False for first attempt, true for all retries.
       while (true) {
@@ -197,8 +195,6 @@ public class RecoverableZooKeeper {
         retryCounter.sleepUntilNextRetry();
         isRetry = true;
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -206,11 +202,8 @@ public class RecoverableZooKeeper {
    * exists is an idempotent operation. Retry before throwing exception
    * @return A Stat instance
    */
-  public Stat exists(String path, Watcher watcher)
-  throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+  public Stat exists(String path, Watcher watcher) throws KeeperException, InterruptedException {
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -236,8 +229,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -245,11 +236,8 @@ public class RecoverableZooKeeper {
    * exists is an idempotent operation. Retry before throwing exception
    * @return A Stat instance
    */
-  public Stat exists(String path, boolean watch)
-  throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.exists");
+  public Stat exists(String path, boolean watch) throws KeeperException, InterruptedException {
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.exists")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -275,8 +263,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -297,9 +283,7 @@ public class RecoverableZooKeeper {
    */
   public List<String> getChildren(String path, Watcher watcher)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -325,8 +309,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -336,9 +318,7 @@ public class RecoverableZooKeeper {
    */
   public List<String> getChildren(String path, boolean watch)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getChildren");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getChildren")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -364,8 +344,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -375,9 +353,7 @@ public class RecoverableZooKeeper {
    */
   public byte[] getData(String path, Watcher watcher, Stat stat)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -403,8 +379,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -414,9 +388,7 @@ public class RecoverableZooKeeper {
    */
   public byte[] getData(String path, boolean watch, Stat stat)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getData");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getData")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -442,8 +414,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -455,9 +425,7 @@ public class RecoverableZooKeeper {
    */
   public Stat setData(String path, byte[] data, int version)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.setData");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setData")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       byte[] newData = appendMetaData(id, data);
       boolean isRetry = false;
@@ -505,8 +473,6 @@ public class RecoverableZooKeeper {
         retryCounter.sleepUntilNextRetry();
         isRetry = true;
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -516,9 +482,7 @@ public class RecoverableZooKeeper {
    */
   public List<ACL> getAcl(String path, Stat stat)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.getAcl");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.getAcl")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -544,8 +508,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -555,9 +517,7 @@ public class RecoverableZooKeeper {
    */
   public Stat setAcl(String path, List<ACL> acls, int version)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.setAcl");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.setAcl")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       while (true) {
         try {
@@ -583,8 +543,6 @@ public class RecoverableZooKeeper {
         }
         retryCounter.sleepUntilNextRetry();
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -606,9 +564,7 @@ public class RecoverableZooKeeper {
   public String create(String path, byte[] data, List<ACL> acl,
       CreateMode createMode)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.create");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.create")) {
       byte[] newData = appendMetaData(id, data);
       switch (createMode) {
         case EPHEMERAL:
@@ -623,8 +579,6 @@ public class RecoverableZooKeeper {
           throw new IllegalArgumentException("Unrecognized CreateMode: " +
               createMode);
       }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -753,9 +707,7 @@ public class RecoverableZooKeeper {
    */
   public List<OpResult> multi(Iterable<Op> ops)
     throws KeeperException, InterruptedException {
-    TraceScope traceScope = null;
-    try {
-      traceScope = Trace.startSpan("RecoverableZookeeper.multi");
+    try (TraceScope scope = TraceUtil.createTrace("RecoverableZookeeper.multi")) {
       RetryCounter retryCounter = retryCounterFactory.create();
       Iterable<Op> multiOps = prepareZKMulti(ops);
       while (true) {
@@ -782,8 +734,6 @@ public class RecoverableZooKeeper {
        }
        retryCounter.sleepUntilNextRetry();
      }
-    } finally {
-      if (traceScope != null) traceScope.close();
     }
   }
 
@@ -244,7 +244,7 @@
     <!-- tracing Dependencies -->
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
@@ -344,6 +344,12 @@
       <artifactId>hadoop-common</artifactId>
       <!--FYI This pulls in hadoop's guava. Its needed for Configuration
           at least-->
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
   </dependencies>
   <build>
@@ -390,6 +396,12 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
   </dependencies>
   <build>
@@ -18,16 +18,15 @@
 
 package org.apache.hadoop.hbase.trace;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.HTraceConfiguration;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.HTraceConfiguration;
+import org.apache.yetus.audience.InterfaceAudience;
 
 @InterfaceAudience.Private
 public class HBaseHTraceConfiguration extends HTraceConfiguration {
-  private static final Log LOG =
-    LogFactory.getLog(HBaseHTraceConfiguration.class);
+  private static final Log LOG = LogFactory.getLog(HBaseHTraceConfiguration.class);
 
   public static final String KEY_PREFIX = "hbase.htrace.";
 
@@ -65,7 +64,7 @@ public class HBaseHTraceConfiguration extends HTraceConfiguration {
 
   @Override
   public String get(String key) {
-    return conf.get(KEY_PREFIX +key);
+    return conf.get(KEY_PREFIX + key);
   }
 
   @Override
@@ -24,10 +24,8 @@ import java.util.HashSet;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.SpanReceiver;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.SpanReceiver;
-import org.apache.htrace.SpanReceiverBuilder;
-import org.apache.htrace.Trace;
 
 /**
  * This class provides functions for reading the names of SpanReceivers from
@@ -62,6 +60,16 @@ public class SpanReceiverHost {
 
   }
 
+  public static Configuration getConfiguration(){
+    synchronized (SingletonHolder.INSTANCE.lock) {
+      if (SingletonHolder.INSTANCE.host == null || SingletonHolder.INSTANCE.host.conf == null) {
+        return null;
+      }
+
+      return SingletonHolder.INSTANCE.host.conf;
+    }
+  }
+
   SpanReceiverHost(Configuration conf) {
     receivers = new HashSet<>();
     this.conf = conf;
@@ -78,18 +86,18 @@ public class SpanReceiverHost {
       return;
     }
 
-    SpanReceiverBuilder builder = new SpanReceiverBuilder(new HBaseHTraceConfiguration(conf));
+    SpanReceiver.Builder builder = new SpanReceiver.Builder(new HBaseHTraceConfiguration(conf));
     for (String className : receiverNames) {
       className = className.trim();
 
-      SpanReceiver receiver = builder.spanReceiverClass(className).build();
+      SpanReceiver receiver = builder.className(className).build();
       if (receiver != null) {
         receivers.add(receiver);
         LOG.info("SpanReceiver " + className + " was loaded successfully.");
       }
     }
     for (SpanReceiver rcvr : receivers) {
-      Trace.addReceiver(rcvr);
+      TraceUtil.addReceiver(rcvr);
     }
   }
 
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.trace;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.htrace.core.HTraceConfiguration;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.SpanReceiver;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+
+/**
+ * This wrapper class provides functions for accessing htrace 4+ functionality in a simplified way.
+ */
+public final class TraceUtil {
+  private static HTraceConfiguration conf;
+  private static Tracer tracer;
+
+  private TraceUtil() {
+  }
+
+  public static void initTracer(Configuration c) {
+    if(c != null) {
+      conf = new HBaseHTraceConfiguration(c);
+    }
+
+    if (tracer == null && conf != null) {
+      tracer = new Tracer.Builder("Tracer").conf(conf).build();
+    }
+  }
+
+  /**
+   * Wrapper method to create new TraceScope with the given description
+   * @return TraceScope or null when not tracing
+   */
+  public static TraceScope createTrace(String description) {
+    return (tracer == null) ? null : tracer.newScope(description);
+  }
+
+  /**
+   * Wrapper method to create new child TraceScope with the given description
+   * and parent scope's spanId
+   * @param span parent span
+   * @return TraceScope or null when not tracing
+   */
+  public static TraceScope createTrace(String description, Span span) {
+    if(span == null) return createTrace(description);
+
+    return (tracer == null) ? null : tracer.newScope(description, span.getSpanId());
+  }
+
+  /**
+   * Wrapper method to add new sampler to the default tracer
+   * @return true if added, false if it was already added
+   */
+  public static boolean addSampler(Sampler sampler) {
+    if (sampler == null) {
+      return false;
+    }
+
+    return (tracer == null) ? false : tracer.addSampler(sampler);
+  }
+
+  /**
+   * Wrapper method to add key-value pair to TraceInfo of actual span
+   */
+  public static void addKVAnnotation(String key, String value){
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addKVAnnotation(key, value);
+    }
+  }
+
+  /**
+   * Wrapper method to add receiver to actual tracerpool
+   * @return true if successfull, false if it was already added
+   */
+  public static boolean addReceiver(SpanReceiver rcvr) {
+    return (tracer == null) ? false : tracer.getTracerPool().addReceiver(rcvr);
+  }
+
+  /**
+   * Wrapper method to remove receiver from actual tracerpool
+   * @return true if removed, false if doesn't exist
+   */
+  public static boolean removeReceiver(SpanReceiver rcvr) {
+    return (tracer == null) ? false : tracer.getTracerPool().removeReceiver(rcvr);
+  }
+
+  /**
+   * Wrapper method to add timeline annotiation to current span with given message
+   */
+  public static void addTimelineAnnotation(String msg) {
+    Span span = Tracer.getCurrentSpan();
+    if (span != null) {
+      span.addTimelineAnnotation(msg);
+    }
+  }
+
+  /**
+   * Wrap runnable with current tracer and description
+   * @param runnable to wrap
+   * @return wrapped runnable or original runnable when not tracing
+   */
+  public static Runnable wrap(Runnable runnable, String description) {
+    return (tracer == null) ? runnable : tracer.wrap(runnable, description);
+  }
+}
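For reference, the two ways the rest of the patch consumes this new class are scoped tracing via `createTrace` and runnable wrapping via `wrap`. The sketch below mirrors the changed call sites elsewhere in this commit; the wrapper class, method names, and executor are illustrative, not part of the patch.

```java
import java.util.concurrent.Executor;

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.TraceScope;

public class TraceUtilUsageSketch {
  // Wrapping work handed to an executor, as in ResultBoundedCompletionService.submit:
  // TraceUtil.wrap returns the original Runnable unchanged when no tracer is configured,
  // so callers need no null checks.
  public void submit(Executor executor, Runnable task) {
    executor.execute(TraceUtil.wrap(task, "ResultBoundedCompletionService.submit"));
  }

  // Scoped tracing around a block of work, as in MemcachedBlockCache.getBlock.
  public void doTracedWork() {
    try (TraceScope scope = TraceUtil.createTrace("MemcachedBlockCache.getBlock")) {
      // ... work to be timed under the span ...
    }
  }
}
```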
@@ -254,6 +254,12 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -285,6 +291,10 @@
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
@@ -296,6 +306,10 @@
       <artifactId>hadoop-minicluster</artifactId>
       <scope>test</scope>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
@@ -330,11 +344,21 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
@@ -245,6 +245,12 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
   </dependencies>
   <build>
@@ -290,10 +296,22 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
   </dependencies>
   <build>
@@ -173,7 +173,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>junit</groupId>
@@ -244,6 +244,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
@@ -296,6 +300,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>com.google.guava</groupId>
           <artifactId>guava</artifactId>
@@ -1,3 +1,4 @@
+
 /**
  * Copyright The Apache Software Foundation
  *
@@ -19,24 +20,6 @@
 
 package org.apache.hadoop.hbase.io.hfile;
 
-import net.spy.memcached.CachedData;
-import net.spy.memcached.ConnectionFactoryBuilder;
-import net.spy.memcached.FailureMode;
-import net.spy.memcached.MemcachedClient;
-import net.spy.memcached.transcoders.Transcoder;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hbase.HConstants;
-import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
-import org.apache.hadoop.hbase.nio.ByteBuff;
-import org.apache.hadoop.hbase.nio.SingleByteBuff;
-import org.apache.hadoop.hbase.util.Addressing;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-
-
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
@@ -46,6 +29,24 @@ import java.util.List;
 import java.util.NoSuchElementException;
 import java.util.concurrent.ExecutionException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.io.hfile.Cacheable.MemoryType;
+import org.apache.hadoop.hbase.nio.ByteBuff;
+import org.apache.hadoop.hbase.nio.SingleByteBuff;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.Addressing;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
+
+import net.spy.memcached.CachedData;
+import net.spy.memcached.ConnectionFactoryBuilder;
+import net.spy.memcached.FailureMode;
+import net.spy.memcached.MemcachedClient;
+import net.spy.memcached.transcoders.Transcoder;
+
 /**
  * Class to store blocks into memcached.
  * This should only be used on a cluster of Memcached daemons that are tuned well and have a
@@ -134,7 +135,7 @@ public class MemcachedBlockCache implements BlockCache {
     // Assume that nothing is the block cache
     HFileBlock result = null;
 
-    try (TraceScope traceScope = Trace.startSpan("MemcachedBlockCache.getBlock")) {
+    try (TraceScope traceScope = TraceUtil.createTrace("MemcachedBlockCache.getBlock")) {
       result = client.get(cacheKey.toString(), tc);
     } catch (Exception e) {
       // Catch a pretty broad set of exceptions to limit any changes in the memecache client
@@ -170,6 +170,12 @@ limitations under the License.
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop-two.version}</version>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.commons</groupId>
@@ -266,7 +266,7 @@
     </dependency>
     <dependency>
       <groupId>org.apache.htrace</groupId>
-      <artifactId>htrace-core</artifactId>
+      <artifactId>htrace-core4</artifactId>
     </dependency>
     <dependency>
       <groupId>javax.ws.rs</groupId>
@@ -350,6 +350,12 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@@ -400,10 +406,22 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-common</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-minicluster</artifactId>
+      <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+      </exclusions>
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.NamespaceNotFoundException;
|
||||||
import org.apache.hadoop.hbase.TableExistsException;
|
import org.apache.hadoop.hbase.TableExistsException;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||||
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
|
|
||||||
import org.apache.hadoop.hbase.chaos.actions.Action;
|
import org.apache.hadoop.hbase.chaos.actions.Action;
|
||||||
import org.apache.hadoop.hbase.chaos.actions.MoveRegionsOfTableAction;
|
import org.apache.hadoop.hbase.chaos.actions.MoveRegionsOfTableAction;
|
||||||
import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction;
|
import org.apache.hadoop.hbase.chaos.actions.RestartActiveMasterAction;
|
||||||
|
@ -62,20 +61,19 @@ import org.apache.hadoop.hbase.filter.KeyOnlyFilter;
|
||||||
import org.apache.hadoop.hbase.ipc.FatalConnectionException;
|
import org.apache.hadoop.hbase.ipc.FatalConnectionException;
|
||||||
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
|
import org.apache.hadoop.hbase.regionserver.NoSuchColumnFamilyException;
|
||||||
import org.apache.hadoop.hbase.security.AccessDeniedException;
|
import org.apache.hadoop.hbase.security.AccessDeniedException;
|
||||||
|
import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
|
||||||
|
import org.apache.hadoop.hbase.testclassification.IntegrationTests;
|
||||||
|
import org.apache.hadoop.hbase.trace.TraceUtil;
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
import org.apache.hadoop.hbase.util.LoadTestTool;
|
import org.apache.hadoop.hbase.util.LoadTestTool;
|
||||||
import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
|
import org.apache.htrace.core.AlwaysSampler;
|
||||||
import org.apache.htrace.Span;
|
import org.apache.htrace.core.Span;
|
||||||
import org.apache.htrace.Trace;
|
import org.apache.htrace.core.TraceScope;
|
||||||
import org.apache.htrace.TraceScope;
|
|
||||||
import org.apache.htrace.impl.AlwaysSampler;
|
|
||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Test;
|
import org.junit.Test;
|
||||||
import org.junit.experimental.categories.Category;
|
import org.junit.experimental.categories.Category;
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.shaded.com.google.common.base.Objects;
|
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Integration test that should benchmark how fast HBase can recover from failures. This test starts
|
* Integration test that should benchmark how fast HBase can recover from failures. This test starts
|
||||||
* different threads:
|
* different threads:
|
||||||
|
@ -268,7 +266,7 @@ public class IntegrationTestMTTR {
|
||||||
|
|
||||||
loadTool = null;
|
loadTool = null;
|
||||||
}
|
}
|
||||||
|
|
||||||
private static boolean tablesOnMaster() {
|
private static boolean tablesOnMaster() {
|
||||||
boolean ret = true;
|
boolean ret = true;
|
||||||
String value = util.getConfiguration().get("hbase.balancer.tablesOnMaster");
|
String value = util.getConfiguration().get("hbase.balancer.tablesOnMaster");
|
||||||
|
@ -369,7 +367,7 @@ public class IntegrationTestMTTR {
|
||||||
*/
|
*/
|
||||||
private static class TimingResult {
|
private static class TimingResult {
|
||||||
DescriptiveStatistics stats = new DescriptiveStatistics();
|
DescriptiveStatistics stats = new DescriptiveStatistics();
|
||||||
ArrayList<Long> traces = new ArrayList<>(10);
|
ArrayList<String> traces = new ArrayList<>(10);
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add a result to this aggregate result.
|
* Add a result to this aggregate result.
|
||||||
|
@ -377,9 +375,12 @@ public class IntegrationTestMTTR {
|
||||||
* @param span Span. To be kept if the time taken was over 1 second
|
* @param span Span. To be kept if the time taken was over 1 second
|
||||||
*/
|
*/
|
||||||
public void addResult(long time, Span span) {
|
public void addResult(long time, Span span) {
|
||||||
|
if (span == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS));
|
stats.addValue(TimeUnit.MILLISECONDS.convert(time, TimeUnit.NANOSECONDS));
|
||||||
if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) {
|
if (TimeUnit.SECONDS.convert(time, TimeUnit.NANOSECONDS) >= 1) {
|
||||||
traces.add(span.getTraceId());
|
traces.add(span.getTracerId());
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -419,12 +420,15 @@ public class IntegrationTestMTTR {
|
||||||
final int maxIterations = 10;
|
final int maxIterations = 10;
|
||||||
int numAfterDone = 0;
|
int numAfterDone = 0;
|
||||||
int resetCount = 0;
|
int resetCount = 0;
|
||||||
|
TraceUtil.addSampler(AlwaysSampler.INSTANCE);
|
||||||
// Keep trying until the rs is back up and we've gotten a put through
|
// Keep trying until the rs is back up and we've gotten a put through
|
||||||
while (numAfterDone < maxIterations) {
|
while (numAfterDone < maxIterations) {
|
||||||
long start = System.nanoTime();
|
long start = System.nanoTime();
|
||||||
TraceScope scope = null;
|
Span span = null;
|
||||||
try {
|
try (TraceScope scope = TraceUtil.createTrace(getSpanName())) {
|
||||||
scope = Trace.startSpan(getSpanName(), AlwaysSampler.INSTANCE);
|
if (scope != null) {
|
||||||
|
span = scope.getSpan();
|
||||||
|
}
|
||||||
boolean actionResult = doAction();
|
boolean actionResult = doAction();
|
||||||
if (actionResult && future.isDone()) {
|
if (actionResult && future.isDone()) {
|
||||||
numAfterDone++;
|
numAfterDone++;
|
||||||
|
@ -470,12 +474,8 @@ public class IntegrationTestMTTR {
|
||||||
LOG.info("Too many unexpected Exceptions. Aborting.", e);
|
LOG.info("Too many unexpected Exceptions. Aborting.", e);
|
||||||
throw e;
|
throw e;
|
||||||
}
|
}
|
||||||
} finally {
|
|
||||||
if (scope != null) {
|
|
||||||
scope.close();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
result.addResult(System.nanoTime() - start, scope.getSpan());
|
result.addResult(System.nanoTime() - start, span);
|
||||||
}
|
}
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
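
Note: the hunks above show the core of the HTrace 4 migration in this file: explicit startSpan/finally-close bookkeeping is replaced by a try-with-resources TraceScope obtained from the new TraceUtil wrapper. A minimal sketch of that pattern, assuming only the TraceUtil, TraceScope and Span signatures that appear in this diff (the class and its doWork() method are hypothetical):

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;

public class TracedActionSketch {
  public long timedCall() throws Exception {
    long start = System.nanoTime();
    Span span = null;
    // The scope closes itself when the block exits, so the old finally { scope.close(); } disappears.
    try (TraceScope scope = TraceUtil.createTrace("timedCall")) {
      if (scope != null) {
        span = scope.getSpan(); // kept so the caller can still record or inspect the span afterwards
      }
      doWork();
    }
    return System.nanoTime() - start;
  }

  private void doWork() {
    // placeholder for the traced operation
  }
}

The null check mirrors the change above: createTrace may hand back no scope, so callers read the Span through the scope rather than holding the scope itself.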

@@ -35,9 +35,8 @@ import org.apache.hadoop.hbase.client.Table;
 import org.apache.hadoop.hbase.util.AbstractHBaseTool;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
 import org.junit.Test;
 import org.junit.experimental.categories.Category;

@@ -117,13 +116,12 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {

 for (int i = 0; i < 100; i++) {
 Runnable runnable = new Runnable() {
-private TraceScope innerScope = null;
 private final LinkedBlockingQueue<Long> rowKeyQueue = rks;
 @Override
 public void run() {
 ResultScanner rs = null;
-try {
-innerScope = Trace.startSpan("Scan", Sampler.ALWAYS);
+TraceUtil.addSampler(Sampler.ALWAYS);
+try (TraceScope scope = TraceUtil.createTrace("Scan")){
 Table ht = util.getConnection().getTable(tableName);
 Scan s = new Scan();
 s.setStartRow(Bytes.toBytes(rowKeyQueue.take()));
@@ -137,20 +135,15 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 accum |= Bytes.toLong(r.getRow());
 }

-innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum);
+TraceUtil.addTimelineAnnotation("Accum result = " + accum);

 ht.close();
 ht = null;
 } catch (IOException e) {
 e.printStackTrace();
-innerScope.getSpan().addKVAnnotation(
-Bytes.toBytes("exception"),
-Bytes.toBytes(e.getClass().getSimpleName()));
+TraceUtil.addKVAnnotation("exception", e.getClass().getSimpleName());

 } catch (Exception e) {
 } finally {
-if (innerScope != null) innerScope.close();
 if (rs != null) rs.close();
 }

@@ -165,7 +158,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 throws IOException {
 for (int i = 0; i < 100; i++) {
 Runnable runnable = new Runnable() {
-private TraceScope innerScope = null;
 private final LinkedBlockingQueue<Long> rowKeyQueue = rowKeys;

 @Override
@@ -180,9 +172,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 }

 long accum = 0;
+TraceUtil.addSampler(Sampler.ALWAYS);
 for (int x = 0; x < 5; x++) {
-try {
-innerScope = Trace.startSpan("gets", Sampler.ALWAYS);
+try (TraceScope scope = TraceUtil.createTrace("gets")) {
 long rk = rowKeyQueue.take();
 Result r1 = ht.get(new Get(Bytes.toBytes(rk)));
 if (r1 != null) {
@@ -192,14 +184,10 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 if (r2 != null) {
 accum |= Bytes.toLong(r2.getRow());
 }
-innerScope.getSpan().addTimelineAnnotation("Accum = " + accum);
+TraceUtil.addTimelineAnnotation("Accum = " + accum);

-} catch (IOException e) {
+} catch (IOException|InterruptedException ie) {
 // IGNORED
-} catch (InterruptedException ie) {
-// IGNORED
-} finally {
-if (innerScope != null) innerScope.close();
 }
 }

@@ -210,25 +198,18 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 }

 private void createTable() throws IOException {
-TraceScope createScope = null;
-try {
-createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
+TraceUtil.addSampler(Sampler.ALWAYS);
+try (TraceScope scope = TraceUtil.createTrace("createTable")) {
 util.createTable(tableName, familyName);
-} finally {
-if (createScope != null) createScope.close();
 }
 }

 private void deleteTable() throws IOException {
-TraceScope deleteScope = null;
-try {
+TraceUtil.addSampler(Sampler.ALWAYS);
+try (TraceScope scope = TraceUtil.createTrace("deleteTable")) {
 if (admin.tableExists(tableName)) {
-deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
 util.deleteTable(tableName);
 }
-} finally {
-if (deleteScope != null) deleteScope.close();
 }
 }

@@ -236,9 +217,9 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<>(25000);
 BufferedMutator ht = util.getConnection().getBufferedMutator(this.tableName);
 byte[] value = new byte[300];
+TraceUtil.addSampler(Sampler.ALWAYS);
 for (int x = 0; x < 5000; x++) {
-TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
-try {
+try (TraceScope traceScope = TraceUtil.createTrace("insertData")) {
 for (int i = 0; i < 5; i++) {
 long rk = random.nextLong();
 rowKeys.add(rk);
@@ -252,8 +233,6 @@ public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
 if ((x % 1000) == 0) {
 admin.flush(tableName);
 }
-} finally {
-traceScope.close();
 }
 }
 admin.flush(tableName);
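
Note: besides the try-with-resources change, the annotation calls in this file move from the Span object to static TraceUtil helpers, and the key/value annotation now takes Strings where the old API took byte[] pairs. A condensed sketch of the resulting shape, assuming only the TraceUtil, Sampler and TraceScope usages shown in this diff (the runScan() helper is hypothetical):

import java.io.IOException;
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;

public class AnnotatedScanSketch {
  public void tracedScan() {
    TraceUtil.addSampler(Sampler.ALWAYS);
    try (TraceScope scope = TraceUtil.createTrace("Scan")) {
      long accum = runScan();
      // Timeline annotations attach to the current span without touching scope.getSpan() directly.
      TraceUtil.addTimelineAnnotation("Accum result = " + accum);
    } catch (IOException e) {
      // String key/value pair instead of the old Bytes.toBytes(...) arguments.
      TraceUtil.addKVAnnotation("exception", e.getClass().getSimpleName());
    }
  }

  private long runScan() throws IOException {
    return 0L; // placeholder for the real scan loop
  }
}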

@@ -181,7 +181,7 @@
 </dependency>
 <dependency>
 <groupId>org.apache.htrace</groupId>
-<artifactId>htrace-core</artifactId>
+<artifactId>htrace-core4</artifactId>
 </dependency>
 <dependency>
 <groupId>org.apache.hbase</groupId>
@@ -246,6 +246,10 @@
 <type>test-jar</type>
 <scope>test</scope>
 <exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
 <exclusion>
 <groupId>com.google.guava</groupId>
 <artifactId>guava</artifactId>
@@ -331,6 +335,10 @@
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
 <exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
 <exclusion>
 <groupId>net.java.dev.jets3t</groupId>
 <artifactId>jets3t</artifactId>
@@ -377,6 +385,10 @@
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-hdfs</artifactId>
 <exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
 <exclusion>
 <groupId>javax.servlet.jsp</groupId>
 <artifactId>jsp-api</artifactId>
@@ -415,6 +427,12 @@
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-minicluster</artifactId>
 <scope>test</scope>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 </dependencies>

@@ -439,11 +457,23 @@
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 <dependency>
 <!--maven dependency:analyze says not needed but tests fail w/o-->
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-minicluster</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 </dependencies>
 </profile>

@@ -818,7 +818,7 @@ public class TableMapReduceUtil {
 com.google.protobuf.Message.class,
 org.apache.hadoop.hbase.shaded.com.google.protobuf.UnsafeByteOperations.class,
 org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists.class,
-org.apache.htrace.Trace.class,
+org.apache.htrace.core.Tracer.class,
 com.codahale.metrics.MetricRegistry.class,
 org.apache.commons.lang3.ArrayUtils.class,
 com.fasterxml.jackson.databind.ObjectMapper.class,

@@ -31,10 +31,10 @@ import java.util.Date;
 import java.util.LinkedList;
 import java.util.Locale;
 import java.util.Map;
+import java.util.NoSuchElementException;
 import java.util.Queue;
 import java.util.Random;
 import java.util.TreeMap;
-import java.util.NoSuchElementException;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
@@ -48,7 +48,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Append;
 import org.apache.hadoop.hbase.client.AsyncConnection;
@@ -81,9 +80,17 @@ import org.apache.hadoop.hbase.io.hfile.RandomDistribution;
 import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
 import org.apache.hadoop.hbase.regionserver.BloomType;
 import org.apache.hadoop.hbase.regionserver.CompactingMemStore;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
+import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
-import org.apache.hadoop.hbase.util.*;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.ByteArrayHashKey;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Hash;
+import org.apache.hadoop.hbase.util.MurmurHash;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.YammerHistogramUtils;
 import org.apache.hadoop.io.LongWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapreduce.Job;
@@ -93,17 +100,15 @@ import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
 import org.apache.hadoop.mapreduce.lib.reduce.LongSumReducer;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.htrace.Sampler;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.htrace.impl.ProbabilitySampler;
-import org.apache.hadoop.hbase.shaded.com.google.common.base.MoreObjects;
-import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
+import org.apache.htrace.core.ProbabilitySampler;
+import org.apache.htrace.core.Sampler;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;

 import com.codahale.metrics.Histogram;
 import com.codahale.metrics.UniformReservoir;
-import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.MapperFeature;
+import com.fasterxml.jackson.databind.ObjectMapper;

 /**
  * Script used evaluating HBase performance and scalability. Runs a HBase
@@ -1034,7 +1039,7 @@ public class PerformanceEvaluation extends Configured implements Tool {
 protected final TestOptions opts;

 private final Status status;
-private final Sampler<?> traceSampler;
+private final Sampler traceSampler;
 private final SpanReceiverHost receiverHost;

 private String testName;
@@ -1182,17 +1187,15 @@ public class PerformanceEvaluation extends Configured implements Tool {
 void testTimed() throws IOException, InterruptedException {
 int startRow = getStartRow();
 int lastRow = getLastRow();
+TraceUtil.addSampler(traceSampler);
 // Report on completion of 1/10th of total.
 for (int ii = 0; ii < opts.cycles; ii++) {
 if (opts.cycles > 1) LOG.info("Cycle=" + ii + " of " + opts.cycles);
 for (int i = startRow; i < lastRow; i++) {
 if (i % everyN != 0) continue;
 long startTime = System.nanoTime();
-TraceScope scope = Trace.startSpan("test row", traceSampler);
-try {
+try (TraceScope scope = TraceUtil.createTrace("test row");){
 testRow(i);
-} finally {
-scope.close();
 }
 if ( (i - startRow) > opts.measureAfter) {
 // If multiget is enabled, say set to 10, testRow() returns immediately first 9 times
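
Note: two smaller API shifts show up in this file besides the try-with-resources change: Sampler is a plain (non-generic) type in htrace-core4, and it is presumably registered once up front rather than being passed to every span, since createTrace above takes only a name. A minimal sketch under those assumptions, using only the TraceUtil and Sampler calls visible in this diff (the class and chooseSampler() helper are hypothetical):

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Sampler;
import org.apache.htrace.core.TraceScope;

public class SamplerRegistrationSketch {
  public void runOnce() {
    Sampler sampler = chooseSampler();
    // Register once; later TraceUtil.createTrace calls decide on sampling without a per-call sampler argument.
    TraceUtil.addSampler(sampler);
    try (TraceScope scope = TraceUtil.createTrace("test row")) {
      // traced work goes here
    }
  }

  private Sampler chooseSampler() {
    return Sampler.ALWAYS; // placeholder; the real tool picks a sampler from its options
  }
}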

@@ -164,6 +164,12 @@
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 </dependencies>
 </profile>

@@ -230,6 +230,10 @@
 <artifactId>junit</artifactId>
 <scope>test</scope>
 </dependency>
+<dependency>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core4</artifactId>
+</dependency>
 </dependencies>
 <profiles>
 <!-- Skip the tests in this module -->

@@ -165,6 +165,10 @@
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
 <exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
 <exclusion>
 <groupId>net.java.dev.jets3t</groupId>
 <artifactId>jets3t</artifactId>
@@ -229,6 +233,12 @@
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 </dependencies>
 </profile>

@@ -202,6 +202,10 @@
 <groupId>org.apache.hbase</groupId>
 <artifactId>hbase-client</artifactId>
 </dependency>
+<dependency>
+<groupId>org.apache.hbase</groupId>
+<artifactId>hbase-hadoop-compat</artifactId>
+</dependency>
 <dependency>
 <groupId>org.apache.hbase</groupId>
 <artifactId>hbase-server</artifactId>
@@ -387,6 +391,12 @@
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 <dependency>
 <groupId>org.apache.hadoop</groupId>
@@ -426,6 +436,12 @@
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 <dependency>
 <groupId>org.apache.hadoop</groupId>

@@ -202,6 +202,10 @@
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
 <exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
 <exclusion>
 <groupId>net.java.dev.jets3t</groupId>
 <artifactId>jets3t</artifactId>
@@ -265,6 +269,12 @@
 <dependency>
 <groupId>org.apache.hadoop</groupId>
 <artifactId>hadoop-common</artifactId>
+<exclusions>
+<exclusion>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core</artifactId>
+</exclusion>
+</exclusions>
 </dependency>
 </dependencies>
 </profile>

@@ -538,9 +538,14 @@
 </dependency>

 <!-- tracing Dependencies -->
+<dependency>
+<groupId>org.apache.htrace</groupId>
+<artifactId>htrace-core4</artifactId>
+</dependency>
 <dependency>
 <groupId>org.apache.htrace</groupId>
 <artifactId>htrace-core</artifactId>
+<version>${htrace-hadoop.version}</version>
 </dependency>
 <dependency>
 <groupId>com.lmax</groupId>

@@ -23,11 +23,12 @@ import java.util.concurrent.atomic.AtomicLong;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.Server;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
+import org.apache.htrace.core.Tracer;
+import org.apache.yetus.audience.InterfaceAudience;

 /**
  * Abstract base class for all HBase event handlers. Subclasses should
@@ -74,7 +75,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
 * Default base class constructor.
 */
 public EventHandler(Server server, EventType eventType) {
-this.parent = Trace.currentSpan();
+this.parent = Tracer.getCurrentSpan();
 this.server = server;
 this.eventType = eventType;
 seqid = seqids.incrementAndGet();
@@ -99,13 +100,10 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {

 @Override
 public void run() {
-TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
-try {
+try (TraceScope scope = TraceUtil.createTrace(this.getClass().getSimpleName(), parent)) {
 process();
 } catch(Throwable t) {
 handleException(t);
-} finally {
-chunk.close();
 }
 }

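
Note: EventHandler is the cross-thread case: the constructor captures the submitting thread's current span (now via Tracer.getCurrentSpan()), and run(), executed later on an executor thread, passes that span as the parent to the two-argument TraceUtil.createTrace overload. A minimal sketch of the same hand-off, assuming only the TraceUtil, Tracer, Span and TraceScope calls visible in this diff (the Worker class itself is hypothetical):

import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class Worker implements Runnable {
  // Captured on the submitting thread so later work is linked to the caller's trace.
  private final Span parent = Tracer.getCurrentSpan();

  @Override
  public void run() {
    // Runs on a pool thread; the parent argument re-attaches this work to the original trace.
    try (TraceScope scope = TraceUtil.createTrace(getClass().getSimpleName(), parent)) {
      process();
    } catch (Throwable t) {
      // mirror EventHandler: handle here, the scope still closes via try-with-resources
    }
  }

  private void process() {
    // placeholder for the handler's work
  }
}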

@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.ByteBufferKeyValue;
 import org.apache.hadoop.hbase.SizeCachedKeyValue;
 import org.apache.hadoop.hbase.SizeCachedNoTagsKeyValue;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.fs.HFileSystem;
 import org.apache.hadoop.hbase.io.FSDataInputStreamWrapper;
@@ -59,8 +60,7 @@ import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.IdLock;
 import org.apache.hadoop.hbase.util.ObjectIntPair;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;

 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;

@@ -255,6 +255,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 // Prefetch file blocks upon open if requested
 if (cacheConf.shouldPrefetchOnOpen()) {
 PrefetchExecutor.request(path, new Runnable() {
+@Override
 public void run() {
 long offset = 0;
 long end = 0;
@@ -436,6 +437,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 * @return the total heap size of data and meta block indexes in bytes. Does
 * not take into account non-root blocks of a multilevel data index.
 */
+@Override
 public long indexSize() {
 return (dataBlockIndexReader != null ? dataBlockIndexReader.heapSize() : 0)
 + ((metaBlockIndexReader != null) ? metaBlockIndexReader.heapSize()
@@ -1239,6 +1241,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 }
 }

+@Override
 public Path getPath() {
 return path;
 }
@@ -1276,10 +1279,12 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 protected boolean decodeMemstoreTS = false;


+@Override
 public boolean isDecodeMemStoreTS() {
 return this.decodeMemstoreTS;
 }

+@Override
 public boolean shouldIncludeMemStoreTS() {
 return includesMemstoreTS;
 }
@@ -1437,8 +1442,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {

 boolean useLock = false;
 IdLock.Entry lockEntry = null;
-TraceScope traceScope = Trace.startSpan("HFileReaderImpl.readBlock");
-try {
+try (TraceScope traceScope = TraceUtil.createTrace("HFileReaderImpl.readBlock")) {
 while (true) {
 // Check cache for block. If found return.
 if (cacheConf.shouldReadBlockFromCache(expectedBlockType)) {
@@ -1453,9 +1457,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 if (LOG.isTraceEnabled()) {
 LOG.trace("From Cache " + cachedBlock);
 }
-if (Trace.isTracing()) {
-traceScope.getSpan().addTimelineAnnotation("blockCacheHit");
-}
+TraceUtil.addTimelineAnnotation("blockCacheHit");
 assert cachedBlock.isUnpacked() : "Packed block leak.";
 if (cachedBlock.getBlockType().isData()) {
 if (updateCacheMetrics) {
@@ -1481,9 +1483,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 // Carry on, please load.
 }

-if (Trace.isTracing()) {
-traceScope.getSpan().addTimelineAnnotation("blockCacheMiss");
-}
+TraceUtil.addTimelineAnnotation("blockCacheMiss");
 // Load block from filesystem.
 HFileBlock hfileBlock =
 fsBlockReader.readBlockData(dataBlockOffset, onDiskBlockSize, pread, !isCompaction);
@@ -1505,7 +1505,6 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 return unpacked;
 }
 } finally {
-traceScope.close();
 if (lockEntry != null) {
 offsetLock.releaseLockEntry(lockEntry);
 }
@@ -1568,6 +1567,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 close(cacheConf.shouldEvictOnClose());
 }

+@Override
 public void close(boolean evictOnClose) throws IOException {
 PrefetchExecutor.cancel(path);
 if (evictOnClose && cacheConf.isBlockCacheEnabled()) {
@@ -1580,11 +1580,13 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 fsBlockReader.closeStreams();
 }

+@Override
 public DataBlockEncoding getEffectiveEncodingInCache(boolean isCompaction) {
 return dataBlockEncoder.getEffectiveEncodingInCache(isCompaction);
 }

 /** For testing */
+@Override
 public HFileBlock.FSReader getUncachedBlockReader() {
 return fsBlockReader;
 }
@@ -1612,6 +1614,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 return curBlock != null;
 }

+@Override
 public void setNonSeekedState() {
 reset();
 }
@@ -1713,6 +1716,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 }
 }

+@Override
 protected Cell getFirstKeyCellInBlock(HFileBlock curBlock) {
 return dataBlockEncoder.getFirstKeyCellInBlock(getEncodedBuffer(curBlock));
 }
@@ -1730,6 +1734,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 return seeker.seekToKeyInBlock(key, seekBefore);
 }

+@Override
 public int compareKey(CellComparator comparator, Cell key) {
 return seeker.compareKey(comparator, key);
 }
@@ -1776,6 +1781,7 @@ public class HFileReaderImpl implements HFile.Reader, Configurable {
 * Returns false if block prefetching was requested for this file and has
 * not completed, true otherwise
 */
+@Override
 @VisibleForTesting
 public boolean prefetchComplete() {
 return PrefetchExecutor.isCompleted(path);

@@ -24,6 +24,7 @@ import java.util.Optional;
 import org.apache.hadoop.hbase.CallDroppedException;
 import org.apache.hadoop.hbase.CellScanner;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.yetus.audience.InterfaceStability;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
@@ -32,8 +33,6 @@ import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;

 /**
 * The request processing logic, which is usually executed in thread pools provided by an
@@ -116,20 +115,17 @@ public class CallRunner {
 String error = null;
 Pair<Message, CellScanner> resultPair = null;
 RpcServer.CurCall.set(call);
-TraceScope traceScope = null;
 try {
 if (!this.rpcServer.isStarted()) {
 InetSocketAddress address = rpcServer.getListenerAddress();
 throw new ServerNotRunningYetException("Server " +
 (address != null ? address : "(channel closed)") + " is not running yet");
 }
-if (call.getTraceInfo() != null) {
-String serviceName =
-call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
-String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
-String traceString = serviceName + "." + methodName;
-traceScope = Trace.startSpan(traceString, call.getTraceInfo());
-}
+String serviceName =
+call.getService() != null ? call.getService().getDescriptorForType().getName() : "";
+String methodName = (call.getMethod() != null) ? call.getMethod().getName() : "";
+String traceString = serviceName + "." + methodName;
+TraceUtil.createTrace(traceString);
 // make the call
 resultPair = this.rpcServer.call(call, this.status);
 } catch (TimeoutIOException e){
@@ -150,9 +146,6 @@ public class CallRunner {
 throw (Error)e;
 }
 } finally {
-if (traceScope != null) {
-traceScope.close();
-}
 RpcServer.CurCall.set(null);
 if (resultPair != null) {
 this.rpcServer.addCallSize(call.getSize() * -1);

@@ -181,7 +181,7 @@ public class NettyRpcServer extends RpcServer {
 Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
 long startTime, int timeout) throws IOException {
 NettyServerCall fakeCall = new NettyServerCall(-1, service, md, null, param, cellScanner, null,
--1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
+-1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null);
 return call(fakeCall, status);
 }
 }

@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;

 /**
 * Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -40,9 +39,9 @@ class NettyServerCall extends ServerCall<NettyServerRpcConnection> {

 NettyServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
 Message param, CellScanner cellScanner, NettyServerRpcConnection connection, long size,
-TraceInfo tinfo, InetAddress remoteAddress, long receiveTime, int timeout,
+InetAddress remoteAddress, long receiveTime, int timeout,
 ByteBufferPool reservoir, CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
-super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
 receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
 }


@@ -34,7 +34,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;

 /**
 * RpcConnection implementation for netty rpc server.
@@ -119,9 +118,9 @@ class NettyServerRpcConnection extends ServerRpcConnection {
 @Override
 public NettyServerCall createCall(int id, final BlockingService service,
 final MethodDescriptor md, RequestHeader header, Message param, CellScanner cellScanner,
-long size, TraceInfo tinfo, final InetAddress remoteAddress, int timeout,
+long size, final InetAddress remoteAddress, int timeout,
 CallCleanup reqCleanup) {
-return new NettyServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+return new NettyServerCall(id, service, md, header, param, cellScanner, this, size,
 remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
 this.rpcServer.cellBlockBuilder, reqCleanup);
 }

@@ -30,7 +30,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;

 /**
 * Interface of all necessary to carry out a RPC method invocation on the server.
@@ -133,9 +132,4 @@ public interface RpcCall extends RpcCallContext {
 * @return A short string format of this call without possibly lengthy params
 */
 String toShortString();
-
-/**
-* @return TraceInfo attached to this call.
-*/
-TraceInfo getTraceInfo();
 }

@@ -45,7 +45,6 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.ResponseHeade
 import org.apache.hadoop.hbase.util.ByteBufferUtils;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.TraceInfo;

 /**
 * Datastructure that holds all necessary to a method invocation and then afterward, carries
@@ -79,7 +78,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc

 protected final long size; // size of current call
 protected boolean isError;
-protected final TraceInfo tinfo;
 protected ByteBufferListOutputStream cellBlockStream = null;
 protected CallCleanup reqCleanup = null;

@@ -96,7 +94,7 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value="NP_NULL_ON_SOME_PATH",
 justification="Can't figure why this complaint is happening... see below")
 ServerCall(int id, BlockingService service, MethodDescriptor md, RequestHeader header,
-Message param, CellScanner cellScanner, T connection, long size, TraceInfo tinfo,
+Message param, CellScanner cellScanner, T connection, long size,
 InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
 CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup) {
 this.id = id;
@@ -110,7 +108,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
 this.response = null;
 this.isError = false;
 this.size = size;
-this.tinfo = tinfo;
 if (connection != null) {
 this.user = connection.user;
 this.retryImmediatelySupported = connection.retryImmediatelySupported;
@@ -506,11 +503,6 @@ abstract class ServerCall<T extends ServerRpcConnection> implements RpcCall, Rpc
 return connection.getRemotePort();
 }

-@Override
-public TraceInfo getTraceInfo() {
-return tinfo;
-}
-
 @Override
 public synchronized BufferChain getResponse() {
 return response;

@@ -77,7 +77,6 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.htrace.TraceInfo;

 /** Reads calls from a connection and queues them for handling. */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(
@@ -632,7 +631,7 @@ abstract class ServerRpcConnection implements Closeable {
 if ((totalRequestSize +
 this.rpcServer.callQueueSizeInBytes.sum()) > this.rpcServer.maxQueueSizeInBytes) {
 final ServerCall<?> callTooBig = createCall(id, this.service, null, null, null, null,
-totalRequestSize, null, null, 0, this.callCleanup);
+totalRequestSize, null, 0, this.callCleanup);
 this.rpcServer.metrics.exception(RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION);
 callTooBig.setResponse(null, null, RpcServer.CALL_QUEUE_TOO_BIG_EXCEPTION,
 "Call queue is full on " + this.rpcServer.server.getServerName() +
@@ -694,21 +693,18 @@ abstract class ServerRpcConnection implements Closeable {
 }

 ServerCall<?> readParamsFailedCall = createCall(id, this.service, null, null, null, null,
-totalRequestSize, null, null, 0, this.callCleanup);
+totalRequestSize, null, 0, this.callCleanup);
 readParamsFailedCall.setResponse(null, null, t, msg + "; " + t.getMessage());
 readParamsFailedCall.sendResponseIfReady();
 return;
 }

-TraceInfo traceInfo = header.hasTraceInfo() ? new TraceInfo(header
-.getTraceInfo().getTraceId(), header.getTraceInfo().getParentId())
-: null;
 int timeout = 0;
 if (header.hasTimeout() && header.getTimeout() > 0) {
 timeout = Math.max(this.rpcServer.minClientRequestTimeout, header.getTimeout());
 }
 ServerCall<?> call = createCall(id, this.service, md, header, param, cellScanner, totalRequestSize,
-traceInfo, this.addr, timeout, this.callCleanup);
+this.addr, timeout, this.callCleanup);

 if (!this.rpcServer.scheduler.dispatch(new CallRunner(this.rpcServer, call))) {
 this.rpcServer.callQueueSizeInBytes.add(-1 * call.getSize());
@@ -790,7 +786,7 @@ abstract class ServerRpcConnection implements Closeable {
 public abstract boolean isConnectionOpen();

 public abstract ServerCall<?> createCall(int id, BlockingService service, MethodDescriptor md,
-RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+RequestHeader header, Message param, CellScanner cellScanner, long size,
 InetAddress remoteAddress, int timeout, CallCleanup reqCleanup);

 private static class ByteBuffByteInput extends ByteInput {
|
|
|
@@ -489,7 +489,7 @@ public class SimpleRpcServer extends RpcServer {
       Message param, CellScanner cellScanner, long receiveTime, MonitoredRPCHandler status,
       long startTime, int timeout) throws IOException {
     SimpleServerCall fakeCall = new SimpleServerCall(-1, service, md, null, param, cellScanner,
-        null, -1, null, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
+        null, -1, null, receiveTime, timeout, reservoir, cellBlockBuilder, null, null);
     return call(fakeCall, status);
   }
@@ -28,7 +28,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.BlockingService;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Descriptors.MethodDescriptor;
 import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
-import org.apache.htrace.TraceInfo;
 
 /**
  * Datastructure that holds all necessary to a method invocation and then afterward, carries the
@@ -43,10 +42,10 @@ class SimpleServerCall extends ServerCall<SimpleServerRpcConnection> {
       justification = "Can't figure why this complaint is happening... see below")
   SimpleServerCall(int id, final BlockingService service, final MethodDescriptor md,
       RequestHeader header, Message param, CellScanner cellScanner,
-      SimpleServerRpcConnection connection, long size, TraceInfo tinfo,
+      SimpleServerRpcConnection connection, long size,
       final InetAddress remoteAddress, long receiveTime, int timeout, ByteBufferPool reservoir,
       CellBlockBuilder cellBlockBuilder, CallCleanup reqCleanup, SimpleRpcServerResponder responder) {
-    super(id, service, md, header, param, cellScanner, connection, size, tinfo, remoteAddress,
+    super(id, service, md, header, param, cellScanner, connection, size, remoteAddress,
         receiveTime, timeout, reservoir, cellBlockBuilder, reqCleanup);
     this.responder = responder;
   }
@@ -44,7 +44,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
 import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.RPCProtos.RequestHeader;
 import org.apache.hadoop.hbase.util.Pair;
-import org.apache.htrace.TraceInfo;
 
 /** Reads calls from a connection and queues them for handling. */
 @edu.umd.cs.findbugs.annotations.SuppressWarnings(value = "VO_VOLATILE_INCREMENT",
@@ -212,7 +211,7 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
       // Notify the client about the offending request
       SimpleServerCall reqTooBig = new SimpleServerCall(header.getCallId(), this.service, null,
-          null, null, null, this, 0, null, this.addr, System.currentTimeMillis(), 0,
+          null, null, null, this, 0, this.addr, System.currentTimeMillis(), 0,
           this.rpcServer.reservoir, this.rpcServer.cellBlockBuilder, null, responder);
       this.rpcServer.metrics.exception(SimpleRpcServer.REQUEST_TOO_BIG_EXCEPTION);
       // Make sure the client recognizes the underlying exception
@@ -343,9 +342,9 @@ class SimpleServerRpcConnection extends ServerRpcConnection {
 
   @Override
   public SimpleServerCall createCall(int id, BlockingService service, MethodDescriptor md,
-      RequestHeader header, Message param, CellScanner cellScanner, long size, TraceInfo tinfo,
+      RequestHeader header, Message param, CellScanner cellScanner, long size,
       InetAddress remoteAddress, int timeout, CallCleanup reqCleanup) {
-    return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size, tinfo,
+    return new SimpleServerCall(id, service, md, header, param, cellScanner, this, size,
         remoteAddress, System.currentTimeMillis(), timeout, this.rpcServer.reservoir,
         this.rpcServer.cellBlockBuilder, reqCleanup, this.responder);
   }
@@ -161,6 +161,7 @@ import org.apache.hadoop.hbase.replication.master.TableCFsUpdater;
 import org.apache.hadoop.hbase.replication.regionserver.Replication;
 import org.apache.hadoop.hbase.security.AccessDeniedException;
 import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
@@ -470,6 +471,7 @@ public class HMaster extends HRegionServer implements MasterServices {
   public HMaster(final Configuration conf)
       throws IOException, KeeperException {
     super(conf);
+    TraceUtil.initTracer(conf);
     try {
       this.rsFatals = new MemoryBoundedLogMessageBuffer(
           conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
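Note: the TraceUtil class that HMaster now calls is added elsewhere in this commit and is not shown in this excerpt. As a rough, illustrative sketch only (class, field, and adapter names below are assumptions, not the committed code), such a wrapper can be built on htrace-core4's Tracer API so that callers like the HMaster constructor only ever touch initTracer() and createTrace():

    // Illustrative sketch only; the committed TraceUtil may differ in names and details.
    import org.apache.hadoop.conf.Configuration;
    import org.apache.htrace.core.TraceScope;
    import org.apache.htrace.core.Tracer;

    public final class TraceUtilSketch {
      private static Tracer tracer;   // one process-wide tracer

      private TraceUtilSketch() {
      }

      // Called once at startup (master/regionserver constructors, command lines, tests).
      public static synchronized void initTracer(Configuration conf) {
        if (tracer == null && conf != null) {
          tracer = new Tracer.Builder("HBaseTracer")
              // HBaseHTraceConfiguration is assumed here to adapt an HBase Configuration
              // to htrace's HTraceConfiguration.
              .conf(new HBaseHTraceConfiguration(conf))
              .build();
        }
      }

      // Returns null when tracing was never initialized, which is why call sites in
      // this patch null-check the returned scope before using it.
      public static TraceScope createTrace(String description) {
        return tracer == null ? null : tracer.newScope(description);
      }
    }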
@@ -34,6 +34,7 @@ import org.apache.hadoop.hbase.LocalHBaseCluster;
 import org.apache.hadoop.hbase.MasterNotRunningException;
 import org.apache.hadoop.hbase.ZNodeClearer;
 import org.apache.hadoop.hbase.ZooKeeperConnectionException;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.Connection;
@@ -147,6 +148,8 @@ public class HMasterCommandLine extends ServerCommandLine {
 
   private int startMaster() {
     Configuration conf = getConf();
+    TraceUtil.initTracer(conf);
+
     try {
       // If 'local', defer to LocalHBaseCluster instance. Starts master
       // and regionserver both in the one JVM.
@@ -91,11 +91,11 @@ import org.apache.hadoop.hbase.ExtendedCellBuilderFactory;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HConstants.OperationStatusCode;
 import org.apache.hadoop.hbase.HDFSBlocksDistribution;
-import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.KeyValueUtil;
 import org.apache.hadoop.hbase.NamespaceDescriptor;
 import org.apache.hadoop.hbase.NotServingRegionException;
+import org.apache.hadoop.hbase.PrivateCellUtil;
 import org.apache.hadoop.hbase.RegionTooBusyException;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.Tag;
@@ -149,33 +149,6 @@ import org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController
 import org.apache.hadoop.hbase.regionserver.throttle.ThroughputController;
 import org.apache.hadoop.hbase.regionserver.wal.WALUtil;
 import org.apache.hadoop.hbase.security.User;
-import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
-import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
-import org.apache.hadoop.hbase.util.Bytes;
-import org.apache.hadoop.hbase.util.CancelableProgressable;
-import org.apache.hadoop.hbase.util.ClassSize;
-import org.apache.hadoop.hbase.util.CollectionUtils;
-import org.apache.hadoop.hbase.util.CompressionTest;
-import org.apache.hadoop.hbase.util.EncryptionTest;
-import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
-import org.apache.hadoop.hbase.util.FSUtils;
-import org.apache.hadoop.hbase.util.HashedBytes;
-import org.apache.hadoop.hbase.util.NonceKey;
-import org.apache.hadoop.hbase.util.Pair;
-import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
-import org.apache.hadoop.hbase.util.Threads;
-import org.apache.hadoop.hbase.wal.WAL;
-import org.apache.hadoop.hbase.wal.WALEdit;
-import org.apache.hadoop.hbase.wal.WALFactory;
-import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.hadoop.hbase.wal.WALSplitter;
-import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
-import org.apache.hadoop.io.MultipleIOException;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
 import org.apache.hadoop.hbase.shaded.com.google.common.collect.Lists;
@@ -198,6 +171,32 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescript
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
 import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
+import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
+import org.apache.hadoop.hbase.snapshot.SnapshotManifest;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.CancelableProgressable;
+import org.apache.hadoop.hbase.util.ClassSize;
+import org.apache.hadoop.hbase.util.CollectionUtils;
+import org.apache.hadoop.hbase.util.CompressionTest;
+import org.apache.hadoop.hbase.util.EncryptionTest;
+import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
+import org.apache.hadoop.hbase.util.FSUtils;
+import org.apache.hadoop.hbase.util.HashedBytes;
+import org.apache.hadoop.hbase.util.NonceKey;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
+import org.apache.hadoop.hbase.util.Threads;
+import org.apache.hadoop.hbase.wal.WAL;
+import org.apache.hadoop.hbase.wal.WALEdit;
+import org.apache.hadoop.hbase.wal.WALFactory;
+import org.apache.hadoop.hbase.wal.WALKey;
+import org.apache.hadoop.hbase.wal.WALSplitter;
+import org.apache.hadoop.hbase.wal.WALSplitter.MutationReplay;
+import org.apache.hadoop.io.MultipleIOException;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.htrace.core.TraceScope;
+import org.apache.yetus.audience.InterfaceAudience;
 
 import edu.umd.cs.findbugs.annotations.Nullable;
 
|
@ -3727,6 +3726,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
||||||
return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
|
return batchMutate(new MutationBatchOperation(this, mutations, atomic, nonceGroup, nonce));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
|
public OperationStatus[] batchMutate(Mutation[] mutations) throws IOException {
|
||||||
return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
|
return batchMutate(mutations, HConstants.NO_NONCE, HConstants.NO_NONCE);
|
||||||
}
|
}
|
||||||
|
@ -5560,16 +5560,10 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
||||||
|
|
||||||
RowLockContext rowLockContext = null;
|
RowLockContext rowLockContext = null;
|
||||||
RowLockImpl result = null;
|
RowLockImpl result = null;
|
||||||
TraceScope traceScope = null;
|
|
||||||
|
|
||||||
// If we're tracing start a span to show how long this took.
|
|
||||||
if (Trace.isTracing()) {
|
|
||||||
traceScope = Trace.startSpan("HRegion.getRowLock");
|
|
||||||
traceScope.getSpan().addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
|
|
||||||
}
|
|
||||||
|
|
||||||
boolean success = false;
|
boolean success = false;
|
||||||
try {
|
try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
|
||||||
|
TraceUtil.addTimelineAnnotation("Getting a " + (readLock?"readLock":"writeLock"));
|
||||||
// Keep trying until we have a lock or error out.
|
// Keep trying until we have a lock or error out.
|
||||||
// TODO: do we need to add a time component here?
|
// TODO: do we need to add a time component here?
|
||||||
while (result == null) {
|
while (result == null) {
|
||||||
|
@ -5598,9 +5592,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
||||||
}
|
}
|
||||||
|
|
||||||
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
|
if (timeout <= 0 || !result.getLock().tryLock(timeout, TimeUnit.MILLISECONDS)) {
|
||||||
if (traceScope != null) {
|
TraceUtil.addTimelineAnnotation("Failed to get row lock");
|
||||||
traceScope.getSpan().addTimelineAnnotation("Failed to get row lock");
|
|
||||||
}
|
|
||||||
result = null;
|
result = null;
|
||||||
String message = "Timed out waiting for lock for row: " + rowKey + " in region "
|
String message = "Timed out waiting for lock for row: " + rowKey + " in region "
|
||||||
+ getRegionInfo().getEncodedName();
|
+ getRegionInfo().getEncodedName();
|
||||||
|
@ -5618,9 +5610,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
||||||
LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
|
LOG.warn("Thread interrupted waiting for lock on row: " + rowKey);
|
||||||
InterruptedIOException iie = new InterruptedIOException();
|
InterruptedIOException iie = new InterruptedIOException();
|
||||||
iie.initCause(ie);
|
iie.initCause(ie);
|
||||||
if (traceScope != null) {
|
TraceUtil.addTimelineAnnotation("Interrupted exception getting row lock");
|
||||||
traceScope.getSpan().addTimelineAnnotation("Interrupted exception getting row lock");
|
|
||||||
}
|
|
||||||
Thread.currentThread().interrupt();
|
Thread.currentThread().interrupt();
|
||||||
throw iie;
|
throw iie;
|
||||||
} finally {
|
} finally {
|
||||||
|
@ -5628,9 +5618,6 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
||||||
if (!success && rowLockContext != null) {
|
if (!success && rowLockContext != null) {
|
||||||
rowLockContext.cleanUp();
|
rowLockContext.cleanUp();
|
||||||
}
|
}
|
||||||
if (traceScope != null) {
|
|
||||||
traceScope.close();
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
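The getRowLock() rewrite above is the pattern this commit applies throughout: htrace-core4's TraceScope implements Closeable, so a try-with-resources block replaces the manual startSpan/close bookkeeping. A minimal usage sketch, assuming TraceUtil.createTrace() may return null when tracing is disabled (try-with-resources tolerates a null resource):

    import org.apache.hadoop.hbase.trace.TraceUtil;
    import org.apache.htrace.core.TraceScope;

    class RowLockTracingSketch {
      void doLockedWork(boolean readLock) {
        // A null scope is fine: try-with-resources skips close() for null resources.
        try (TraceScope scope = TraceUtil.createTrace("HRegion.getRowLock")) {
          TraceUtil.addTimelineAnnotation("Getting a " + (readLock ? "readLock" : "writeLock"));
          // ... acquire the lock; the span is closed automatically on every exit path,
          // including the timeout and interrupt branches that previously closed it by hand.
        }
      }
    }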
@@ -139,6 +139,7 @@ import org.apache.hadoop.hbase.security.Superusers;
 import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.security.UserProvider;
 import org.apache.hadoop.hbase.trace.SpanReceiverHost;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Addressing;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CompressionTest;
@@ -526,6 +527,7 @@ public class HRegionServer extends HasThread implements
   // Defer till after we register with the Master as much as possible. See #startServices.
   public HRegionServer(Configuration conf) throws IOException {
     super("RegionServer"); // thread name
+    TraceUtil.initTracer(conf);
     try {
       this.startcode = System.currentTimeMillis();
       this.conf = conf;
@@ -21,6 +21,7 @@ package org.apache.hadoop.hbase.regionserver;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hbase.HConstants;
@@ -50,6 +51,7 @@ public class HRegionServerCommandLine extends ServerCommandLine {
 
   private int start() throws Exception {
     Configuration conf = getConf();
+    TraceUtil.initTracer(conf);
     try {
       // If 'local', don't start a region server here. Defer to
       // LocalHBaseCluster. It manages 'local' clusters.
@@ -44,6 +44,8 @@ import org.apache.hadoop.hbase.DroppedSnapshotException;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionReplicaUtil;
 import org.apache.hadoop.hbase.regionserver.HRegion.FlushResult;
+import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
 import org.apache.hadoop.hbase.util.HasThread;
@@ -51,12 +53,9 @@ import org.apache.hadoop.hbase.util.ServerRegionReplicaUtil;
 import org.apache.hadoop.hbase.util.Threads;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.base.Preconditions;
-
 /**
  * Thread that flushes cache on request
  *
@@ -447,7 +446,7 @@ class MemStoreFlusher implements FlushRequester {
             "store files; delaying flush up to " + this.blockingWaitTime + "ms");
         if (!this.server.compactSplitThread.requestSplit(region)) {
           try {
-            this.server.compactSplitThread.requestSystemCompaction((HRegion) region,
+            this.server.compactSplitThread.requestSystemCompaction(region,
                 Thread.currentThread().getName());
           } catch (IOException e) {
             e = e instanceof RemoteException ?
@@ -572,12 +571,10 @@ class MemStoreFlusher implements FlushRequester {
    * amount of memstore consumption.
    */
   public void reclaimMemStoreMemory() {
-    TraceScope scope = Trace.startSpan("MemStoreFluser.reclaimMemStoreMemory");
+    TraceScope scope = TraceUtil.createTrace("MemStoreFluser.reclaimMemStoreMemory");
     FlushType flushType = isAboveHighWaterMark();
     if (flushType != FlushType.NORMAL) {
-      if (Trace.isTracing()) {
-        scope.getSpan().addTimelineAnnotation("Force Flush. We're above high water mark.");
-      }
+      TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
       long start = EnvironmentEdgeManager.currentTime();
       synchronized (this.blockSignal) {
         boolean blocked = false;
@@ -640,7 +637,9 @@ class MemStoreFlusher implements FlushRequester {
     } else if (isAboveLowWaterMark() != FlushType.NORMAL) {
       wakeupFlushThread();
     }
-    scope.close();
+    if(scope!= null) {
+      scope.close();
+    }
   }
 
   private void logMsg(String string1, long val, long max) {
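Unlike the WAL methods, reclaimMemStoreMemory() above keeps an explicit scope with a null-guarded close() at the end rather than try-with-resources; both shapes behave the same for a Closeable scope. An illustrative alternative only (not part of the commit, surrounding flush logic elided) showing the same method written with try-with-resources:

    // Alternative shape only; the commit keeps the explicit null-guarded scope.close().
    public void reclaimMemStoreMemory() {
      try (TraceScope scope = TraceUtil.createTrace("MemStoreFluser.reclaimMemStoreMemory")) {
        FlushType flushType = isAboveHighWaterMark();
        if (flushType != FlushType.NORMAL) {
          TraceUtil.addTimelineAnnotation("Force Flush. We're above high water mark.");
          // ... blocking/flush logic unchanged ...
        } else if (isAboveLowWaterMark() != FlushType.NORMAL) {
          wakeupFlushThread();
        }
      } // scope, if non-null, closes here automatically
    }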
@@ -59,6 +59,8 @@ import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
 import org.apache.hadoop.hbase.io.util.MemorySizeUtil;
 import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
+import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -72,14 +74,10 @@ import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.WriterBase;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 
-import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
-
 import com.lmax.disruptor.RingBuffer;
 
 /**
@@ -681,8 +679,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
    * @throws IOException if there is a problem flushing or closing the underlying FS
    */
   Path replaceWriter(Path oldPath, Path newPath, W nextWriter) throws IOException {
-    TraceScope scope = Trace.startSpan("FSHFile.replaceWriter");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("FSHFile.replaceWriter")) {
       long oldFileLen = doReplaceWriter(oldPath, newPath, nextWriter);
       int oldNumEntries = this.numEntries.getAndSet(0);
       final String newPathString = (null == newPath ? null : CommonFSUtils.getPath(newPath));
@@ -696,16 +693,16 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
         LOG.info("New WAL " + newPathString);
       }
       return newPath;
-    } finally {
-      scope.close();
     }
   }
 
   protected Span blockOnSync(final SyncFuture syncFuture) throws IOException {
     // Now we have published the ringbuffer, halt the current thread until we get an answer back.
     try {
-      syncFuture.get(walSyncTimeoutNs);
-      return syncFuture.getSpan();
+      if (syncFuture != null) {
+        syncFuture.get(walSyncTimeoutNs);
+      }
+      return (syncFuture == null) ? null : syncFuture.getSpan();
     } catch (TimeoutIOException tioe) {
       // SyncFuture reuse by thread, if TimeoutIOException happens, ringbuffer
       // still refer to it, so if this thread use it next time may get a wrong
@@ -748,8 +745,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
       LOG.debug("WAL closing. Skipping rolling of writer");
       return regionsToFlush;
     }
-    TraceScope scope = Trace.startSpan("FSHLog.rollWriter");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.rollWriter")) {
       Path oldPath = getOldPath();
       Path newPath = getNewPath();
       // Any exception from here on is catastrophic, non-recoverable so we currently abort.
@@ -774,8 +770,6 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
             "for details.", exception);
       } finally {
         closeBarrier.endOp();
-        assert scope == NullScope.INSTANCE || !scope.isDetached();
-        scope.close();
       }
       return regionsToFlush;
     } finally {
@@ -950,7 +944,7 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     if (timeInNanos > this.slowSyncNs) {
       String msg = new StringBuilder().append("Slow sync cost: ").append(timeInNanos / 1000000)
           .append(" ms, current pipeline: ").append(Arrays.toString(getPipeline())).toString();
-      Trace.addTimelineAnnotation(msg);
+      TraceUtil.addTimelineAnnotation(msg);
       LOG.info(msg);
     }
     if (!listeners.isEmpty()) {
@@ -966,16 +960,20 @@ public abstract class AbstractFSWAL<W extends WriterBase> implements WAL {
     if (this.closed) {
       throw new IOException("Cannot append; log is closed, regionName = " + hri.getRegionNameAsString());
     }
-    TraceScope scope = Trace.startSpan(implClassName + ".append");
     MutableLong txidHolder = new MutableLong();
     MultiVersionConcurrencyControl.WriteEntry we = key.getMvcc().begin(() -> {
       txidHolder.setValue(ringBuffer.next());
     });
     long txid = txidHolder.longValue();
-    try {
+    try (TraceScope scope = TraceUtil.createTrace(implClassName + ".append")) {
       FSWALEntry entry = new FSWALEntry(txid, key, edits, hri, inMemstore);
       entry.stampRegionSequenceId(we);
-      ringBuffer.get(txid).load(entry, scope.detach());
+      if(scope!=null){
+        ringBuffer.get(txid).load(entry, scope.getSpan());
+      }
+      else{
+        ringBuffer.get(txid).load(entry, null);
+      }
     } finally {
       ringBuffer.publish(txid);
     }
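In the append() path above, the htrace 3 idiom of detaching the span from the scope (scope.detach()) before handing it to the ring buffer is replaced by reading it with scope.getSpan(), guarded for the case where tracing is off. A tiny hypothetical helper (not in the commit) that captures the same null-safe hand-off the inline if/else performs:

    import org.apache.htrace.core.Span;
    import org.apache.htrace.core.TraceScope;

    final class SpanHandOff {
      private SpanHandOff() {
      }

      // Equivalent to the inline if/else around ringBuffer.get(txid).load(entry, ...) above.
      static Span spanOrNull(TraceScope scope) {
        return scope == null ? null : scope.getSpan();
      }
    }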
@@ -44,6 +44,8 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.trace.TraceUtil;
+import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.ConnectionUtils;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.io.asyncfs.AsyncFSOutput;
@@ -52,18 +54,14 @@ import org.apache.hadoop.hbase.wal.AsyncFSWALProvider;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
 import org.apache.hadoop.hbase.wal.WALProvider.AsyncWriter;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
-import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.util.concurrent.ThreadFactoryBuilder;
 import org.apache.hadoop.hbase.shaded.io.netty.channel.Channel;
 import org.apache.hadoop.hbase.shaded.io.netty.channel.EventLoop;
 import org.apache.hadoop.hbase.shaded.io.netty.util.concurrent.SingleThreadEventExecutor;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 
 import com.lmax.disruptor.RingBuffer;
 import com.lmax.disruptor.Sequence;
@@ -342,9 +340,9 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
   }
 
   private void addTimeAnnotation(SyncFuture future, String annotation) {
-    TraceScope scope = Trace.continueSpan(future.getSpan());
-    Trace.addTimelineAnnotation(annotation);
-    future.setSpan(scope.detach());
+    TraceUtil.addTimelineAnnotation(annotation);
+    //TODO handle htrace API change, see HBASE-18895
+    //future.setSpan(scope.getSpan());
   }
 
   private int finishSyncLowerThanTxid(long txid, boolean addSyncTrace) {
@@ -415,14 +413,16 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     Span span = entry.detachSpan();
     // the span maybe null if this is a retry after rolling.
     if (span != null) {
-      TraceScope scope = Trace.continueSpan(span);
+      //TODO handle htrace API change, see HBASE-18895
+      //TraceScope scope = Trace.continueSpan(span);
       try {
         appended = append(writer, entry);
       } catch (IOException e) {
         throw new AssertionError("should not happen", e);
       } finally {
-        assert scope == NullScope.INSTANCE || !scope.isDetached();
-        scope.close(); // append scope is complete
+        //TODO handle htrace API change, see HBASE-18895
+        //assert scope == NullScope.INSTANCE || !scope.isDetached();
+        //scope.close(); // append scope is complete
       }
     } else {
       try {
@@ -559,24 +559,26 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
 
   @Override
   public void sync() throws IOException {
-    TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")){
       long txid = waitingConsumePayloads.next();
-      SyncFuture future;
+      SyncFuture future = null;
       try {
-        future = getSyncFuture(txid, scope.detach());
-        RingBufferTruck truck = waitingConsumePayloads.get(txid);
-        truck.load(future);
+        if (scope != null) {
+          future = getSyncFuture(txid, scope.getSpan());
+          RingBufferTruck truck = waitingConsumePayloads.get(txid);
+          truck.load(future);
+        }
       } finally {
         waitingConsumePayloads.publish(txid);
       }
       if (shouldScheduleConsumer()) {
         eventLoop.execute(consumer);
       }
-      scope = Trace.continueSpan(blockOnSync(future));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+      //TODO handle htrace API change, see HBASE-18895
+      //scope = Trace.continueSpan(blockOnSync(future));
+      if (future != null) {
+        blockOnSync(future);
+      }
     }
   }
 
@@ -585,25 +587,27 @@ public class AsyncFSWAL extends AbstractFSWAL<AsyncWriter> {
     if (highestSyncedTxid.get() >= txid) {
       return;
     }
-    TraceScope scope = Trace.startSpan("AsyncFSWAL.sync");
-    try {
+    try (TraceScope scope = TraceUtil.createTrace("AsyncFSWAL.sync")) {
       // here we do not use ring buffer sequence as txid
       long sequence = waitingConsumePayloads.next();
-      SyncFuture future;
+      SyncFuture future = null;
       try {
-        future = getSyncFuture(txid, scope.detach());
-        RingBufferTruck truck = waitingConsumePayloads.get(sequence);
-        truck.load(future);
+        if(scope!= null) {
+          future = getSyncFuture(txid, scope.getSpan());
+          RingBufferTruck truck = waitingConsumePayloads.get(sequence);
+          truck.load(future);
+        }
       } finally {
         waitingConsumePayloads.publish(sequence);
       }
       if (shouldScheduleConsumer()) {
         eventLoop.execute(consumer);
       }
-      scope = Trace.continueSpan(blockOnSync(future));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+      //TODO handle htrace API change, see HBASE-18895
+      //scope = Trace.continueSpan(blockOnSync(future));
+      if (future != null) {
+        blockOnSync(future);
+      }
     }
   }
 
@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.ClassSize;
 import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
@@ -54,10 +55,8 @@ import org.apache.hadoop.hbase.wal.WALSplitter;
 import org.apache.hadoop.hdfs.DFSOutputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.htrace.NullScope;
-import org.apache.htrace.Span;
-import org.apache.htrace.Trace;
-import org.apache.htrace.TraceScope;
+import org.apache.htrace.core.Span;
+import org.apache.htrace.core.TraceScope;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
 
@@ -345,7 +344,7 @@ public class FSHLog extends AbstractFSWAL<Writer> {
         // use assert to make sure no change breaks the logic that
         // sequence and zigzagLatch will be set together
         assert sequence > 0L : "Failed to get sequence from ring buffer";
-        Trace.addTimelineAnnotation("awaiting safepoint");
+        TraceUtil.addTimelineAnnotation("awaiting safepoint");
         syncFuture = zigzagLatch.waitSafePoint(publishSyncOnRingBuffer(sequence));
       }
     } catch (FailedSyncBeforeLogCloseException e) {
@@ -361,9 +360,9 @@ public class FSHLog extends AbstractFSWAL<Writer> {
     if (this.writer != null) {
       oldFileLen = this.writer.getLength();
       try {
-        Trace.addTimelineAnnotation("closing writer");
+        TraceUtil.addTimelineAnnotation("closing writer");
         this.writer.close();
-        Trace.addTimelineAnnotation("writer closed");
+        TraceUtil.addTimelineAnnotation("writer closed");
         this.closeErrorCount.set(0);
       } catch (IOException ioe) {
         int errors = closeErrorCount.incrementAndGet();
@@ -595,13 +594,14 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       }
       // I got something. Lets run. Save off current sequence number in case it changes
       // while we run.
-      TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
+      //TODO handle htrace API change, see HBASE-18895
+      //TraceScope scope = Trace.continueSpan(takeSyncFuture.getSpan());
       long start = System.nanoTime();
       Throwable lastException = null;
       try {
-        Trace.addTimelineAnnotation("syncing writer");
+        TraceUtil.addTimelineAnnotation("syncing writer");
         writer.sync();
-        Trace.addTimelineAnnotation("writer synced");
+        TraceUtil.addTimelineAnnotation("writer synced");
         currentSequence = updateHighestSyncedSequence(currentSequence);
       } catch (IOException e) {
         LOG.error("Error syncing, request close of WAL", e);
@@ -611,7 +611,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
         lastException = e;
       } finally {
         // reattach the span to the future before releasing.
-        takeSyncFuture.setSpan(scope.detach());
+        //TODO handle htrace API change, see HBASE-18895
+        // takeSyncFuture.setSpan(scope.getSpan());
         // First release what we 'took' from the queue.
         syncCount += releaseSyncFuture(takeSyncFuture, currentSequence, lastException);
         // Can we release other syncs?
@@ -727,8 +728,15 @@ public class FSHLog extends AbstractFSWAL<Writer> {
   }
 
   // Sync all known transactions
-  private Span publishSyncThenBlockOnCompletion(Span span) throws IOException {
-    return blockOnSync(publishSyncOnRingBuffer(span));
+  private void publishSyncThenBlockOnCompletion(TraceScope scope) throws IOException {
+    if (scope != null) {
+      SyncFuture syncFuture = publishSyncOnRingBuffer(scope.getSpan());
+      blockOnSync(syncFuture);
+    }
+    else {
+      SyncFuture syncFuture = publishSyncOnRingBuffer(null);
+      blockOnSync(syncFuture);
+    }
   }
 
   /**
@@ -754,12 +762,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
 
   @Override
   public void sync() throws IOException {
-    TraceScope scope = Trace.startSpan("FSHLog.sync");
-    try {
-      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+      publishSyncThenBlockOnCompletion(scope);
     }
   }
 
@@ -769,12 +773,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
       // Already sync'd.
       return;
     }
-    TraceScope scope = Trace.startSpan("FSHLog.sync");
-    try {
-      scope = Trace.continueSpan(publishSyncThenBlockOnCompletion(scope.detach()));
-    } finally {
-      assert scope == NullScope.INSTANCE || !scope.isDetached();
-      scope.close();
+    try (TraceScope scope = TraceUtil.createTrace("FSHLog.sync")) {
+      publishSyncThenBlockOnCompletion(scope);
     }
   }
 
@@ -996,7 +996,8 @@ public class FSHLog extends AbstractFSWAL<Writer> {
         }
       } else if (truck.type() == RingBufferTruck.Type.APPEND) {
         FSWALEntry entry = truck.unloadAppend();
-        TraceScope scope = Trace.continueSpan(entry.detachSpan());
+        //TODO handle htrace API change, see HBASE-18895
+        //TraceScope scope = Trace.continueSpan(entry.detachSpan());
         try {
 
           if (this.exception != null) {
@@ -1015,9 +1016,6 @@ public class FSHLog extends AbstractFSWAL<Writer> {
               : new DamagedWALException("On sync", this.exception));
           // Return to keep processing events coming off the ringbuffer
           return;
-        } finally {
-          assert scope == NullScope.INSTANCE || !scope.isDetached();
-          scope.close(); // append scope is complete
         }
       } else {
         // What is this if not an append or sync. Fail all up to this!!!
@@ -36,7 +36,7 @@ import org.apache.hadoop.hbase.util.CollectionUtils;
 import org.apache.hadoop.hbase.wal.WAL.Entry;
 import org.apache.hadoop.hbase.wal.WALEdit;
 import org.apache.hadoop.hbase.wal.WALKey;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
 import org.apache.yetus.audience.InterfaceAudience;
 
 import org.apache.hadoop.hbase.shaded.com.google.common.annotations.VisibleForTesting;
@@ -18,8 +18,8 @@
  */
 package org.apache.hadoop.hbase.regionserver.wal;
 
+import org.apache.htrace.core.Span;
 import org.apache.yetus.audience.InterfaceAudience;
-import org.apache.htrace.Span;
 
 /**
  * A 'truck' to carry a payload across the ring buffer from Handler to WAL. Has EITHER a
@@ -22,7 +22,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.exceptions.TimeoutIOException;
-import org.apache.htrace.Span;
+import org.apache.htrace.core.Span;
 
 /**
  * A Future on a filesystem sync call. It given to a client or 'Handler' for it to wait on till the
@@ -67,6 +67,7 @@ import org.apache.hadoop.hbase.Waiter.Predicate;
 import org.apache.hadoop.hbase.client.ImmutableHRegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfo;
 import org.apache.hadoop.hbase.client.RegionInfoBuilder;
+import org.apache.hadoop.hbase.trace.TraceUtil;
 import org.apache.yetus.audience.InterfaceAudience;
 import org.apache.hadoop.hbase.client.Admin;
 import org.apache.hadoop.hbase.client.BufferedMutator;
@@ -657,6 +658,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     org.apache.log4j.Logger.getLogger(org.apache.hadoop.metrics2.impl.MetricsSystemImpl.class).
         setLevel(org.apache.log4j.Level.ERROR);
 
+    TraceUtil.initTracer(conf);
 
     this.dfsCluster = new MiniDFSCluster(0, this.conf, servers, true, true,
         true, null, null, hosts, null);
@@ -1125,6 +1127,7 @@ public class HBaseTestingUtility extends HBaseCommonTestingUtility {
     }
 
     Configuration c = new Configuration(this.conf);
+    TraceUtil.initTracer(c);
     this.hbaseCluster =
         new MiniHBaseCluster(c, numMasters, numSlaves, masterClass, regionserverClass);
     // Don't leave here till we've done a successful scan of the hbase:meta
@@ -563,7 +563,7 @@ public class TestSimpleRpcScheduler {
     ServerCall putCall = new ServerCall(1, null, null,
         RPCProtos.RequestHeader.newBuilder().setMethodName("mutate").build(),
         RequestConverter.buildMutateRequest(Bytes.toBytes("abc"), new Put(Bytes.toBytes("row"))),
-        null, null, 9, null, null, timestamp, 0, null, null, null) {
+        null, null, 9, null, timestamp, 0, null, null, null) {
 
       @Override
       public void sendResponseIfReady() throws IOException {
@ -18,25 +18,21 @@
|
||||||
package org.apache.hadoop.hbase.trace;
|
package org.apache.hadoop.hbase.trace;
|
||||||
|
|
||||||
import static org.junit.Assert.assertEquals;
|
import static org.junit.Assert.assertEquals;
|
||||||
import static org.junit.Assert.assertNotNull;
|
|
||||||
import static org.junit.Assert.assertTrue;
|
import static org.junit.Assert.assertTrue;
|
||||||
|
|
||||||
import java.lang.reflect.Method;
|
|
||||||
import java.util.Collection;
|
|
||||||
|
|
||||||
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
import org.apache.hadoop.hbase.HBaseTestingUtility;
|
||||||
import org.apache.hadoop.hbase.TableName;
|
import org.apache.hadoop.hbase.TableName;
|
||||||
import org.apache.hadoop.hbase.Waiter;
|
import org.apache.hadoop.hbase.Waiter;
|
||||||
import org.apache.hadoop.hbase.client.Put;
|
import org.apache.hadoop.hbase.client.Put;
|
||||||
import org.apache.hadoop.hbase.client.Table;
|
import org.apache.hadoop.hbase.client.Table;
|
||||||
|
import org.apache.hadoop.hbase.shaded.com.google.common.collect.Sets;
|
||||||
import org.apache.hadoop.hbase.testclassification.MediumTests;
|
import org.apache.hadoop.hbase.testclassification.MediumTests;
|
||||||
import org.apache.hadoop.hbase.testclassification.MiscTests;
|
import org.apache.hadoop.hbase.testclassification.MiscTests;
|
||||||
import org.apache.htrace.Sampler;
|
import org.apache.htrace.core.POJOSpanReceiver;
|
||||||
import org.apache.htrace.Span;
|
import org.apache.htrace.core.Sampler;
|
||||||
import org.apache.htrace.Trace;
|
import org.apache.htrace.core.Span;
|
||||||
import org.apache.htrace.TraceScope;
|
import org.apache.htrace.core.SpanId;
|
||||||
import org.apache.htrace.TraceTree;
|
import org.apache.htrace.core.TraceScope;
|
||||||
import org.apache.htrace.impl.POJOSpanReceiver;
|
|
||||||
import org.junit.AfterClass;
|
import org.junit.AfterClass;
|
||||||
import org.junit.BeforeClass;
|
import org.junit.BeforeClass;
|
||||||
import org.junit.Rule;
|
import org.junit.Rule;
|
||||||
|
@ -44,103 +40,84 @@ import org.junit.Test;
|
||||||
import org.junit.experimental.categories.Category;
|
import org.junit.experimental.categories.Category;
|
||||||
import org.junit.rules.TestName;
|
import org.junit.rules.TestName;
|
||||||
|
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.LinkedList;
|
||||||
|
import java.util.List;
|
||||||
|
|
||||||
@Category({MiscTests.class, MediumTests.class})
|
@Category({MiscTests.class, MediumTests.class})
|
||||||
public class TestHTraceHooks {
|
public class TestHTraceHooks {
|
||||||
|
|
||||||
private static final byte[] FAMILY_BYTES = "family".getBytes();
|
private static final byte[] FAMILY_BYTES = "family".getBytes();
|
||||||
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
|
private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
|
||||||
private static POJOSpanReceiver rcvr;
|
private static POJOSpanReceiver rcvr;
|
||||||
private static long ROOT_SPAN_ID = 0;
|
private static SpanId ROOT_SPAN_ID = new SpanId(0, 0);
|
||||||
|
|
||||||
@Rule
|
@Rule
|
||||||
public TestName name = new TestName();
|
public TestName name = new TestName();
|
||||||
|
|
||||||
@BeforeClass
|
@BeforeClass
|
||||||
public static void before() throws Exception {
|
public static void before() throws Exception {
|
||||||
|
|
||||||
// Find out what the right value to use for SPAN_ROOT_ID is after HTRACE-111. We use HTRACE-32
|
|
||||||
// to detect whether we are using HTrace 3.2 or not.
|
|
||||||
try {
|
|
||||||
Method m = Span.class.getMethod("addKVAnnotation", String.class, String.class);
|
|
||||||
} catch (NoSuchMethodException e) {
|
|
||||||
ROOT_SPAN_ID = 0x74aceL; // Span.SPAN_ROOT_ID pre HTrace-3.2
|
|
||||||
}
|
|
||||||
|
|
||||||
TEST_UTIL.startMiniCluster(2, 3);
|
TEST_UTIL.startMiniCluster(2, 3);
|
||||||
rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
|
rcvr = new POJOSpanReceiver(new HBaseHTraceConfiguration(TEST_UTIL.getConfiguration()));
|
||||||
Trace.addReceiver(rcvr);
|
TraceUtil.addReceiver(rcvr);
|
||||||
|
TraceUtil.addSampler(new Sampler() {
|
||||||
|
@Override
|
||||||
|
public boolean next() {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@AfterClass
|
@AfterClass
|
||||||
public static void after() throws Exception {
|
public static void after() throws Exception {
|
||||||
TEST_UTIL.shutdownMiniCluster();
|
TEST_UTIL.shutdownMiniCluster();
|
||||||
Trace.removeReceiver(rcvr);
|
TraceUtil.removeReceiver(rcvr);
|
||||||
rcvr = null;
|
rcvr = null;
|
||||||
}
|
}
|
||||||
|
|
||||||
@Test
|
@Test
|
||||||
public void testTraceCreateTable() throws Exception {
|
public void testTraceCreateTable() throws Exception {
|
||||||
TraceScope tableCreationSpan = Trace.startSpan("creating table", Sampler.ALWAYS);
|
|
||||||
Table table;
|
Table table;
|
||||||
try {
|
Span createTableSpan;
|
||||||
|
try (TraceScope scope = TraceUtil.createTrace("creating table")) {
|
||||||
|
createTableSpan = scope.getSpan();
|
||||||
table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES);
|
table = TEST_UTIL.createTable(TableName.valueOf(name.getMethodName()), FAMILY_BYTES);
|
||||||
} finally {
|
|
||||||
tableCreationSpan.close();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Some table creation is async. Need to make sure that everything is fully in before
|
// Some table creation is async. Need to make sure that everything is fully in before
|
||||||
// checking to see if the spans are there.
|
// checking to see if the spans are there.
|
||||||
TEST_UTIL.waitFor(1000, new Waiter.Predicate<Exception>() {
|
TEST_UTIL.waitFor(10000, new Waiter.Predicate<Exception>() {
|
||||||
@Override
|
@Override public boolean evaluate() throws Exception {
|
||||||
public boolean evaluate() throws Exception {
|
return (rcvr == null) ? true : rcvr.getSpans().size() >= 5;
|
||||||
return rcvr.getSpans().size() >= 5;
|
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
Collection<Span> spans = rcvr.getSpans();
|
Collection<Span> spans = Sets.newHashSet(rcvr.getSpans());
|
||||||
|
List<Span> roots = new LinkedList<>();
|
||||||
TraceTree traceTree = new TraceTree(spans);
|
TraceTree traceTree = new TraceTree(spans);
|
||||||
Collection<Span> roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
|
roots.addAll(traceTree.getSpansByParent().find(createTableSpan.getSpanId()));
|
||||||
|
|
||||||
assertEquals(1, roots.size());
|
assertEquals(3, roots.size());
|
||||||
Span createTableRoot = roots.iterator().next();
|
assertEquals("creating table", createTableSpan.getDescription());
|
||||||
|
|
||||||
assertEquals("creating table", createTableRoot.getDescription());
|
if (spans != null) {
|
||||||
|
assertTrue(spans.size() > 5);
|
||||||
int createTableCount = 0;
|
|
||||||
|
|
||||||
for (Span s : traceTree.getSpansByParent().find(createTableRoot.getSpanId())) {
|
|
||||||
if (s.getDescription().startsWith("MasterService.CreateTable")) {
|
|
||||||
createTableCount++;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
assertTrue(createTableCount >= 1);
|
|
||||||
assertTrue(traceTree.getSpansByParent().find(createTableRoot.getSpanId()).size() > 3);
|
|
||||||
assertTrue(spans.size() > 5);
|
|
||||||
|
|
||||||
Put put = new Put("row".getBytes());
|
Put put = new Put("row".getBytes());
|
||||||
put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
|
put.addColumn(FAMILY_BYTES, "col".getBytes(), "value".getBytes());
|
||||||
|
|
||||||
TraceScope putSpan = Trace.startSpan("doing put", Sampler.ALWAYS);
|
Span putSpan;
|
||||||
try {
|
|
||||||
|
try (TraceScope scope = TraceUtil.createTrace("doing put")) {
|
||||||
|
putSpan = scope.getSpan();
|
||||||
table.put(put);
|
table.put(put);
|
||||||
} finally {
|
|
||||||
putSpan.close();
|
|
||||||
}
|
}
|
||||||
|
|
||||||
spans = rcvr.getSpans();
|
spans = rcvr.getSpans();
|
||||||
traceTree = new TraceTree(spans);
|
traceTree = new TraceTree(spans);
|
||||||
roots = traceTree.getSpansByParent().find(ROOT_SPAN_ID);
|
roots.clear();
|
||||||
|
roots.addAll(traceTree.getSpansByParent().find(putSpan.getSpanId()));
|
||||||
assertEquals(2, roots.size());
|
assertEquals(1, roots.size());
|
||||||
Span putRoot = null;
|
|
||||||
for (Span root : roots) {
|
|
||||||
if (root.getDescription().equals("doing put")) {
|
|
||||||
putRoot = root;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assertNotNull(putRoot);
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
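The rewritten TestHTraceHooks above shows the central API change of this patch: spans are opened with `TraceUtil.createTrace(...)` inside try-with-resources instead of `Trace.startSpan(...)` followed by `close()` in a finally block. A hedged sketch of that pattern follows; it assumes the tracer and an always-on sampler have already been installed (as the test does via `TraceUtil.initTracer` and `TraceUtil.addSampler`), and `doWork()` is a hypothetical stand-in for the traced table/put calls:

```java
import org.apache.hadoop.hbase.trace.TraceUtil;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.TraceScope;

public class TryWithResourcesTracingSketch {
  public static void main(String[] args) throws Exception {
    Span tracedSpan = null;
    // The scope (and its span) is closed automatically when the block exits, even on an
    // exception; this replaces the old explicit scope.close() in a finally block.
    try (TraceScope scope = TraceUtil.createTrace("creating table")) {
      // getSpan() is non-null only when a sampler elected to trace; the test installs an
      // always-true sampler via TraceUtil.addSampler(...) before reaching this point.
      tracedSpan = scope.getSpan();
      doWork();
    }
    if (tracedSpan != null) {
      System.out.println("traced span: " + tracedSpan.getDescription());
    }
  }

  // Hypothetical placeholder for the traced work (the test creates a table or issues a Put).
  private static void doWork() {
  }
}
```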
@ -0,0 +1,134 @@
|
||||||
|
/**
|
||||||
|
* Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
* or more contributor license agreements. See the NOTICE file
|
||||||
|
* distributed with this work for additional information
|
||||||
|
* regarding copyright ownership. The ASF licenses this file
|
||||||
|
* to you under the Apache License, Version 2.0 (the
|
||||||
|
* "License"); you may not use this file except in compliance
|
||||||
|
* with the License. You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*/
|
||||||
|
package org.apache.hadoop.hbase.trace;
|
||||||
|
|
||||||
|
import org.apache.htrace.core.Span;
|
||||||
|
import org.apache.htrace.core.SpanId;
|
||||||
|
|
||||||
|
import java.util.Collection;
|
||||||
|
import java.util.Collections;
|
||||||
|
import java.util.HashMap;
|
||||||
|
import java.util.Iterator;
|
||||||
|
import java.util.LinkedHashSet;
|
||||||
|
import java.util.LinkedList;
|
||||||
|
import java.util.List;
|
||||||
|
import java.util.Set;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Used to create the graph formed by spans.
|
||||||
|
*/
|
||||||
|
public class TraceTree {
|
||||||
|
|
||||||
|
public static class SpansByParent {
|
||||||
|
private final Set<Span> set;
|
||||||
|
|
||||||
|
private final HashMap<SpanId, LinkedList<Span>> parentToSpans;
|
||||||
|
|
||||||
|
SpansByParent(Collection<Span> spans) {
|
||||||
|
set = new LinkedHashSet<Span>();
|
||||||
|
parentToSpans = new HashMap<SpanId, LinkedList<Span>>();
|
||||||
|
if(spans == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
for (Span span : spans) {
|
||||||
|
set.add(span);
|
||||||
|
for (SpanId parent : span.getParents()) {
|
||||||
|
LinkedList<Span> list = parentToSpans.get(parent);
|
||||||
|
if (list == null) {
|
||||||
|
list = new LinkedList<Span>();
|
||||||
|
parentToSpans.put(parent, list);
|
||||||
|
}
|
||||||
|
list.add(span);
|
||||||
|
}
|
||||||
|
if (span.getParents().length == 0) {
|
||||||
|
LinkedList<Span> list = parentToSpans.get(Long.valueOf(0L));
|
||||||
|
if (list == null) {
|
||||||
|
list = new LinkedList<Span>();
|
||||||
|
parentToSpans.put(new SpanId(Long.MIN_VALUE, Long.MIN_VALUE), list);
|
||||||
|
}
|
||||||
|
list.add(span);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
public List<Span> find(SpanId parentId) {
|
||||||
|
LinkedList<Span> spans = parentToSpans.get(parentId);
|
||||||
|
if (spans == null) {
|
||||||
|
return new LinkedList<Span>();
|
||||||
|
}
|
||||||
|
return spans;
|
||||||
|
}
|
||||||
|
|
||||||
|
public Iterator<Span> iterator() {
|
||||||
|
return Collections.unmodifiableSet(set).iterator();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public static class SpansByProcessId {
|
||||||
|
private final Set<Span> set;
|
||||||
|
|
||||||
|
SpansByProcessId(Collection<Span> spans) {
|
||||||
|
set = new LinkedHashSet<Span>();
|
||||||
|
if(spans == null) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
for (Span span : spans) {
|
||||||
|
set.add(span);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
public Iterator<Span> iterator() {
|
||||||
|
return Collections.unmodifiableSet(set).iterator();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private final SpansByParent spansByParent;
|
||||||
|
private final SpansByProcessId spansByProcessId;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Create a new TraceTree
|
||||||
|
*
|
||||||
|
* @param spans The collection of spans to use to create this TraceTree. Should
|
||||||
|
* have at least one root span.
|
||||||
|
*/
|
||||||
|
public TraceTree(Collection<Span> spans) {
|
||||||
|
this.spansByParent = new SpansByParent(spans);
|
||||||
|
this.spansByProcessId = new SpansByProcessId(spans);
|
||||||
|
}
|
||||||
|
|
||||||
|
public SpansByParent getSpansByParent() {
|
||||||
|
return spansByParent;
|
||||||
|
}
|
||||||
|
|
||||||
|
public SpansByProcessId getSpansByProcessId() {
|
||||||
|
return spansByProcessId;
|
||||||
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
StringBuilder bld = new StringBuilder();
|
||||||
|
String prefix = "";
|
||||||
|
for (Iterator<Span> iter = spansByParent.iterator(); iter.hasNext();) {
|
||||||
|
Span span = iter.next();
|
||||||
|
bld.append(prefix).append(span.toString());
|
||||||
|
prefix = "\n";
|
||||||
|
}
|
||||||
|
return bld.toString();
|
||||||
|
}
|
||||||
|
}
|
|
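The new TraceTree helper above only groups spans by their parent ids; tests reassemble a trace by asking for the children of a known span. A small usage sketch, assuming spans collected by a `POJOSpanReceiver` as in TestHTraceHooks; the `childrenOf` helper name is made up for illustration:

```java
import java.util.Collection;
import java.util.List;

import org.apache.hadoop.hbase.trace.TraceTree;
import org.apache.htrace.core.Span;
import org.apache.htrace.core.SpanId;

public class TraceTreeUsageSketch {
  // collectedSpans would typically be POJOSpanReceiver.getSpans(), copied into a set
  // as the test above does before building the tree.
  static List<Span> childrenOf(Collection<Span> collectedSpans, SpanId parentId) {
    // Index every collected span under each of its parent span ids.
    TraceTree tree = new TraceTree(collectedSpans);
    // All spans that list parentId as a parent (an empty list if there are none).
    return tree.getSpansByParent().find(parentId);
  }
}
```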
@ -58,16 +58,17 @@ import org.apache.hadoop.hbase.regionserver.wal.SecureProtobufLogWriter;
|
||||||
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
|
import org.apache.hadoop.hbase.regionserver.wal.WALActionsListener;
|
||||||
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
|
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
|
||||||
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
|
import org.apache.hadoop.hbase.trace.SpanReceiverHost;
|
||||||
|
import org.apache.hadoop.hbase.trace.TraceUtil;
|
||||||
import org.apache.hadoop.hbase.util.Bytes;
|
import org.apache.hadoop.hbase.util.Bytes;
|
||||||
import org.apache.hadoop.hbase.util.FSUtils;
|
import org.apache.hadoop.hbase.util.FSUtils;
|
||||||
import org.apache.hadoop.hbase.util.Threads;
|
import org.apache.hadoop.hbase.util.Threads;
|
||||||
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
|
import org.apache.hadoop.hbase.wal.WALProvider.Writer;
|
||||||
import org.apache.hadoop.util.Tool;
|
import org.apache.hadoop.util.Tool;
|
||||||
import org.apache.hadoop.util.ToolRunner;
|
import org.apache.hadoop.util.ToolRunner;
|
||||||
import org.apache.htrace.Sampler;
|
import org.apache.htrace.core.ProbabilitySampler;
|
||||||
import org.apache.htrace.Trace;
|
import org.apache.htrace.core.Sampler;
|
||||||
import org.apache.htrace.TraceScope;
|
import org.apache.htrace.core.TraceScope;
|
||||||
import org.apache.htrace.impl.ProbabilitySampler;
|
import org.apache.htrace.core.Tracer;
|
||||||
import org.apache.yetus.audience.InterfaceAudience;
|
import org.apache.yetus.audience.InterfaceAudience;
|
||||||
|
|
||||||
import com.codahale.metrics.ConsoleReporter;
|
import com.codahale.metrics.ConsoleReporter;
|
||||||
|
@ -172,15 +173,13 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
|
||||||
Random rand = new Random(Thread.currentThread().getId());
|
Random rand = new Random(Thread.currentThread().getId());
|
||||||
WAL wal = region.getWAL();
|
WAL wal = region.getWAL();
|
||||||
|
|
||||||
TraceScope threadScope =
|
try (TraceScope threadScope = TraceUtil.createTrace("WALPerfEval." + Thread.currentThread().getName())) {
|
||||||
Trace.startSpan("WALPerfEval." + Thread.currentThread().getName());
|
|
||||||
try {
|
|
||||||
long startTime = System.currentTimeMillis();
|
long startTime = System.currentTimeMillis();
|
||||||
int lastSync = 0;
|
int lastSync = 0;
|
||||||
|
TraceUtil.addSampler(loopSampler);
|
||||||
for (int i = 0; i < numIterations; ++i) {
|
for (int i = 0; i < numIterations; ++i) {
|
||||||
assert Trace.currentSpan() == threadScope.getSpan() : "Span leak detected.";
|
assert Tracer.getCurrentSpan() == threadScope.getSpan() : "Span leak detected.";
|
||||||
TraceScope loopScope = Trace.startSpan("runLoopIter" + i, loopSampler);
|
try (TraceScope loopScope = TraceUtil.createTrace("runLoopIter" + i)) {
|
||||||
try {
|
|
||||||
long now = System.nanoTime();
|
long now = System.nanoTime();
|
||||||
Put put = setupPut(rand, key, value, numFamilies);
|
Put put = setupPut(rand, key, value, numFamilies);
|
||||||
WALEdit walEdit = new WALEdit();
|
WALEdit walEdit = new WALEdit();
|
||||||
|
@ -196,16 +195,12 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
latencyHistogram.update(System.nanoTime() - now);
|
latencyHistogram.update(System.nanoTime() - now);
|
||||||
} finally {
|
|
||||||
loopScope.close();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
long totalTime = (System.currentTimeMillis() - startTime);
|
long totalTime = (System.currentTimeMillis() - startTime);
|
||||||
logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
|
logBenchmarkResult(Thread.currentThread().getName(), numIterations, totalTime);
|
||||||
} catch (Exception e) {
|
} catch (Exception e) {
|
||||||
LOG.error(getClass().getSimpleName() + " Thread failed", e);
|
LOG.error(getClass().getSimpleName() + " Thread failed", e);
|
||||||
} finally {
|
|
||||||
threadScope.close();
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -315,8 +310,9 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
|
||||||
LOG.info("FileSystem: " + fs);
|
LOG.info("FileSystem: " + fs);
|
||||||
|
|
||||||
SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
|
SpanReceiverHost receiverHost = trace ? SpanReceiverHost.getInstance(getConf()) : null;
|
||||||
final Sampler<?> sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
|
final Sampler sampler = trace ? Sampler.ALWAYS : Sampler.NEVER;
|
||||||
TraceScope scope = Trace.startSpan("WALPerfEval", sampler);
|
TraceUtil.addSampler(sampler);
|
||||||
|
TraceScope scope = TraceUtil.createTrace("WALPerfEval");
|
||||||
|
|
||||||
try {
|
try {
|
||||||
if (rootRegionDir == null) {
|
if (rootRegionDir == null) {
|
||||||
|
@ -338,8 +334,8 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
|
||||||
// a table per desired region means we can avoid carving up the key space
|
// a table per desired region means we can avoid carving up the key space
|
||||||
final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
|
final HTableDescriptor htd = createHTableDescriptor(i, numFamilies);
|
||||||
regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
|
regions[i] = openRegion(fs, rootRegionDir, htd, wals, roll, roller);
|
||||||
benchmarks[i] = Trace.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
|
benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(regions[i], htd, numIterations, noSync,
|
||||||
syncInterval, traceFreq));
|
syncInterval, traceFreq), "");
|
||||||
}
|
}
|
||||||
ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
|
ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).
|
||||||
outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
|
outputTo(System.out).convertRatesTo(TimeUnit.SECONDS).filter(MetricFilter.ALL).build();
|
||||||
|
@ -389,9 +385,15 @@ public final class WALPerformanceEvaluation extends Configured implements Tool {
|
||||||
}
|
}
|
||||||
} finally {
|
} finally {
|
||||||
// We may be called inside a test that wants to keep on using the fs.
|
// We may be called inside a test that wants to keep on using the fs.
|
||||||
if (!noclosefs) fs.close();
|
if (!noclosefs) {
|
||||||
scope.close();
|
fs.close();
|
||||||
if (receiverHost != null) receiverHost.closeReceivers();
|
}
|
||||||
|
if (scope != null) {
|
||||||
|
scope.close();
|
||||||
|
}
|
||||||
|
if (receiverHost != null) {
|
||||||
|
receiverHost.closeReceivers();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return(0);
|
return(0);
|
||||||
|
|
|
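WALPerformanceEvaluation now opens its per-thread and per-iteration scopes with try-with-resources and hands each benchmark Runnable to `TraceUtil.wrap(...)`. A hedged sketch of the wrapping step, assuming `wrap(Runnable, String)` still returns a plain Runnable that traces the delegate's run(), as `Trace.wrap` did in HTrace 3:

```java
import org.apache.hadoop.hbase.trace.TraceUtil;

public class WrappedRunnableSketch {
  public static void main(String[] args) throws InterruptedException {
    // Mirrors benchmarks[i] = TraceUtil.wrap(new WALPutBenchmark(...), "") above.
    // Assumption: the returned Runnable opens a span around the delegate's run().
    Runnable traced = TraceUtil.wrap(
        () -> System.out.println("benchmark body runs here"),
        "WALPerfEval.worker");

    Thread worker = new Thread(traced, "WALPerfEval.worker");
    worker.start();
    worker.join();
  }
}
```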
@ -305,6 +305,12 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
@ -352,6 +358,10 @@
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-hdfs</artifactId>
|
<artifactId>hadoop-hdfs</artifactId>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -364,6 +374,10 @@
|
||||||
<type>test-jar</type>
|
<type>test-jar</type>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -375,6 +389,10 @@
|
||||||
<artifactId>hadoop-minicluster</artifactId>
|
<artifactId>hadoop-minicluster</artifactId>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -429,11 +447,21 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-minicluster</artifactId>
|
<artifactId>hadoop-minicluster</artifactId>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
|
|
@ -16,8 +16,8 @@
|
||||||
# See the License for the specific language governing permissions and
|
# See the License for the specific language governing permissions and
|
||||||
# limitations under the License.
|
# limitations under the License.
|
||||||
#
|
#
|
||||||
HTrace = org.apache.htrace.Trace
|
HTrace = org.apache.htrace.core.Tracer
|
||||||
java_import org.apache.htrace.Sampler
|
java_import org.apache.htrace.core.Sampler
|
||||||
java_import org.apache.hadoop.hbase.trace.SpanReceiverHost
|
java_import org.apache.hadoop.hbase.trace.SpanReceiverHost
|
||||||
|
|
||||||
module Shell
|
module Shell
|
||||||
|
|
|
@ -0,0 +1,716 @@
|
||||||
|
<?xml version="1.0"?>
|
||||||
|
<!--
|
||||||
|
|
||||||
|
Licensed to the Apache Software Foundation (ASF) under one
|
||||||
|
or more contributor license agreements. See the NOTICE file
|
||||||
|
distributed with this work for additional information
|
||||||
|
regarding copyright ownership. The ASF licenses this file
|
||||||
|
to you under the Apache License, Version 2.0 (the
|
||||||
|
"License"); you may not use this file except in compliance
|
||||||
|
with the License. You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing,
|
||||||
|
software distributed under the License is distributed on an
|
||||||
|
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
|
||||||
|
KIND, either express or implied. See the License for the
|
||||||
|
specific language governing permissions and limitations
|
||||||
|
under the License.
|
||||||
|
|
||||||
|
-->
|
||||||
|
<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
|
||||||
|
<modelVersion>4.0.0</modelVersion>
|
||||||
|
<parent>
|
||||||
|
<artifactId>hbase-build-configuration</artifactId>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<version>3.0.0-SNAPSHOT</version>
|
||||||
|
<relativePath>../hbase-build-configuration</relativePath>
|
||||||
|
</parent>
|
||||||
|
<artifactId>hbase-spark</artifactId>
|
||||||
|
<name>Apache HBase - Spark</name>
|
||||||
|
<properties>
|
||||||
|
<spark.version>1.6.0</spark.version>
|
||||||
|
<scala.version>2.10.4</scala.version>
|
||||||
|
<scala.binary.version>2.10</scala.binary.version>
|
||||||
|
<top.dir>${project.basedir}/..</top.dir>
|
||||||
|
</properties>
|
||||||
|
<dependencies>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase.thirdparty</groupId>
|
||||||
|
<artifactId>hbase-shaded-miscellaneous</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<!-- Force import of Spark's servlet API for unit tests -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>javax.servlet</groupId>
|
||||||
|
<artifactId>javax.servlet-api</artifactId>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<!-- Mark Spark / Scala as provided -->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.scala-lang</groupId>
|
||||||
|
<artifactId>scala-library</artifactId>
|
||||||
|
<version>${scala.version}</version>
|
||||||
|
<scope>provided</scope>
|
||||||
|
</dependency>
|
||||||
|
<!-- we exclude jsr305 below and then expressly relist it as
|
||||||
|
provided / optional to avoid dependency resolution possibly
|
||||||
|
bringing it back into runtime scope.
|
||||||
|
-->
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.spark</groupId>
|
||||||
|
<artifactId>spark-core_${scala.binary.version}</artifactId>
|
||||||
|
<version>${spark.version}</version>
|
||||||
|
<scope>provided</scope>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<!-- make sure wrong scala version is not pulled in -->
|
||||||
|
<groupId>org.scala-lang</groupId>
|
||||||
|
<artifactId>scala-library</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<!-- make sure wrong scala version is not pulled in -->
|
||||||
|
<groupId>org.scala-lang</groupId>
|
||||||
|
<artifactId>scalap</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.google.code.findbugs</groupId>
|
||||||
|
<artifactId>jsr305</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.google.code.findbugs</groupId>
|
||||||
|
<artifactId>jsr305</artifactId>
|
||||||
|
<version>1.3.9</version>
|
||||||
|
<scope>provided</scope>
|
||||||
|
<optional>true</optional>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.spark</groupId>
|
||||||
|
<artifactId>spark-sql_${scala.binary.version}</artifactId>
|
||||||
|
<version>${spark.version}</version>
|
||||||
|
<scope>provided</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.spark</groupId>
|
||||||
|
<artifactId>spark-streaming_${scala.binary.version}</artifactId>
|
||||||
|
<version>${spark.version}</version>
|
||||||
|
<scope>provided</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.spark</groupId>
|
||||||
|
<artifactId>spark-streaming_${scala.binary.version}</artifactId>
|
||||||
|
<version>${spark.version}</version>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<classifier>tests</classifier>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>junit</groupId>
|
||||||
|
<artifactId>junit</artifactId>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.scalatest</groupId>
|
||||||
|
<artifactId>scalatest_${scala.binary.version}</artifactId>
|
||||||
|
<version>2.2.4</version>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.scalamock</groupId>
|
||||||
|
<artifactId>scalamock-scalatest-support_${scala.binary.version}</artifactId>
|
||||||
|
<version>3.1.4</version>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.fasterxml.jackson.module</groupId>
|
||||||
|
<artifactId>jackson-module-scala_${scala.binary.version}</artifactId>
|
||||||
|
<version>${jackson.version}</version>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.scala-lang</groupId>
|
||||||
|
<artifactId>scala-library</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.scala-lang</groupId>
|
||||||
|
<artifactId>scala-reflect</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
<artifactId>hadoop-client</artifactId>
|
||||||
|
<version>${hadoop-two.version}</version>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet</groupId>
|
||||||
|
<artifactId>servlet-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet.jsp</groupId>
|
||||||
|
<artifactId>jsp-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jruby</groupId>
|
||||||
|
<artifactId>jruby-complete</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
<version>${hadoop-two.version}</version>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet</groupId>
|
||||||
|
<artifactId>servlet-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet.jsp</groupId>
|
||||||
|
<artifactId>jsp-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jruby</groupId>
|
||||||
|
<artifactId>jruby-complete</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.google.code.findbugs</groupId>
|
||||||
|
<artifactId>jsr305</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
<version>${hadoop-two.version}</version>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<scope>test</scope>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet</groupId>
|
||||||
|
<artifactId>servlet-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet.jsp</groupId>
|
||||||
|
<artifactId>jsp-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jruby</groupId>
|
||||||
|
<artifactId>jruby-complete</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.google.code.findbugs</groupId>
|
||||||
|
<artifactId>jsr305</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
<artifactId>hadoop-hdfs</artifactId>
|
||||||
|
<version>${hadoop-two.version}</version>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<scope>test</scope>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet</groupId>
|
||||||
|
<artifactId>servlet-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>javax.servlet.jsp</groupId>
|
||||||
|
<artifactId>jsp-api</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jruby</groupId>
|
||||||
|
<artifactId>jruby-complete</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>xerces</groupId>
|
||||||
|
<artifactId>xercesImpl</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-client</artifactId>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.thrift</groupId>
|
||||||
|
<artifactId>thrift</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.slf4j</groupId>
|
||||||
|
<artifactId>slf4j-log4j12</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jsp-2.1</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jsp-api-2.1</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>servlet-api-2.5</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-json</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-server</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jetty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jetty-util</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>tomcat</groupId>
|
||||||
|
<artifactId>jasper-runtime</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>tomcat</groupId>
|
||||||
|
<artifactId>jasper-compiler</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-protocol</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-protocol-shaded</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-annotations</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-common</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-annotations</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-hadoop-compat</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
<scope>test</scope>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.thrift</groupId>
|
||||||
|
<artifactId>thrift</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.slf4j</groupId>
|
||||||
|
<artifactId>slf4j-log4j12</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jsp-2.1</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jsp-api-2.1</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>servlet-api-2.5</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-json</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-server</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jetty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jetty-util</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>tomcat</groupId>
|
||||||
|
<artifactId>jasper-runtime</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>tomcat</groupId>
|
||||||
|
<artifactId>jasper-compiler</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-hadoop2-compat</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
<scope>test</scope>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>log4j</groupId>
|
||||||
|
<artifactId>log4j</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.thrift</groupId>
|
||||||
|
<artifactId>thrift</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.slf4j</groupId>
|
||||||
|
<artifactId>slf4j-log4j12</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jsp-2.1</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jsp-api-2.1</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>servlet-api-2.5</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-json</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>com.sun.jersey</groupId>
|
||||||
|
<artifactId>jersey-server</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jetty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.mortbay.jetty</groupId>
|
||||||
|
<artifactId>jetty-util</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>tomcat</groupId>
|
||||||
|
<artifactId>jasper-runtime</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>tomcat</groupId>
|
||||||
|
<artifactId>jasper-compiler</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.jboss.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>io.netty</groupId>
|
||||||
|
<artifactId>netty</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-server</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-server</artifactId>
|
||||||
|
<version>${project.version}</version>
|
||||||
|
<scope>test</scope>
|
||||||
|
<type>test-jar</type>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hbase</groupId>
|
||||||
|
<artifactId>hbase-mapreduce</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>com.google.protobuf</groupId>
|
||||||
|
<artifactId>protobuf-java</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>commons-io</groupId>
|
||||||
|
<artifactId>commons-io</artifactId>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
<artifactId>hadoop-mapreduce-client-jobclient</artifactId>
|
||||||
|
<type>test-jar</type>
|
||||||
|
<scope>test</scope>
|
||||||
|
</dependency>
|
||||||
|
<dependency>
|
||||||
|
<groupId>org.apache.avro</groupId>
|
||||||
|
<artifactId>avro</artifactId>
|
||||||
|
</dependency>
|
||||||
|
</dependencies>
|
||||||
|
<build>
|
||||||
|
<plugins>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-compiler-plugin</artifactId>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>net.alchim31.maven</groupId>
|
||||||
|
<artifactId>scala-maven-plugin</artifactId>
|
||||||
|
<version>3.2.0</version>
|
||||||
|
<configuration>
|
||||||
|
<charset>${project.build.sourceEncoding}</charset>
|
||||||
|
<scalaVersion>${scala.version}</scalaVersion>
|
||||||
|
<args>
|
||||||
|
<arg>-feature</arg>
|
||||||
|
</args>
|
||||||
|
</configuration>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>scala-compile-first</id>
|
||||||
|
<phase>process-resources</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>add-source</goal>
|
||||||
|
<goal>compile</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
<execution>
|
||||||
|
<id>scala-test-compile</id>
|
||||||
|
<phase>process-test-resources</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>testCompile</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.scalatest</groupId>
|
||||||
|
<artifactId>scalatest-maven-plugin</artifactId>
|
||||||
|
<version>1.0</version>
|
||||||
|
<configuration>
|
||||||
|
<reportsDirectory>${project.build.directory}/surefire-reports</reportsDirectory>
|
||||||
|
<junitxml>.</junitxml>
|
||||||
|
<filereports>WDF TestSuite.txt</filereports>
|
||||||
|
<parallel>false</parallel>
|
||||||
|
<systemProperties>
|
||||||
|
<org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>org.apache.hadoop.hbase.shaded.</org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>
|
||||||
|
</systemProperties>
|
||||||
|
</configuration>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>test</id>
|
||||||
|
<phase>test</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>test</goal>
|
||||||
|
</goals>
|
||||||
|
<configuration>
|
||||||
|
<systemProperties>
|
||||||
|
<org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>org.apache.hadoop.hbase.shaded.</org.apache.hadoop.hbase.shaded.io.netty.packagePrefix>
|
||||||
|
</systemProperties>
|
||||||
|
<argLine> -Xmx1536m -XX:ReservedCodeCacheSize=512m
|
||||||
|
</argLine>
|
||||||
|
<parallel>false</parallel>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<!-- clover fails due to scala/java cross compile. This guarantees that the scala is
|
||||||
|
compiled before the java that will be evaluated by code coverage (scala will not be).
|
||||||
|
https://confluence.atlassian.com/display/CLOVERKB/Java-+Scala+cross-compilation+error+-+cannot+find+symbol
|
||||||
|
-->
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.codehaus.mojo</groupId>
|
||||||
|
<artifactId>build-helper-maven-plugin</artifactId>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>add-source</id>
|
||||||
|
<phase>validate</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>add-source</goal>
|
||||||
|
</goals>
|
||||||
|
<configuration>
|
||||||
|
<sources>
|
||||||
|
<source>src/main/scala</source>
|
||||||
|
</sources>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
<execution>
|
||||||
|
<id>add-test-source</id>
|
||||||
|
<phase>validate</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>add-test-source</goal>
|
||||||
|
</goals>
|
||||||
|
<configuration>
|
||||||
|
<sources>
|
||||||
|
<source>src/test/scala</source>
|
||||||
|
</sources>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.xolstice.maven.plugins</groupId>
|
||||||
|
<artifactId>protobuf-maven-plugin</artifactId>
|
||||||
|
<executions>
|
||||||
|
<execution>
|
||||||
|
<id>compile-protoc</id>
|
||||||
|
<phase>generate-sources</phase>
|
||||||
|
<goals>
|
||||||
|
<goal>compile</goal>
|
||||||
|
</goals>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
<plugin>
|
||||||
|
<groupId>org.apache.maven.plugins</groupId>
|
||||||
|
<artifactId>maven-enforcer-plugin</artifactId>
|
||||||
|
<executions>
|
||||||
|
<!-- purposefully have jsr 305 exclusion only warn in this module -->
|
||||||
|
<execution>
|
||||||
|
<id>banned-jsr305</id>
|
||||||
|
<goals>
|
||||||
|
<goal>enforce</goal>
|
||||||
|
</goals>
|
||||||
|
<configuration>
|
||||||
|
<fail>false</fail>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
<!-- scala is ok in the spark modules -->
|
||||||
|
<execution>
|
||||||
|
<id>banned-scala</id>
|
||||||
|
<goals>
|
||||||
|
<goal>enforce</goal>
|
||||||
|
</goals>
|
||||||
|
<configuration>
|
||||||
|
<skip>true</skip>
|
||||||
|
</configuration>
|
||||||
|
</execution>
|
||||||
|
</executions>
|
||||||
|
</plugin>
|
||||||
|
</plugins>
|
||||||
|
</build>
|
||||||
|
<profiles>
|
||||||
|
<!-- Skip the tests in this module -->
|
||||||
|
<profile>
|
||||||
|
<id>skipSparkTests</id>
|
||||||
|
<activation>
|
||||||
|
<property>
|
||||||
|
<name>skipSparkTests</name>
|
||||||
|
</property>
|
||||||
|
</activation>
|
||||||
|
<properties>
|
||||||
|
<surefire.skipFirstPart>true</surefire.skipFirstPart>
|
||||||
|
<surefire.skipSecondPart>true</surefire.skipSecondPart>
|
||||||
|
<skipTests>true</skipTests>
|
||||||
|
</properties>
|
||||||
|
</profile>
|
||||||
|
</profiles>
|
||||||
|
</project>
|
|
@ -140,6 +140,12 @@
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
@ -184,6 +190,10 @@
|
||||||
<artifactId>hadoop-hdfs</artifactId>
|
<artifactId>hadoop-hdfs</artifactId>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -196,6 +206,10 @@
|
||||||
<type>test-jar</type>
|
<type>test-jar</type>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -207,6 +221,10 @@
|
||||||
<artifactId>hadoop-minicluster</artifactId>
|
<artifactId>hadoop-minicluster</artifactId>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -239,11 +257,23 @@
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-minicluster</artifactId>
|
<artifactId>hadoop-minicluster</artifactId>
|
||||||
<scope>compile</scope>
|
<scope>compile</scope>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
|
|
|
@ -513,12 +513,22 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-minicluster</artifactId>
|
<artifactId>hadoop-minicluster</artifactId>
|
||||||
<scope>test</scope>
|
<scope>test</scope>
|
||||||
<exclusions>
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
<exclusion>
|
<exclusion>
|
||||||
<groupId>com.google.guava</groupId>
|
<groupId>com.google.guava</groupId>
|
||||||
<artifactId>guava</artifactId>
|
<artifactId>guava</artifactId>
|
||||||
|
@ -571,10 +581,22 @@
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-common</artifactId>
|
<artifactId>hadoop-common</artifactId>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.hadoop</groupId>
|
<groupId>org.apache.hadoop</groupId>
|
||||||
<artifactId>hadoop-minicluster</artifactId>
|
<artifactId>hadoop-minicluster</artifactId>
|
||||||
|
<exclusions>
|
||||||
|
<exclusion>
|
||||||
|
<groupId>org.apache.htrace</groupId>
|
||||||
|
<artifactId>htrace-core</artifactId>
|
||||||
|
</exclusion>
|
||||||
|
</exclusions>
|
||||||
</dependency>
|
</dependency>
|
||||||
</dependencies>
|
</dependencies>
|
||||||
<build>
|
<build>
|
||||||
|
|
61
pom.xml
|
@ -1344,7 +1344,8 @@
|
||||||
<jruby.version>9.1.10.0</jruby.version>
|
<jruby.version>9.1.10.0</jruby.version>
|
||||||
<junit.version>4.12</junit.version>
|
<junit.version>4.12</junit.version>
|
||||||
<hamcrest.version>1.3</hamcrest.version>
|
<hamcrest.version>1.3</hamcrest.version>
|
||||||
<htrace.version>3.2.0-incubating</htrace.version>
|
<htrace.version>4.2.0-incubating</htrace.version>
|
||||||
|
<htrace-hadoop.version>3.2.0-incubating</htrace-hadoop.version>
|
||||||
<log4j.version>1.2.17</log4j.version>
|
<log4j.version>1.2.17</log4j.version>
|
||||||
<mockito-core.version>2.1.0</mockito-core.version>
|
<mockito-core.version>2.1.0</mockito-core.version>
|
||||||
<!--Internally we use a different version of protobuf. See hbase-protocol-shaded-->
|
<!--Internally we use a different version of protobuf. See hbase-protocol-shaded-->
|
||||||
|
@ -1959,7 +1960,7 @@
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
<groupId>org.apache.htrace</groupId>
|
<groupId>org.apache.htrace</groupId>
|
||||||
<artifactId>htrace-core</artifactId>
|
<artifactId>htrace-core4</artifactId>
|
||||||
<version>${htrace.version}</version>
|
<version>${htrace.version}</version>
|
||||||
</dependency>
|
</dependency>
|
||||||
<dependency>
|
<dependency>
|
||||||
|
@@ -2392,6 +2393,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>
@@ -2430,6 +2435,10 @@
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>
@@ -2470,6 +2479,10 @@
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop-two.version}</version>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>commons-beanutils</groupId>
           <artifactId>commons-beanutils</artifactId>
@@ -2520,10 +2533,14 @@
       <artifactId>hadoop-minicluster</artifactId>
       <version>${hadoop-two.version}</version>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-httpclient</groupId>
           <artifactId>commons-httpclient</artifactId>
         </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>
@@ -2630,6 +2647,10 @@
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>
@@ -2664,6 +2685,10 @@
       <type>test-jar</type>
       <scope>test</scope>
       <exclusions>
+        <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>
@@ -2706,10 +2731,14 @@
       <artifactId>hadoop-common</artifactId>
       <version>${hadoop-three.version}</version>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-beanutils</groupId>
           <artifactId>commons-beanutils</artifactId>
         </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>
@@ -2761,10 +2790,14 @@
       <artifactId>hadoop-minicluster</artifactId>
       <version>${hadoop-three.version}</version>
       <exclusions>
         <exclusion>
+          <groupId>org.apache.htrace</groupId>
+          <artifactId>htrace-core</artifactId>
+        </exclusion>
+        <exclusion>
           <groupId>commons-httpclient</groupId>
           <artifactId>commons-httpclient</artifactId>
         </exclusion>
         <exclusion>
           <groupId>javax.servlet.jsp</groupId>
           <artifactId>jsp-api</artifactId>

@@ -57,7 +57,7 @@ The `LocalFileSpanReceiver` looks in _hbase-site.xml_ for a `hbase.local-fi
 <property>
   <name>hbase.trace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.LocalFileSpanReceiver</value>
+  <value>org.apache.htrace.core.LocalFileSpanReceiver</value>
 </property>
 <property>
   <name>hbase.htrace.local-file-span-receiver.path</name>
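The class rename also matters when a receiver is attached programmatically: in HTrace 4 the span receiver and sampler are loaded by class name from the tracer's configuration. The following is a hedged sketch that mirrors what the `hbase-site.xml` snippet above configures; the HTrace-level key names and the output path are assumptions for illustration, not values taken from this commit.

```java
import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class LocalFileReceiverExample {
  public static void main(String[] args) throws Exception {
    // Assumed HTrace-level keys; HBase derives the actual values from its hbase.htrace.* properties.
    Tracer tracer = new Tracer.Builder("LocalFileReceiverExample")
        .conf(HTraceConfiguration.fromKeyValuePairs(
            "span.receiver.classes", "org.apache.htrace.core.LocalFileSpanReceiver",
            "local.file.span.receiver.path", "/tmp/htrace-spans.json",
            "sampler.classes", "AlwaysSampler"))
        .build();

    try (TraceScope scope = tracer.newScope("demoSpan")) {
      // traced work runs here; the span is flushed to the local file when the scope closes
    }

    tracer.close();
  }
}
```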
@@ -76,7 +76,7 @@ _htrace-zipkin_ is published to the link:http://search.maven.org/#search%7Cgav%7
 <property>
   <name>hbase.trace.spanreceiver.classes</name>
-  <value>org.apache.htrace.impl.ZipkinSpanReceiver</value>
+  <value>org.apache.htrace.core.ZipkinSpanReceiver</value>
 </property>
 <property>
   <name>hbase.htrace.zipkin.collector-hostname</name>