HBASE-9121 Update HTrace to 2.00 and add new example usage. -- Part2

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1514094 13f79535-47bb-0310-9956-ffa450edef68
eclark 2013-08-14 23:29:48 +00:00
parent 5c2a90aa28
commit e926483258
3 changed files with 320 additions and 89 deletions


@@ -0,0 +1,290 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.trace;
import org.apache.commons.cli.CommandLine;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.IntegrationTestingUtility;
import org.apache.hadoop.hbase.IntegrationTests;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.util.AbstractHBaseTool;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.util.ToolRunner;
import org.cloudera.htrace.Sampler;
import org.cloudera.htrace.Trace;
import org.cloudera.htrace.TraceScope;
import org.junit.Test;
import org.junit.experimental.categories.Category;
import java.io.IOException;
import java.util.Random;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
@Category(IntegrationTests.class)
public class IntegrationTestSendTraceRequests extends AbstractHBaseTool {
public static final String TABLE_ARG = "t";
public static final String CF_ARG = "f";
public static final String TABLE_NAME_DEFAULT = "SendTracesTable";
public static final String COLUMN_FAMILY_DEFAULT = "D";
private String tableName = TABLE_NAME_DEFAULT;
private String familyName = COLUMN_FAMILY_DEFAULT;
private IntegrationTestingUtility util;
private Random random = new Random();
private HBaseAdmin admin;
private SpanReceiverHost receiverHost;
public static void main(String[] args) throws Exception {
Configuration configuration = HBaseConfiguration.create();
IntegrationTestingUtility.setUseDistributedCluster(configuration);
IntegrationTestSendTraceRequests tool = new IntegrationTestSendTraceRequests();
ToolRunner.run(configuration, tool, args);
}
@Override
protected void addOptions() {
addOptWithArg(TABLE_ARG, "The table name to target. Will be created if not there already.");
addOptWithArg(TABLE_ARG, "The family to target");
}
@Override
public void processOptions(CommandLine cmd) {
String tableNameString = cmd.getOptionValue(TABLE_ARG, TABLE_NAME_DEFAULT);
String familyString = cmd.getOptionValue(CF_ARG, COLUMN_FAMILY_DEFAULT);
this.tableName = tableNameString;
this.familyName = familyString;
}
@Override
public int doWork() throws Exception {
internalDoWork();
return 0;
}
@Test
public void internalDoWork() throws Exception {
util = createUtil();
admin = util.getHBaseAdmin();
setupReceiver();
deleteTable();
createTable();
LinkedBlockingQueue<Long> rks = insertData();
ExecutorService service = Executors.newFixedThreadPool(20);
doScans(service, rks);
doGets(service, rks);
service.shutdown();
service.awaitTermination(100, TimeUnit.SECONDS);
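// Give the asynchronous span receivers time to flush any buffered spans
// before they are closed below.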
Thread.sleep(90000);
receiverHost.closeReceivers();
util.restoreCluster();
util = null;
}
private void doScans(ExecutorService service, final LinkedBlockingQueue<Long> rks) {
for (int i = 0; i < 100; i++) {
Runnable runnable = new Runnable() {
private TraceScope innerScope = null;
private final LinkedBlockingQueue<Long> rowKeyQueue = rks;
@Override
public void run() {
ResultScanner rs = null;
try {
innerScope = Trace.startSpan("Scan", Sampler.ALWAYS);
HTable ht = new HTable(util.getConfiguration(), tableName);
Scan s = new Scan();
s.setStartRow(Bytes.toBytes(rowKeyQueue.take()));
s.setBatch(7);
rs = ht.getScanner(s);
// Something to keep the jvm from removing the loop.
long accum = 0;
for(int x = 0; x < 1000; x++) {
Result r = rs.next();
accum |= Bytes.toLong(r.getRow());
}
innerScope.getSpan().addTimelineAnnotation("Accum result = " + accum);
ht.close();
ht = null;
} catch (IOException e) {
e.printStackTrace();
innerScope.getSpan().addKVAnnotation(
Bytes.toBytes("exception"),
Bytes.toBytes(e.getClass().getSimpleName()));
} catch (Exception e) {
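// Ignored: most likely an InterruptedException from rowKeyQueue.take(),
// or an NPE if the scanner ran past the last inserted row.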
} finally {
if (innerScope != null) innerScope.close();
if (rs != null) rs.close();
}
}
};
service.submit(runnable);
}
}
private void doGets(ExecutorService service, final LinkedBlockingQueue<Long> rowKeys)
throws IOException {
for (int i = 0; i < 100; i++) {
Runnable runnable = new Runnable() {
private TraceScope innerScope = null;
private final LinkedBlockingQueue<Long> rowKeyQueue = rowKeys;
@Override
public void run() {
HTable ht = null;
try {
ht = new HTable(util.getConfiguration(), tableName);
} catch (IOException e) {
e.printStackTrace();
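// ht stays null if creation failed; the gets below will then fail fast.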
}
long accum = 0;
for (int x = 0; x < 5; x++) {
try {
innerScope = Trace.startSpan("gets", Sampler.ALWAYS);
long rk = rowKeyQueue.take();
Result r1 = ht.get(new Get(Bytes.toBytes(rk)));
if (r1 != null) {
accum |= Bytes.toLong(r1.getRow());
}
Result r2 = ht.get(new Get(Bytes.toBytes(rk)));
if (r2 != null) {
accum |= Bytes.toLong(r2.getRow());
}
innerScope.getSpan().addTimelineAnnotation("Accum = " + accum);
} catch (IOException e) {
// IGNORED
} catch (InterruptedException ie) {
// IGNORED
} finally {
if (innerScope != null) innerScope.close();
}
}
}
};
service.submit(runnable);
}
}
private void createTable() throws IOException {
TraceScope createScope = null;
try {
createScope = Trace.startSpan("createTable", Sampler.ALWAYS);
util.createTable(tableName, familyName);
} finally {
if (createScope != null) createScope.close();
}
}
private void deleteTable() throws IOException {
TraceScope deleteScope = null;
try {
if (admin.tableExists(tableName)) {
deleteScope = Trace.startSpan("deleteTable", Sampler.ALWAYS);
util.deleteTable(tableName);
}
} finally {
if (deleteScope != null) deleteScope.close();
}
}
private LinkedBlockingQueue<Long> insertData() throws IOException, InterruptedException {
LinkedBlockingQueue<Long> rowKeys = new LinkedBlockingQueue<Long>(25000);
HTable ht = new HTable(util.getConfiguration(), this.tableName);
byte[] value = new byte[300];
for (int x = 0; x < 5000; x++) {
TraceScope traceScope = Trace.startSpan("insertData", Sampler.ALWAYS);
try {
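// Buffer puts client-side; redundant after the first iteration, but harmless.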
ht.setAutoFlush(false);
for (int i = 0; i < 5; i++) {
long rk = random.nextLong();
rowKeys.add(rk);
Put p = new Put(Bytes.toBytes(rk));
for (int y = 0; y < 10; y++) {
random.nextBytes(value);
p.add(Bytes.toBytes(familyName),
Bytes.toBytes(random.nextLong()),
value);
}
ht.put(p);
}
if ((x % 1000) == 0) {
admin.flush(Bytes.toBytes(tableName));
}
} finally {
traceScope.close();
}
}
// setAutoFlush(false) buffered puts client-side; close() pushes out the remainder.
ht.close();
admin.flush(Bytes.toBytes(tableName));
return rowKeys;
}
private IntegrationTestingUtility createUtil() throws Exception {
Configuration conf = getConf();
if (this.util == null) {
IntegrationTestingUtility u;
if (conf == null) {
u = new IntegrationTestingUtility();
} else {
u = new IntegrationTestingUtility(conf);
}
util = u;
util.initializeCluster(1);
}
return this.util;
}
private void setupReceiver() {
Configuration conf = new Configuration(util.getConfiguration());
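// Presumably flags the Zipkin span receiver (if configured) to run in client mode.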
conf.setBoolean("hbase.zipkin.is-in-client-mode", true);
this.receiverHost = SpanReceiverHost.getInstance(conf);
}
}
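The test above drives the HTrace 2.00 span API end to end (the tool accepts -t and -f to override the table and column family). As a smaller illustration of the same client-side pattern, here is a minimal sketch that traces a single Get. The class name, table name, and row key are placeholders; only the Trace, Sampler, and TraceScope calls mirror the code above.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.util.Bytes;
import org.cloudera.htrace.Sampler;
import org.cloudera.htrace.Trace;
import org.cloudera.htrace.TraceScope;

public class TracedGetSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    HTable table = new HTable(conf, "SendTracesTable"); // placeholder table
    TraceScope scope = Trace.startSpan("exampleGet", Sampler.ALWAYS);
    try {
      Result r = table.get(new Get(Bytes.toBytes("row-1"))); // placeholder row
      // Annotate the span with what we saw, as the integration test does.
      scope.getSpan().addTimelineAnnotation("got " + r.size() + " cells");
    } finally {
      scope.close(); // closing the scope delivers the span to the receivers
      table.close();
    }
  }
}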


@@ -0,0 +1,30 @@
package org.apache.hadoop.hbase.trace;
import org.apache.hadoop.conf.Configuration;
import org.cloudera.htrace.HTraceConfiguration;
public class HBaseHTraceConfiguration extends HTraceConfiguration {
public static final String KEY_PREFIX = "hbase.";
private Configuration conf;
public HBaseHTraceConfiguration(Configuration conf) {
this.conf = conf;
}
@Override
public String get(String key) {
return conf.get(KEY_PREFIX + key);
}
@Override
public String get(String key, String defaultValue) {
return conf.get(KEY_PREFIX + key, defaultValue);
}
@Override
public boolean getBoolean(String key, boolean defaultValue) {
return conf.getBoolean(KEY_PREFIX + key, defaultValue);
}
}
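This wrapper simply namespaces HTrace lookups under the "hbase." prefix, so tracing settings can live in the ordinary HBase configuration. A short sketch of the effect, using an illustrative key name rather than one this patch defines:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.trace.HBaseHTraceConfiguration;
import org.cloudera.htrace.HTraceConfiguration;

public class PrefixSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.sampler", "always"); // ordinary HBase config key
    HTraceConfiguration htc = new HBaseHTraceConfiguration(conf);
    // HTrace asks for "sampler"; the wrapper reads "hbase.sampler" underneath.
    System.out.println(htc.get("sampler"));                  // prints "always"
    System.out.println(htc.getBoolean("unset-flag", true));  // falls back to the default
  }
}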


@@ -1,89 +0,0 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.trace;
import java.io.IOException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.cloudera.htrace.Span;
import org.cloudera.htrace.SpanReceiver;
import org.cloudera.htrace.Trace;
import org.cloudera.htrace.impl.LocalFileSpanReceiver;
/**
* Wraps the LocalFileSpanReceiver provided in
* org.cloudera.htrace.impl.LocalFileSpanReceiver to read the file name
* destination for spans from hbase-site.xml.
*
* The file path should be added as a property with name
* "hbase.trace.spanreceiver.localfilespanreceiver.filename".
*/
public class HBaseLocalFileSpanReceiver implements SpanReceiver, Configurable {
public static final Log LOG = LogFactory
.getLog(HBaseLocalFileSpanReceiver.class);
public static final String FILE_NAME_CONF_KEY = "hbase.trace.spanreceiver.localfilespanreceiver.filename";
private Configuration conf;
private LocalFileSpanReceiver rcvr;
@Override
public Configuration getConf() {
return conf;
}
@Override
public void setConf(Configuration arg0) {
this.conf = arg0;
// replace rcvr if it was already created
if (rcvr != null) {
try {
rcvr.close();
} catch (IOException e) {
LOG.warn("Error closing LocalFileSpanReceiver.", e);
}
}
try {
rcvr = new LocalFileSpanReceiver(conf.get(FILE_NAME_CONF_KEY));
} catch (IOException e) {
Trace.removeReceiver(this);
rcvr = null;
LOG.warn(
"Unable to initialize LocalFileSpanReceiver, removing owner (HBaseLocalFileSpanReceiver) from receiver list.",
e);
}
}
@Override
public void close() throws IOException {
try {
if (rcvr != null) {
rcvr.close();
}
} finally {
rcvr = null;
}
}
@Override
public void receiveSpan(Span span) {
if (rcvr != null) {
rcvr.receiveSpan(span);
}
}
}