HBASE-17584 Expose ScanMetrics with ResultScanner rather than Scan

zhangduo 2017-02-24 14:08:10 +08:00
parent aace02a230
commit a49bc58a54
12 changed files with 64 additions and 34 deletions

View File

@@ -38,13 +38,11 @@ public abstract class AbstractClientScanner implements ResultScanner {
}
/**
* Used internally accumulating metrics on scan. To
* enable collection of metrics on a Scanner, call {@link Scan#setScanMetricsEnabled(boolean)}.
* These metrics are cleared at key transition points. Metrics are accumulated in the
* {@link Scan} object itself.
* @see Scan#getScanMetrics()
* Used internally to accumulate metrics on scan. To enable collection of metrics on a Scanner,
* call {@link Scan#setScanMetricsEnabled(boolean)}.
* @return the running {@link ScanMetrics} instance, or null if scan metrics are not enabled.
*/
@Override
public ScanMetrics getScanMetrics() {
return scanMetrics;
}
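For context, a minimal usage sketch of the new flow (the connection, the table name "t1" and the family "cf" are hypothetical, not part of this commit): metrics are still enabled on the Scan, but they are read back from the ResultScanner rather than from the Scan object.

import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.util.Bytes;

public class ScanMetricsExample {
  // Scans the hypothetical table "t1" and prints the accumulated scan metrics.
  static void scanWithMetrics(Connection connection) throws IOException {
    Scan scan = new Scan();
    scan.setScanMetricsEnabled(true); // metrics are disabled by default
    scan.addFamily(Bytes.toBytes("cf"));
    try (Table table = connection.getTable(TableName.valueOf("t1"));
        ResultScanner scanner = table.getScanner(scan)) {
      for (Result r : scanner) {
        // consume results as usual
      }
      // New in HBASE-17584: read the metrics from the scanner, not from the Scan.
      ScanMetrics metrics = scanner.getScanMetrics();
      if (metrics != null) { // null when metrics were not enabled
        for (Map.Entry<String, Long> e : metrics.getMetricsMap().entrySet()) {
          System.out.println(e.getKey() + " = " + e.getValue());
        }
      }
    }
  }
}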

View File

@@ -30,6 +30,7 @@ import java.util.Queue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
/**
* The {@link ResultScanner} implementation for {@link AsyncTable}. It will fetch data automatically
@@ -164,4 +165,9 @@ class AsyncTableResultScanner implements ResultScanner, RawScanResultConsumer {
synchronized boolean isSuspended() {
return resumer != null;
}
@Override
public ScanMetrics getScanMetrics() {
throw new UnsupportedOperationException();
}
}

View File

@@ -51,7 +51,6 @@ import org.apache.hadoop.hbase.exceptions.ScannerResetException;
import org.apache.hadoop.hbase.ipc.RpcControllerFactory;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.shaded.protobuf.generated.MapReduceProtos;
import org.apache.hadoop.hbase.util.Bytes;
/**
@@ -301,15 +300,17 @@ public abstract class ClientScanner extends AbstractClientScanner {
* for scan/map reduce scenarios, we will have multiple scans running at the same time. By
* default, scan metrics are disabled; if the application wants to collect them, this behavior can
* be turned on by calling {@link Scan#setScanMetricsEnabled(boolean)}.
* <p>
* This invocation clears the scan metrics. Metrics are aggregated in the Scan instance.
*/
protected void writeScanMetrics() {
if (this.scanMetrics == null || scanMetricsPublished) {
return;
}
MapReduceProtos.ScanMetrics pScanMetrics = ProtobufUtil.toScanMetrics(scanMetrics);
scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA, pScanMetrics.toByteArray());
// Publish ScanMetrics to the Scan object.
// As noted in the javadoc of Scan.getScanMetrics, this relies on the user not calling
// ResultScanner.getScanMetrics and resetting the ScanMetrics; otherwise the metrics published
// to the Scan will be messed up.
scan.setAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA,
ProtobufUtil.toScanMetrics(scanMetrics, false).toByteArray());
scanMetricsPublished = true;
}
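To make the reset flag concrete, a sketch of the legacy read path this method feeds (essentially what the now-deprecated Scan.getScanMetrics() does); here scan is the completed Scan whose scanner has been closed, and ProtobufUtil is the shaded org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil imported above:

byte[] serialized = scan.getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
if (serialized != null) {
  ScanMetrics published = ProtobufUtil.toScanMetrics(serialized);
  // Because toScanMetrics(scanMetrics, false) was used when publishing, the live
  // counters were not zeroed, so ResultScanner.getScanMetrics() still sees them.
}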

View File

@@ -27,6 +27,7 @@ import java.util.List;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
/**
* Interface for client-side scanning. Go to {@link Table} to obtain instances.
@@ -116,4 +117,9 @@ public interface ResultScanner extends Closeable, Iterable<Result> {
* @return true if the lease was successfully renewed, false otherwise.
*/
boolean renewLease();
/**
* @return the scan metrics, or {@code null} if scan metrics are not enabled.
*/
ScanMetrics getScanMetrics();
}
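Since this adds a method to a public interface, any third-party ResultScanner implementation must now provide it. Below is a hypothetical minimal implementer (class name and behavior are illustrative, not part of this commit) that collects no metrics and simply returns null, which callers must already handle per the javadoc; the in-tree stubs in this commit throw UnsupportedOperationException instead.

import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;

/** Hypothetical scanner over a pre-fetched list of Results; it collects no metrics. */
class ListBackedResultScanner implements ResultScanner {
  private final Iterator<Result> it;

  ListBackedResultScanner(List<Result> results) {
    this.it = results.iterator();
  }

  @Override
  public Result next() {
    return it.hasNext() ? it.next() : null;
  }

  @Override
  public Result[] next(int nbRows) {
    throw new UnsupportedOperationException("kept minimal for this sketch");
  }

  @Override
  public Iterator<Result> iterator() {
    return it;
  }

  @Override
  public boolean renewLease() {
    return false; // nothing to renew for an in-memory scanner
  }

  @Override
  public void close() {
    // nothing to release
  }

  @Override
  public ScanMetrics getScanMetrics() {
    return null; // this scanner does not collect metrics
  }
}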

View File

@@ -1081,9 +1081,13 @@ public class Scan extends Query {
/**
* @return Metrics on this Scan, if metrics were enabled.
* @see #setScanMetricsEnabled(boolean)
* @deprecated Use {@link ResultScanner#getScanMetrics()} instead. Note that this method and
* {@link ResultScanner#getScanMetrics()} must not be used together; otherwise the
* metrics will be messed up.
*/
@Deprecated
public ScanMetrics getScanMetrics() {
byte [] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
byte[] bytes = getAttribute(Scan.SCAN_ATTRIBUTES_METRICS_DATA);
if (bytes == null) return null;
return ProtobufUtil.toScanMetrics(bytes);
}
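A migration fragment (assumes an existing Table named table and a Scan named scan with metrics enabled); per the deprecation note, use one of the two paths and do not mix them:

// Old, deprecated path: metrics are deserialized from a Scan attribute after the
// scanner has been closed.
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result r : scanner) {
    // consume results
  }
}
ScanMetrics fromScan = scan.getScanMetrics();

// New path: read the live ScanMetrics directly from the scanner.
try (ResultScanner scanner = table.getScanner(scan)) {
  for (Result r : scanner) {
    // consume results
  }
  ScanMetrics fromScanner = scanner.getScanMetrics();
}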

View File

@@ -106,11 +106,20 @@ public class ServerSideScanMetrics {
* @return A Map of String -&gt; Long for metrics
*/
public Map<String, Long> getMetricsMap() {
return getMetricsMap(true);
}
/**
* Get all the values. If reset is true, we will reset all the AtomicLongs back to 0.
* @param reset whether to reset the AtomicLongs to 0.
* @return A Map of String -&gt; Long for metrics
*/
public Map<String, Long> getMetricsMap(boolean reset) {
// Create a builder
ImmutableMap.Builder<String, Long> builder = ImmutableMap.builder();
// For every entry add the value; if requested, reset the AtomicLong back to zero
for (Map.Entry<String, AtomicLong> e : this.counters.entrySet()) {
builder.put(e.getKey(), e.getValue().getAndSet(0));
long value = reset ? e.getValue().getAndSet(0) : e.getValue().get();
builder.put(e.getKey(), value);
}
// Build the immutable map so that people can't mess around with it.
return builder.build();
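A fragment illustrating the difference (scanMetrics is a live ScanMetrics instance, e.g. obtained from ResultScanner.getScanMetrics()):

Map<String, Long> snapshot = scanMetrics.getMetricsMap(false); // peek; counters keep accumulating
Map<String, Long> drained = scanMetrics.getMetricsMap();       // same as getMetricsMap(true): read and reset to 0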

View File

@@ -83,7 +83,6 @@ import org.apache.hadoop.hbase.io.LimitInputStream;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.master.RegionState;
import org.apache.hadoop.hbase.protobuf.ProtobufMagic;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.quotas.QuotaScope;
import org.apache.hadoop.hbase.quotas.QuotaType;
import org.apache.hadoop.hbase.quotas.ThrottleType;
@@ -95,7 +94,6 @@ import org.apache.hadoop.hbase.shaded.com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.CodedInputStream;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.InvalidProtocolBufferException;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Message;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Parser;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcChannel;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.RpcController;
import org.apache.hadoop.hbase.shaded.com.google.protobuf.Service;
@@ -164,6 +162,7 @@ import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.FlushDescript
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.RegionEventDescriptor.EventType;
import org.apache.hadoop.hbase.shaded.protobuf.generated.WALProtos.StoreDescriptor;
import org.apache.hadoop.hbase.shaded.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.util.Addressing;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.DynamicClassLoader;
@@ -2043,12 +2042,11 @@ public final class ProtobufUtil {
}
public static ScanMetrics toScanMetrics(final byte[] bytes) {
Parser<MapReduceProtos.ScanMetrics> parser = MapReduceProtos.ScanMetrics.PARSER;
MapReduceProtos.ScanMetrics pScanMetrics = null;
try {
pScanMetrics = parser.parseFrom(bytes);
pScanMetrics = MapReduceProtos.ScanMetrics.parseFrom(bytes);
} catch (InvalidProtocolBufferException e) {
//Ignored there are just no key values to add.
// Ignored; there are just no key values to add.
}
ScanMetrics scanMetrics = new ScanMetrics();
if (pScanMetrics != null) {
@@ -2061,15 +2059,12 @@ public final class ProtobufUtil {
return scanMetrics;
}
public static MapReduceProtos.ScanMetrics toScanMetrics(ScanMetrics scanMetrics) {
public static MapReduceProtos.ScanMetrics toScanMetrics(ScanMetrics scanMetrics, boolean reset) {
MapReduceProtos.ScanMetrics.Builder builder = MapReduceProtos.ScanMetrics.newBuilder();
Map<String, Long> metrics = scanMetrics.getMetricsMap();
Map<String, Long> metrics = scanMetrics.getMetricsMap(reset);
for (Entry<String, Long> e : metrics.entrySet()) {
HBaseProtos.NameInt64Pair nameInt64Pair =
HBaseProtos.NameInt64Pair.newBuilder()
.setName(e.getKey())
.setValue(e.getValue())
.build();
HBaseProtos.NameInt64Pair.newBuilder().setName(e.getKey()).setValue(e.getValue()).build();
builder.addMetrics(nameInt64Pair);
}
return builder.build();
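A round-trip fragment using the changed helper (ProtobufUtil here is the shaded org.apache.hadoop.hbase.shaded.protobuf.ProtobufUtil; scanMetrics is a live client-side ScanMetrics instance):

byte[] wire = ProtobufUtil.toScanMetrics(scanMetrics, false).toByteArray(); // serialize without zeroing the counters
ScanMetrics copy = ProtobufUtil.toScanMetrics(wire); // deserialize into a fresh ScanMetrics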

View File

@@ -57,6 +57,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -641,6 +642,11 @@ public class RemoteHTable implements Table {
public boolean renewLease() {
throw new RuntimeException("renewLease() not supported");
}
@Override
public ScanMetrics getScanMetrics() {
throw new RuntimeException("getScanMetrics() not supported");
}
}
@Override

View File

@@ -52,7 +52,6 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
public ClientSideRegionScanner(Configuration conf, FileSystem fs,
Path rootDir, HTableDescriptor htd, HRegionInfo hri, Scan scan, ScanMetrics scanMetrics)
throws IOException {
// region is immutable, set isolation level
scan.setIsolationLevel(IsolationLevel.READ_UNCOMMITTED);

View File

@@ -81,7 +81,7 @@ public class TableRecordReaderImpl {
*/
public void restart(byte[] firstRow) throws IOException {
currentScan = new Scan(scan);
currentScan.setStartRow(firstRow);
currentScan.withStartRow(firstRow);
currentScan.setScanMetricsEnabled(true);
if (this.scanner != null) {
if (logScannerActivity) {
@@ -273,7 +273,7 @@ public class TableRecordReaderImpl {
* @throws IOException
*/
private void updateCounters() throws IOException {
ScanMetrics scanMetrics = currentScan.getScanMetrics();
ScanMetrics scanMetrics = scanner.getScanMetrics();
if (scanMetrics == null) {
return;
}
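A simplified sketch of the pattern updateCounters follows after this change (the real code resolves the counter via reflection to support multiple Hadoop versions; context is assumed to be a TaskInputOutputContext and the group name is illustrative):

ScanMetrics scanMetrics = scanner.getScanMetrics();
if (scanMetrics != null) {
  for (Map.Entry<String, Long> entry : scanMetrics.getMetricsMap().entrySet()) {
    context.getCounter("HBase Counters", entry.getKey()).increment(entry.getValue());
  }
}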

View File

@@ -192,15 +192,15 @@ public class TestServerSideScanMetricsFromClientSide {
for (int i = 0; i < ROWS.length - 1; i++) {
scan = new Scan(baseScan);
scan.setStartRow(ROWS[0]);
scan.setStopRow(ROWS[i + 1]);
scan.withStartRow(ROWS[0]);
scan.withStopRow(ROWS[i + 1]);
testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME, i + 1);
}
for (int i = ROWS.length - 1; i > 0; i--) {
scan = new Scan(baseScan);
scan.setStartRow(ROWS[i - 1]);
scan.setStopRow(ROWS[ROWS.length - 1]);
scan.withStartRow(ROWS[i - 1]);
scan.withStopRow(ROWS[ROWS.length - 1]);
testMetric(scan, ServerSideScanMetrics.COUNT_OF_ROWS_SCANNED_KEY_METRIC_NAME, ROWS.length - i);
}
@@ -318,12 +318,12 @@ public class TestServerSideScanMetricsFromClientSide {
public void testMetric(Scan scan, String metricKey, long expectedValue) throws Exception {
assertTrue("Scan should be configured to record metrics", scan.isScanMetricsEnabled());
ResultScanner scanner = TABLE.getScanner(scan);
// Iterate through all the results
for (Result r : scanner) {
while (scanner.next() != null) {
}
scanner.close();
ScanMetrics metrics = scan.getScanMetrics();
ScanMetrics metrics = scanner.getScanMetrics();
assertTrue("Metrics are null", metrics != null);
assertTrue("Metric : " + metricKey + " does not exist", metrics.hasCounter(metricKey));
final long actualMetricValue = metrics.getCounter(metricKey).get();

View File

@@ -42,6 +42,7 @@ import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Call;
import org.apache.hadoop.hbase.client.coprocessor.Batch.Callback;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
@@ -172,6 +173,11 @@ public class RegionAsTable implements Table {
public boolean renewLease() {
throw new UnsupportedOperationException();
}
@Override
public ScanMetrics getScanMetrics() {
throw new UnsupportedOperationException();
}
};
@Override