HBASE-7062 Move HLog stats to metrics 2

git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1411337 13f79535-47bb-0310-9956-ffa450edef68
eclark 2012-11-19 18:04:53 +00:00
parent 4aeeda4377
commit 7fdc889169
26 changed files with 428 additions and 134 deletions

View File

@@ -19,7 +19,8 @@
package org.apache.hadoop.hbase.metrics;
/**
- * BaseSource for dynamic metrics to announce to Metrics2
+ * BaseSource for dynamic metrics to announce to Metrics2.
+ * In hbase-hadoop{1|2}-compat there is an implementation of this interface.
*/
public interface BaseSource {

View File

@@ -0,0 +1,86 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.metrics.BaseSource;
/**
* Interface of the source that will export metrics about the region server's HLog.
*/
public interface MetricsWALSource extends BaseSource {
/**
* The name of the metrics.
*/
static final String METRICS_NAME = "WAL";
/**
* The name of the metrics context that metrics will be under.
*/
static final String METRICS_CONTEXT = "regionserver";
/**
* Description of the metrics.
*/
static final String METRICS_DESCRIPTION = "Metrics about HBase RegionServer HLog";
/**
* The name of the metrics context that metrics will be under in JMX.
*/
static final String METRICS_JMX_CONTEXT = "RegionServer,sub=" + METRICS_NAME;
static final String APPEND_TIME = "appendTime";
static final String APPEND_TIME_DESC = "Time an append to the log took.";
static final String APPEND_COUNT = "appendCount";
static final String APPEND_COUNT_DESC = "Number of appends to the write ahead log.";
static final String APPEND_SIZE = "appendSize";
static final String APPEND_SIZE_DESC = "Size (in bytes) of the data appended to the HLog.";
static final String SLOW_APPEND_COUNT = "slowAppendCount";
static final String SLOW_APPEND_COUNT_DESC = "Number of appends that were slow.";
static final String SYNC_TIME = "syncTime";
static final String SYNC_TIME_DESC = "The time it took to sync the HLog to HDFS.";
/**
* Add the append size.
*/
void incrementAppendSize(long size);
/**
* Add the time it took to append.
*/
void incrementAppendTime(long time);
/**
* Increment the count of hlog appends.
*/
void incrementAppendCount();
/**
* Increment the number of appends that were slow.
*/
void incrementSlowAppendCount();
/**
* Add the time it took to sync the hlog.
*/
void incrementSyncTime(long time);
}
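A quick usage sketch (not part of this commit): a caller obtains the per-JVM source through CompatibilitySingletonFactory, exactly as the tests below do, and feeds one WAL append into it as a count, a latency, and a size. The timing arithmetic and the editSizeInBytes variable are illustrative stand-ins.

MetricsWALSource source =
    CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
long start = EnvironmentEdgeManager.currentTimeMillis();
// ... append the edit to the HLog ...
long took = EnvironmentEdgeManager.currentTimeMillis() - start;
source.incrementAppendCount();
source.incrementAppendTime(took);
source.incrementAppendSize(editSizeInBytes); // stand-in for the summed KeyValue lengths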

View File

@@ -0,0 +1,14 @@
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
public class TestMetricsHLogSource {
@Test(expected=RuntimeException.class)
public void testGetInstanceNoHadoopCompat() throws Exception {
//This should throw an exception because there is no compat lib on the class path.
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
}
}

View File

@@ -29,6 +29,8 @@ import org.apache.hadoop.metrics2.lib.MetricMutableHistogram;
/**
* Hadoop1 implementation of MetricsMasterSource.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsMasterSourceImpl
extends BaseSourceImpl implements MetricsMasterSource {

View File

@@ -26,7 +26,10 @@ import org.apache.hadoop.metrics2.lib.*;
import org.apache.hadoop.metrics2.source.JvmMetricsSource;
/**
- * Hadoop 1 implementation of BaseSource (using metrics2 framework)
+ * Hadoop 1 implementation of BaseSource (using metrics2 framework). It handles registration to
+ * DefaultMetricsSystem and creation of the metrics registry.
*
* All MetricsSource's in hbase-hadoop1-compat should derive from this class.
*/
public class BaseSourceImpl implements BaseSource, MetricsSource {

View File

@@ -25,6 +25,8 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
/**
* Hadoop1 implementation of MetricsRegionServerSource.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsRegionServerSourceImpl
extends BaseSourceImpl implements MetricsRegionServerSource {

View File

@@ -0,0 +1,82 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Class that transitions metrics from HLog's MetricsWAL into the metrics subsystem.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSource {
private final MetricHistogram appendSizeHisto;
private final MetricHistogram appendTimeHisto;
private final MetricMutableCounterLong appendCount;
private final MetricMutableCounterLong slowAppendCount;
private final MetricHistogram syncTimeHisto;
public MetricsWALSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsWALSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
//Create and store the metrics that will be used.
appendTimeHisto = this.getMetricsRegistry().newHistogram(APPEND_TIME, APPEND_TIME_DESC);
appendSizeHisto = this.getMetricsRegistry().newHistogram(APPEND_SIZE, APPEND_SIZE_DESC);
appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L);
slowAppendCount = this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L);
syncTimeHisto = this.getMetricsRegistry().newHistogram(SYNC_TIME, SYNC_TIME_DESC);
}
@Override
public void incrementAppendSize(long size) {
appendSizeHisto.add(size);
}
@Override
public void incrementAppendTime(long time) {
appendTimeHisto.add(time);
}
@Override
public void incrementAppendCount() {
appendCount.incr();
}
@Override
public void incrementSlowAppendCount() {
slowAppendCount.incr();
}
@Override
public void incrementSyncTime(long time) {
syncTimeHisto.add(time);
}
}

View File

@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
* Hadoop1 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
MetricsReplicationSource {

View File

@@ -24,6 +24,8 @@ import org.apache.hadoop.metrics2.lib.MetricMutableCounterLong;
/**
* Hadoop One implementation of a metrics2 source that will export metrics from the Rest server to
* the hadoop metrics2 subsystem.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {

View File

@@ -24,6 +24,8 @@ import org.apache.hadoop.metrics2.lib.MetricMutableStat;
/**
* Hadoop 1 version of {@link MetricsThriftServerSource}.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
MetricsThriftServerSource {

View File

@@ -0,0 +1 @@
org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl
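This one-line file is a java.util.ServiceLoader provider-configuration file (it lives under META-INF/services and is named after the MetricsWALSource interface); it is how CompatibilitySingletonFactory locates the Hadoop 1 implementation at runtime. A minimal sketch of that lookup, assuming the factory resolves via ServiceLoader and caches the result (imports of java.util.ServiceLoader and java.util.Iterator elided):

ServiceLoader<MetricsWALSource> loader = ServiceLoader.load(MetricsWALSource.class);
Iterator<MetricsWALSource> it = loader.iterator();
if (!it.hasNext()) {
  // No compat jar on the classpath: the RuntimeException that
  // TestMetricsHLogSource above expects.
  throw new RuntimeException("No implementation found for " + MetricsWALSource.class);
}
MetricsWALSource instance = it.next(); // cached, so later calls return the same object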

View File

@@ -0,0 +1,19 @@
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
public class TestMetricsWALSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsWALSource walSource =
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
assertTrue(walSource instanceof MetricsWALSourceImpl);
assertSame(walSource,
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class));
}
}

View File

@@ -28,6 +28,8 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram;
/**
* Hadoop2 implementation of MetricsMasterSource.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsMasterSourceImpl
extends BaseSourceImpl implements MetricsMasterSource {

View File

@@ -30,7 +30,10 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram;
import org.apache.hadoop.metrics2.source.JvmMetrics;
/**
- * Hadoop 2 implementation of BaseSource (using metrics2 framework)
+ * Hadoop 2 implementation of BaseSource (using metrics2 framework). It handles registration to
+ * DefaultMetricsSystem and creation of the metrics registry.
*
* All MetricsSource's in hbase-hadoop2-compat should derive from this class.
*/
public class BaseSourceImpl implements BaseSource, MetricsSource {
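Per the new javadoc, BaseSourceImpl owns creation of the metrics registry and registration with DefaultMetricsSystem. A rough sketch of what the Hadoop 2 constructor plausibly does; the DynamicMetricsRegistry type and the "HBase" prefix are assumptions, not the committed code:

public BaseSourceImpl(String metricsName, String metricsDescription,
                      String metricsContext, String metricsJmxContext) {
  // Assumed shape: one registry per source, then register this MetricsSource
  // with the shared Hadoop 2 metrics system under its JMX context.
  this.metricsRegistry = new DynamicMetricsRegistry(metricsName).setContext(metricsContext);
  DefaultMetricsSystem.initialize("HBase");
  DefaultMetricsSystem.instance().register(metricsJmxContext, metricsDescription, this);
}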

View File

@@ -25,7 +25,9 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder;
import org.apache.hadoop.metrics2.lib.Interns;
/**
- * Hadoop1 implementation of MetricsRegionServerSource.
+ * Hadoop2 implementation of MetricsRegionServerSource.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsRegionServerSourceImpl
extends BaseSourceImpl implements MetricsRegionServerSource {

View File

@@ -0,0 +1,81 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
import org.apache.hadoop.metrics2.MetricHistogram;
import org.apache.hadoop.metrics2.lib.MutableCounterLong;
/**
* Class that transitions metrics from HLog's MetricsWAL into the metrics subsystem.
*
* Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsWALSourceImpl extends BaseSourceImpl implements MetricsWALSource {
private final MetricHistogram appendSizeHisto;
private final MetricHistogram appendTimeHisto;
private final MetricHistogram syncTimeHisto;
private final MutableCounterLong appendCount;
private final MutableCounterLong slowAppendCount;
public MetricsWALSourceImpl() {
this(METRICS_NAME, METRICS_DESCRIPTION, METRICS_CONTEXT, METRICS_JMX_CONTEXT);
}
public MetricsWALSourceImpl(String metricsName,
String metricsDescription,
String metricsContext,
String metricsJmxContext) {
super(metricsName, metricsDescription, metricsContext, metricsJmxContext);
//Create and store the metrics that will be used.
appendTimeHisto = this.getMetricsRegistry().newHistogram(APPEND_TIME, APPEND_TIME_DESC);
appendSizeHisto = this.getMetricsRegistry().newHistogram(APPEND_SIZE, APPEND_SIZE_DESC);
appendCount = this.getMetricsRegistry().newCounter(APPEND_COUNT, APPEND_COUNT_DESC, 0L);
slowAppendCount = this.getMetricsRegistry().newCounter(SLOW_APPEND_COUNT, SLOW_APPEND_COUNT_DESC, 0L);
syncTimeHisto = this.getMetricsRegistry().newHistogram(SYNC_TIME, SYNC_TIME_DESC);
}
@Override
public void incrementAppendSize(long size) {
appendSizeHisto.add(size);
}
@Override
public void incrementAppendTime(long time) {
appendTimeHisto.add(time);
}
@Override
public void incrementAppendCount() {
appendCount.incr();
}
@Override
public void incrementSlowAppendCount() {
slowAppendCount.incr();
}
@Override
public void incrementSyncTime(long time) {
syncTimeHisto.add(time);
}
}

View File

@@ -23,6 +23,8 @@ import org.apache.hadoop.hbase.metrics.BaseSourceImpl;
/**
* Hadoop2 implementation of MetricsReplicationSource. This provides access to metrics gauges and
* counters.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsReplicationSourceImpl extends BaseSourceImpl implements
MetricsReplicationSource {

View File

@@ -24,6 +24,8 @@ import org.apache.hadoop.metrics2.lib.MutableCounterLong;
/**
* Hadoop Two implementation of a metrics2 source that will export metrics from the Rest server to
* the hadoop metrics2 subsystem.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsRESTSourceImpl extends BaseSourceImpl implements MetricsRESTSource {

View File

@@ -24,6 +24,8 @@ import org.apache.hadoop.metrics2.lib.MutableStat;
/**
* Hadoop 2 version of {@link org.apache.hadoop.hbase.thrift.MetricsThriftServerSource}.
+ *
+ * Implements BaseSource through BaseSourceImpl, following the pattern.
*/
public class MetricsThriftServerSourceImpl extends BaseSourceImpl implements
MetricsThriftServerSource {

View File

@@ -0,0 +1 @@
org.apache.hadoop.hbase.regionserver.wal.MetricsWALSourceImpl

View File

@@ -0,0 +1,19 @@
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.junit.Test;
import static org.junit.Assert.assertSame;
import static org.junit.Assert.assertTrue;
public class TestMetricsWALSourceImpl {
@Test
public void testGetInstance() throws Exception {
MetricsWALSource walSource =
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
assertTrue(walSource instanceof MetricsWALSourceImpl);
assertSame(walSource,
CompatibilitySingletonFactory.getInstance(MetricsWALSource.class));
}
}

View File

@@ -18,22 +18,17 @@
*/
package org.apache.hadoop.hbase.regionserver.wal;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.TreeSet;
@@ -45,8 +40,6 @@ import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -56,18 +49,15 @@ import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.Syncable;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.KeyValue;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Reader;
import org.apache.hadoop.hbase.regionserver.wal.HLog.Writer;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.HasThread;
import org.apache.hadoop.hbase.util.Threads;
@@ -214,7 +204,8 @@ class FSHLog implements HLog, Syncable {
private final int closeErrorsTolerated;
private final AtomicInteger closeErrorCount = new AtomicInteger();
+ private final MetricsWAL metrics;
/**
* Constructor.
*
@@ -365,6 +356,8 @@ class FSHLog implements HLog, Syncable {
Threads.setDaemonThreadRunning(logSyncerThread.getThread(),
Thread.currentThread().getName() + ".logSyncer");
coprocessorHost = new WALCoprocessorHost(this, conf);
+ this.metrics = new MetricsWAL();
}
// use reflection to search for getDefaultBlockSize(Path f)
@@ -1045,7 +1038,7 @@ class FSHLog implements HLog, Syncable {
}
try {
long doneUpto;
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// First flush all the pending writes to HDFS. Then
// issue the sync to HDFS. If sync is successful, then update
// syncedTillHere to indicate that transactions till this
@@ -1081,7 +1074,7 @@ class FSHLog implements HLog, Syncable {
}
this.syncedTillHere = Math.max(this.syncedTillHere, doneUpto);
- HLogMetrics.syncTime.inc(System.currentTimeMillis() - now);
+ this.metrics.finishSync(EnvironmentEdgeManager.currentTimeMillis() - now);
if (!this.logRollRunning) {
checkLowReplication();
try {
@@ -1208,28 +1201,19 @@ class FSHLog implements HLog, Syncable {
}
}
try {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
// coprocessor hook:
if (!coprocessorHost.preWALWrite(info, logKey, logEdit)) {
// write to our buffer for the Hlog file.
logSyncerThread.append(new FSHLog.Entry(logKey, logEdit));
}
- long took = System.currentTimeMillis() - now;
+ long took = EnvironmentEdgeManager.currentTimeMillis() - now;
coprocessorHost.postWALWrite(info, logKey, logEdit);
- HLogMetrics.writeTime.inc(took);
long len = 0;
for (KeyValue kv : logEdit.getKeyValues()) {
len += kv.getLength();
}
- HLogMetrics.writeSize.inc(len);
- if (took > 1000) {
- LOG.warn(String.format(
- "%s took %d ms appending an edit to hlog; editcount=%d, len~=%s",
- Thread.currentThread().getName(), took, this.numEntries.get(),
- StringUtils.humanReadableInt(len)));
- HLogMetrics.slowHLogAppendCount.incrementAndGet();
- HLogMetrics.slowHLogAppendTime.inc(took);
- }
+ this.metrics.finishAppend(took, len);
} catch (IOException e) {
LOG.fatal("Could not append. Requesting close of hlog", e);
requestLogRoll();
@@ -1299,18 +1283,18 @@ class FSHLog implements HLog, Syncable {
}
long txid = 0;
synchronized (updateLock) {
- long now = System.currentTimeMillis();
+ long now = EnvironmentEdgeManager.currentTimeMillis();
WALEdit edit = completeCacheFlushLogEdit();
HLogKey key = makeKey(encodedRegionName, tableName, logSeqId,
System.currentTimeMillis(), HConstants.DEFAULT_CLUSTER_ID);
logSyncerThread.append(new Entry(key, edit));
txid = this.unflushedEntries.incrementAndGet();
- HLogMetrics.writeTime.inc(System.currentTimeMillis() - now);
+ long took = EnvironmentEdgeManager.currentTimeMillis() - now;
long len = 0;
for (KeyValue kv : edit.getKeyValues()) {
len += kv.getLength();
}
- HLogMetrics.writeSize.inc(len);
+ this.metrics.finishAppend(took, len);
this.numEntries.incrementAndGet();
}
// sync txn to file system

View File

@@ -36,7 +36,6 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
- import org.apache.hadoop.hbase.regionserver.wal.HLogMetrics.Metric;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.classification.InterfaceAudience;

View File

@@ -1,83 +0,0 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import java.util.concurrent.atomic.AtomicLong;
public class HLogMetrics {
public static class Metric {
public long min = Long.MAX_VALUE;
public long max = 0;
public long total = 0;
public int count = 0;
synchronized void inc(final long val) {
min = Math.min(min, val);
max = Math.max(max, val);
total += val;
++count;
}
synchronized Metric get() {
Metric copy = new Metric();
copy.min = min;
copy.max = max;
copy.total = total;
copy.count = count;
this.min = Long.MAX_VALUE;
this.max = 0;
this.total = 0;
this.count = 0;
return copy;
}
}
// For measuring latency of writes
static Metric writeTime = new Metric();
static Metric writeSize = new Metric();
// For measuring latency of syncs
static Metric syncTime = new Metric();
//For measuring slow HLog appends
static AtomicLong slowHLogAppendCount = new AtomicLong();
static Metric slowHLogAppendTime = new Metric();
public static Metric getWriteTime() {
return writeTime.get();
}
public static Metric getWriteSize() {
return writeSize.get();
}
public static Metric getSyncTime() {
return syncTime.get();
}
public static long getSlowAppendCount() {
return slowHLogAppendCount.get();
}
public static Metric getSlowAppendTime() {
return slowHLogAppendTime.get();
}
}
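For contrast with the metrics2 histograms that replace this class: get() returned a snapshot and zeroed the accumulators, so every reading was per-interval, and tests could "clear" state simply by calling a getter. A worked example of that reset-on-read behavior (same-package access assumed, since inc() and get() are package-private):

HLogMetrics.Metric m = new HLogMetrics.Metric();
m.inc(3);
m.inc(7);
HLogMetrics.Metric snap = m.get();  // snap: min=3, max=7, total=10, count=2
HLogMetrics.Metric again = m.get(); // again.count == 0, because get() reset the accumulators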

View File

@@ -0,0 +1,61 @@
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.regionserver.wal;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.util.StringUtils;
/**
* Class used to push numbers about the WAL into the metrics subsystem. This will take a
* single function call and turn it into multiple manipulations of the hadoop metrics system.
*/
@InterfaceAudience.Private
public class MetricsWAL {
static final Log LOG = LogFactory.getLog(MetricsWAL.class);
private final MetricsWALSource source;
public MetricsWAL() {
source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
}
public void finishSync(long time) {
source.incrementSyncTime(time);
}
public void finishAppend(long time, long size) {
source.incrementAppendCount();
source.incrementAppendTime(time);
source.incrementAppendSize(size);
if (time > 1000) {
source.incrementSlowAppendCount();
LOG.warn(String.format("%s took %d ms appending an edit to hlog; len~=%s",
Thread.currentThread().getName(),
time,
StringUtils.humanReadableInt(size)));
}
}
}
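A short usage sketch of the class above, showing how a single call fans out into several metric updates and how the fixed 1000 ms slow-append threshold behaves (the numbers are illustrative):

MetricsWAL metrics = new MetricsWAL();
metrics.finishSync(9);            // one 9 ms sample into the syncTime histogram
metrics.finishAppend(12, 1024);   // bumps appendCount, appendTime, and appendSize
metrics.finishAppend(1500, 4096); // over 1000 ms: also bumps slowAppendCount and logs a WARN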

View File

@@ -36,6 +36,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.hbase.CompatibilitySingletonFactory;
import org.apache.hadoop.hbase.DoNotRetryIOException;
import org.apache.hadoop.hbase.FailedSanityCheckException;
import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -56,13 +57,11 @@ import org.apache.hadoop.hbase.MultithreadedTestUtil.RepeatingTestThread;
import org.apache.hadoop.hbase.client.Append;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.HBaseAdmin;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Increment;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.filter.BinaryComparator;
import org.apache.hadoop.hbase.filter.ColumnCountGetFilter;
@@ -72,7 +71,6 @@ import org.apache.hadoop.hbase.filter.FilterList;
import org.apache.hadoop.hbase.filter.NullComparator;
import org.apache.hadoop.hbase.filter.PrefixFilter;
import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
- import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.monitoring.MonitoredRPCHandler;
import org.apache.hadoop.hbase.monitoring.MonitoredTask;
import org.apache.hadoop.hbase.monitoring.TaskMonitor;
@@ -82,13 +80,12 @@ import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogFactory;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.regionserver.wal.HLogKey;
- import org.apache.hadoop.hbase.regionserver.wal.HLogMetrics;
+ import org.apache.hadoop.hbase.regionserver.wal.MetricsWALSource;
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
+ import org.apache.hadoop.hbase.test.MetricsAssertHelper;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManagerTestHelper;
import org.apache.hadoop.hbase.util.IncrementingEnvironmentEdge;
import org.apache.hadoop.hbase.util.ManualEnvironmentEdge;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
@@ -130,6 +127,9 @@ public class TestHRegion extends HBaseTestCase {
protected final byte [] row = Bytes.toBytes("rowA");
protected final byte [] row2 = Bytes.toBytes("rowB");
+ protected final MetricsAssertHelper metricsAssertHelper =
+ CompatibilitySingletonFactory.getInstance(MetricsAssertHelper.class);
/**
* @see org.apache.hadoop.hbase.HBaseTestCase#setUp()
@@ -628,9 +628,10 @@ public class TestHRegion extends HBaseTestCase {
byte[] qual = Bytes.toBytes("qual");
byte[] val = Bytes.toBytes("val");
this.region = initHRegion(b, getName(), cf);
+ MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
try {
- HLogMetrics.getSyncTime(); // clear counter from prior tests
- assertEquals(0, HLogMetrics.getSyncTime().count);
+ long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
LOG.info("First a batch put with all valid puts");
final Put[] puts = new Put[10];
@@ -645,7 +646,8 @@
assertEquals(OperationStatusCode.SUCCESS, codes[i]
.getOperationStatusCode());
}
- assertEquals(1, HLogMetrics.getSyncTime().count);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);
LOG.info("Next a batch put with one invalid family");
puts[5].add(Bytes.toBytes("BAD_CF"), qual, val);
@@ -655,7 +657,8 @@
assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
}
- assertEquals(1, HLogMetrics.getSyncTime().count);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 2, source);
LOG.info("Next a batch put that has to break into two batches to avoid a lock");
Integer lockedRow = region.obtainRowLock(Bytes.toBytes("row_2"));
@@ -676,7 +679,7 @@
LOG.info("...waiting for put thread to sync first time");
long startWait = System.currentTimeMillis();
- while (HLogMetrics.getSyncTime().count == 0) {
+ while (metricsAssertHelper.getCounter("syncTimeNumOps", source) == syncs + 2) {
Thread.sleep(100);
if (System.currentTimeMillis() - startWait > 10000) {
fail("Timed out waiting for thread to sync first minibatch");
@@ -687,7 +690,7 @@
LOG.info("...joining on thread");
ctx.stop();
LOG.info("...checking that next batch was synced");
- assertEquals(1, HLogMetrics.getSyncTime().count);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 4, source);
codes = retFromThread.get();
for (int i = 0; i < 10; i++) {
assertEquals((i == 5) ? OperationStatusCode.BAD_FAMILY :
@@ -711,7 +714,7 @@
OperationStatusCode.SUCCESS, codes[i].getOperationStatusCode());
}
// Make sure we didn't do an extra batch
- assertEquals(1, HLogMetrics.getSyncTime().count);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 5, source);
// Make sure we still hold lock
assertTrue(region.isRowLocked(lockedRow));
@@ -737,8 +740,9 @@
this.region = initHRegion(b, getName(), conf, cf);
try{
- HLogMetrics.getSyncTime(); // clear counter from prior tests
- assertEquals(0, HLogMetrics.getSyncTime().count);
+ MetricsWALSource source = CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
+ long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
final Put[] puts = new Put[10];
for (int i = 0; i < 10; i++) {
@@ -752,8 +756,7 @@
assertEquals(OperationStatusCode.SANITY_CHECK_FAILURE, codes[i]
.getOperationStatusCode());
}
- assertEquals(0, HLogMetrics.getSyncTime().count);
+ metricsAssertHelper.assertCounter("syncTimeNumOps", syncs, source);
} finally {
HRegion.closeHRegion(this.region);
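A note on the rewritten assertions above: the metrics2 histogram exports a cumulative "syncTimeNumOps" counter that is never reset within the JVM, unlike the old reset-on-read Metric, so each test first reads a baseline and then asserts relative increments. Distilled to a sketch (the single-sync trigger is illustrative):

MetricsWALSource source =
    CompatibilitySingletonFactory.getInstance(MetricsWALSource.class);
long syncs = metricsAssertHelper.getCounter("syncTimeNumOps", source); // baseline, not 0
// ... run one batch that syncs the WAL exactly once ...
metricsAssertHelper.assertCounter("syncTimeNumOps", syncs + 1, source);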