HBASE-6591 checkAndPut executed/not metrics
git-svn-id: https://svn.apache.org/repos/asf/hbase/trunk@1387746 13f79535-47bb-0310-9956-ffa450edef68
parent 527bf4947b
commit f482cff0c9
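This change adds two region-level counters, checkAndMutateChecksPassed and checkAndMutateChecksFailed, incremented in HRegion.checkAndMutate() on the success and failure paths respectively. On each metrics pass, HRegionServer sums the per-region values over all online regions and publishes the totals through two new MetricsLongValue gauges in RegionServerMetrics, and TestRegionServerMetrics gains a test that drives checkAndPut and checkAndDelete through both outcomes.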
@@ -239,6 +239,8 @@ public class HRegion implements HeapSize { // , Writable{
   final AtomicLong numPutsWithoutWAL = new AtomicLong(0);
   final AtomicLong dataInMemoryWithoutWAL = new AtomicLong(0);
+  final Counter checkAndMutateChecksPassed = new Counter();
+  final Counter checkAndMutateChecksFailed = new Counter();
   final Counter readRequestsCount = new Counter();
   final Counter writeRequestsCount = new Counter();
@@ -2416,7 +2418,7 @@ public class HRegion implements HeapSize { // , Writable{
    * @param lockId
    * @param writeToWAL
    * @throws IOException
-   * @return true if the new put was execute, false otherwise
+   * @return true if the new put was executed, false otherwise
    */
   public boolean checkAndMutate(byte [] row, byte [] family, byte [] qualifier,
       CompareOp compareOp, ByteArrayComparable comparator, Writable w,
@@ -2498,8 +2500,10 @@ public class HRegion implements HeapSize { // , Writable{
           prepareDelete(d);
           internalDelete(d, HConstants.DEFAULT_CLUSTER_ID, writeToWAL);
         }
+        this.checkAndMutateChecksPassed.increment();
         return true;
       }
+      this.checkAndMutateChecksFailed.increment();
       return false;
     } finally {
       if(lockId == null) releaseRowLock(lid);
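Stripped of HBase specifics, the pattern this hunk implements is: bump one counter on the success path only after the mutation is applied, bump the other on the early-return failure path where nothing is written. Below is a minimal self-contained sketch of that pattern; AtomicLong stands in for HBase's Counter, and ConditionalMutator, checkMatches(), and the Runnable mutation are hypothetical placeholders, not the actual HRegion code.

import java.util.Arrays;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of pass/fail accounting in checkAndMutate-style code.
class ConditionalMutator {
    final AtomicLong checksPassed = new AtomicLong(0);
    final AtomicLong checksFailed = new AtomicLong(0);

    boolean checkAndMutate(byte[] expected, byte[] actual, Runnable applyMutation) {
        if (checkMatches(expected, actual)) {
            applyMutation.run();            // perform the put/delete
            checksPassed.incrementAndGet(); // count only after the write succeeds
            return true;
        }
        checksFailed.incrementAndGet();     // check failed: nothing was written
        return false;
    }

    private boolean checkMatches(byte[] expected, byte[] actual) {
        return Arrays.equals(expected, actual);
    }
}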
@@ -4229,6 +4233,10 @@ public class HRegion implements HeapSize { // , Writable{
         newRegionInfo, a.getTableDesc(), null);
     dstRegion.readRequestsCount.set(a.readRequestsCount.get() + b.readRequestsCount.get());
     dstRegion.writeRequestsCount.set(a.writeRequestsCount.get() + b.writeRequestsCount.get());
+    dstRegion.checkAndMutateChecksFailed.set(
+      a.checkAndMutateChecksFailed.get() + b.checkAndMutateChecksFailed.get());
+    dstRegion.checkAndMutateChecksPassed.set(
+      a.checkAndMutateChecksPassed.get() + b.checkAndMutateChecksPassed.get());
     dstRegion.initialize();
     dstRegion.compactStores();
     if (LOG.isDebugEnabled()) {
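This hunk is in the region-merge path: when regions a and b are merged, the destination region's counters are seeded with the sum of both sources, so the new metrics survive a merge the same way the existing read and write request counts already do.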
@@ -5034,7 +5042,7 @@ public class HRegion implements HeapSize { // , Writable{
   public static final long FIXED_OVERHEAD = ClassSize.align(
       ClassSize.OBJECT +
       ClassSize.ARRAY +
-      37 * ClassSize.REFERENCE + Bytes.SIZEOF_INT +
+      39 * ClassSize.REFERENCE + Bytes.SIZEOF_INT +
       (7 * Bytes.SIZEOF_LONG) +
       Bytes.SIZEOF_BOOLEAN);
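The reference count in FIXED_OVERHEAD goes from 37 to 39 because HRegion gained two new object-reference fields (the two Counter instances); since HRegion implements HeapSize, its heap-size accounting must track every reference it holds.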
@@ -1453,6 +1453,8 @@ public class HRegionServer implements ClientProtocol,
     long memstoreSize = 0;
     int readRequestsCount = 0;
     int writeRequestsCount = 0;
+    long checkAndMutateChecksFailed = 0;
+    long checkAndMutateChecksPassed = 0;
     long storefileIndexSize = 0;
     HDFSBlocksDistribution hdfsBlocksDistribution =
         new HDFSBlocksDistribution();
@@ -1476,6 +1478,8 @@ public class HRegionServer implements ClientProtocol,
       dataInMemoryWithoutWAL += r.dataInMemoryWithoutWAL.get();
       readRequestsCount += r.readRequestsCount.get();
       writeRequestsCount += r.writeRequestsCount.get();
+      checkAndMutateChecksFailed += r.checkAndMutateChecksFailed.get();
+      checkAndMutateChecksPassed += r.checkAndMutateChecksPassed.get();
       synchronized (r.stores) {
         stores += r.stores.size();
         for (Map.Entry<byte[], Store> ee : r.stores.entrySet()) {
@@ -1549,6 +1553,8 @@ public class HRegionServer implements ClientProtocol,
         (int) (totalStaticBloomSize / 1024));
     this.metrics.readRequestsCount.set(readRequestsCount);
     this.metrics.writeRequestsCount.set(writeRequestsCount);
+    this.metrics.checkAndMutateChecksFailed.set(checkAndMutateChecksFailed);
+    this.metrics.checkAndMutateChecksPassed.set(checkAndMutateChecksPassed);
     this.metrics.compactionQueueSize.set(compactSplitThread
         .getCompactionQueueSize());
     this.metrics.flushQueueSize.set(cacheFlusher
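These three HRegionServer hunks follow the existing metrics pattern: on each metrics tick, zero a local accumulator, sum the per-region counters over all online regions, then publish the totals into the server-level gauges. The sketch below shows that roll-up under stated assumptions: Region and the published map are hypothetical stand-ins for HRegion and RegionServerMetrics, not the real classes.

import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the per-tick metrics roll-up.
class MetricsRollup {
    static class Region {
        final AtomicLong checkAndMutateChecksPassed = new AtomicLong();
        final AtomicLong checkAndMutateChecksFailed = new AtomicLong();
    }

    final ConcurrentHashMap<String, Long> published = new ConcurrentHashMap<>();

    void doMetrics(List<Region> onlineRegions) {
        long passed = 0, failed = 0;            // fresh accumulators each tick
        for (Region r : onlineRegions) {
            passed += r.checkAndMutateChecksPassed.get();
            failed += r.checkAndMutateChecksFailed.get();
        }
        // Overwrite rather than increment: the region counters are cumulative,
        // so each tick replaces the server-level total (the real code calls
        // MetricsLongValue.set() for the same reason).
        published.put("checkAndMutateChecksPassed", passed);
        published.put("checkAndMutateChecksFailed", failed);
    }
}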
@@ -160,6 +160,17 @@ public class RegionServerMetrics implements Updater {
   public final MetricsLongValue writeRequestsCount =
     new MetricsLongValue("writeRequestsCount", registry);
+
+  /**
+   * Count of checkAndMutates that failed the check
+   */
+  public final MetricsLongValue checkAndMutateChecksFailed =
+    new MetricsLongValue("checkAndMutateChecksFailed", registry);
+
+  /**
+   * Count of checkAndMutates that passed the check
+   */
+  public final MetricsLongValue checkAndMutateChecksPassed =
+    new MetricsLongValue("checkAndMutateChecksPassed", registry);
   /**
    */
   public final MetricsIntValue storefileIndexSizeMB =
@@ -40,6 +40,7 @@ import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
 import org.apache.hadoop.hbase.regionserver.metrics.RegionMetricsStorage;
+import org.apache.hadoop.hbase.regionserver.metrics.RegionServerMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics;
 import org.apache.hadoop.hbase.regionserver.metrics.SchemaMetrics.
     StoreMetricType;
@@ -124,7 +125,7 @@ public class TestRegionServerMetrics {
         RegionMetricsStorage.getNumericMetric(storeMetricName)
             - (startValue != null ? startValue : 0));
   }

   @Test
   public void testOperationMetrics() throws IOException {
     String cf = "OPCF";
@@ -198,6 +199,55 @@ public class TestRegionServerMetrics {

   }

+  private void assertCheckAndMutateMetrics(final HRegionServer rs,
+      long expectedPassed, long expectedFailed) {
+    rs.doMetrics();
+    RegionServerMetrics metrics = rs.getMetrics();
+    assertEquals("checkAndMutatePassed metrics incorrect",
+      expectedPassed, metrics.checkAndMutateChecksPassed.get());
+    assertEquals("checkAndMutateFailed metrics incorrect",
+      expectedFailed, metrics.checkAndMutateChecksFailed.get());
+  }
+
+  @Test
+  public void testCheckAndMutateMetrics() throws Exception {
+    final HRegionServer rs =
+      TEST_UTIL.getMiniHBaseCluster().getRegionServer(0);
+    byte [] tableName = Bytes.toBytes("testCheckAndMutateMetrics");
+    byte [] family = Bytes.toBytes("family");
+    byte [] qualifier = Bytes.toBytes("qualifier");
+    byte [] row = Bytes.toBytes("row1");
+    HTable table = TEST_UTIL.createTable(tableName, family);
+    long expectedPassed = 0;
+    long expectedFailed = 0;
+
+    // checkAndPut success
+    Put put = new Put(row);
+    byte [] val1 = Bytes.toBytes("val1");
+    put.add(family, qualifier, val1);
+    table.checkAndPut(row, family, qualifier, null, put);
+    expectedPassed++;
+    assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed);
+
+    // checkAndPut failure
+    byte [] val2 = Bytes.toBytes("val2");
+    table.checkAndPut(row, family, qualifier, val2, put);
+    expectedFailed++;
+    assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed);
+
+    // checkAndDelete success
+    Delete delete = new Delete(row);
+    delete.deleteColumn(family, qualifier);
+    table.checkAndDelete(row, family, qualifier, val1, delete);
+    expectedPassed++;
+    assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed);
+
+    // checkAndDelete failure
+    table.checkAndDelete(row, family, qualifier, val1, delete);
+    expectedFailed++;
+    assertCheckAndMutateMetrics(rs, expectedPassed, expectedFailed);
+  }
+
   @Test
   public void testRemoveRegionMetrics() throws IOException, InterruptedException {
     String cf = "REMOVECF";
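The test exercises the client-side contract that makes the pass/fail split meaningful: a checkAndPut with a null expected value passes only when the cell does not exist, and a check against a stale value fails without writing. Below is a standalone sketch of the same client calls; it assumes the HTable API of this era, a running cluster reachable from the default configuration, and a pre-created table "t" with family "family" (all assumptions, not part of the commit).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.util.Bytes;

public class CheckAndPutExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        HTable table = new HTable(conf, "t"); // assumes table "t" exists

        byte[] row = Bytes.toBytes("row1");
        byte[] family = Bytes.toBytes("family");
        byte[] qualifier = Bytes.toBytes("qualifier");

        Put put = new Put(row);
        put.add(family, qualifier, Bytes.toBytes("val1"));

        // Passes (increments checkAndMutateChecksPassed): a null expected
        // value means "only put if the cell does not exist yet".
        boolean first = table.checkAndPut(row, family, qualifier, null, put);

        // Fails (increments checkAndMutateChecksFailed): the cell now holds
        // "val1", not "val2", so the check does not match and nothing is written.
        boolean second = table.checkAndPut(
            row, family, qualifier, Bytes.toBytes("val2"), put);

        System.out.println("first=" + first + " second=" + second);
        table.close();
    }
}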