diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index a6e846eb28c..5bf2ec7e354 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -105,6 +105,13 @@ public class RegionLoad {
return regionLoadPB.getReadRequestsCount();
}
+ /**
+ * @return the number of filtered read requests made to region
+ */
+ public long getFilteredReadRequestsCount() {
+ return regionLoadPB.getFilteredReadRequestsCount();
+ }
+
/**
* @return the number of write requests made to region
*/
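[Note] A minimal client-side sketch (illustrative only, not part of this patch) of how the new counters surface through the existing ClusterStatus API; imports from org.apache.hadoop.hbase and org.apache.hadoop.hbase.client are omitted:

    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName sn : status.getServers()) {
        ServerLoad sl = status.getLoad(sn);
        long serverFiltered = sl.getFilteredReadRequestsCount(); // summed over regions
        for (RegionLoad rl : sl.getRegionsLoad().values()) {
          long regionFiltered = rl.getFilteredReadRequestsCount(); // per-region counter
        }
      }
    }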
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
index 60fae8599d0..1ddcc2059e8 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/ServerLoad.java
@@ -49,6 +49,7 @@ public class ServerLoad {
private int memstoreSizeMB = 0;
private int storefileIndexSizeMB = 0;
private long readRequestsCount = 0;
+ private long filteredReadRequestsCount = 0;
private long writeRequestsCount = 0;
private int rootIndexSizeKB = 0;
private int totalStaticIndexSizeKB = 0;
@@ -66,6 +67,7 @@ public class ServerLoad {
memstoreSizeMB += rl.getMemstoreSizeMB();
storefileIndexSizeMB += rl.getStorefileIndexSizeMB();
readRequestsCount += rl.getReadRequestsCount();
+ filteredReadRequestsCount += rl.getFilteredReadRequestsCount();
writeRequestsCount += rl.getWriteRequestsCount();
rootIndexSizeKB += rl.getRootIndexSizeKB();
totalStaticIndexSizeKB += rl.getTotalStaticIndexSizeKB();
@@ -145,6 +147,10 @@ public class ServerLoad {
return readRequestsCount;
}
+ public long getFilteredReadRequestsCount() {
+ return filteredReadRequestsCount;
+ }
+
public long getWriteRequestsCount() {
return writeRequestsCount;
}
@@ -297,6 +303,8 @@ public class ServerLoad {
Strings.appendKeyValue(sb, "storefileIndexSizeMB",
Integer.valueOf(this.storefileIndexSizeMB));
sb = Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount));
+ sb = Strings.appendKeyValue(sb, "filteredReadRequestsCount",
+ Long.valueOf(this.filteredReadRequestsCount));
sb = Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount));
sb = Strings.appendKeyValue(sb, "rootIndexSizeKB", Integer.valueOf(this.rootIndexSizeKB));
sb =
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
index ee0217a206f..e4df1c0f104 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSource.java
@@ -162,6 +162,9 @@ public interface MetricsRegionServerSource extends BaseSource {
String READ_REQUEST_COUNT = "readRequestCount";
String READ_REQUEST_COUNT_DESC =
"Number of read requests this region server has answered.";
+ String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
+ String FILTERED_READ_REQUEST_COUNT_DESC =
+ "Number of filtered read requests this region server has answered.";
String WRITE_REQUEST_COUNT = "writeRequestCount";
String WRITE_REQUEST_COUNT_DESC =
"Number of mutation requests this region server has answered.";
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
index 02dec8d8583..07c3773aa68 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapper.java
@@ -112,6 +112,11 @@ public interface MetricsRegionServerWrapper {
*/
long getReadRequestsCount();
+ /**
+ * Get the number of filtered read requests to regions hosted on this region server.
+ */
+ long getFilteredReadRequestsCount();
+
/**
* Get the number of write requests to regions hosted on this region server.
*/
diff --git a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
index 0997f7c8d54..20ca9bdf942 100644
--- a/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
+++ b/hbase-hadoop-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapper.java
@@ -69,6 +69,11 @@ public interface MetricsRegionWrapper {
*/
long getReadRequestCount();
+ /**
+ * Get the total number of filtered read requests that have been issued against this region.
+ */
+ long getFilteredReadRequestCount();
+
/**
* Get the total number of mutations that have been issued against this region.
*/
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
index f40811c55e8..42476a71583 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerSourceImpl.java
@@ -201,6 +201,8 @@ public class MetricsRegionServerSourceImpl
rsWrap.getTotalRequestCount())
.addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC),
rsWrap.getReadRequestsCount())
+ .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT, FILTERED_READ_REQUEST_COUNT_DESC),
+ rsWrap.getFilteredReadRequestsCount())
.addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC),
rsWrap.getWriteRequestsCount())
.addCounter(Interns.info(CHECK_MUTATE_FAILED_COUNT, CHECK_MUTATE_FAILED_COUNT_DESC),
diff --git a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
index 1df72d51723..fab6b515f65 100644
--- a/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionSourceImpl.java
@@ -236,6 +236,10 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
MetricsRegionServerSource.READ_REQUEST_COUNT_DESC),
this.regionWrapper.getReadRequestCount());
+ mrb.addCounter(Interns.info(
+ regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT,
+ MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC),
+ this.regionWrapper.getFilteredReadRequestCount());
mrb.addCounter(Interns.info(
regionNamePrefix + MetricsRegionServerSource.WRITE_REQUEST_COUNT,
MetricsRegionServerSource.WRITE_REQUEST_COUNT_DESC),
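[Note] The metrics2 key keeps the singular form of its neighbors (filteredReadRequestCount) while the ClusterStatus accessors use the plural (getFilteredReadRequestsCount), mirroring the existing readRequestCount / getReadRequestsCount split. For illustration, the per-region key registered above composes like this (prefix values borrowed from the MetricsRegionWrapperStub used in TestMetricsRegion further down):

    String prefix = "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_";
    String key = prefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT;
    // -> "..._metric_filteredReadRequestCount"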
diff --git a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
index 30882600ce0..19624aadddc 100644
--- a/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
+++ b/hbase-hadoop2-compat/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionSourceImpl.java
@@ -106,6 +106,11 @@ public class TestMetricsRegionSourceImpl {
return 0;
}
+ @Override
+ public long getFilteredReadRequestCount() {
+ return 0;
+ }
+
@Override
public long getWriteRequestCount() {
return 0;
diff --git a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
index 5fd4e181ba3..048f5cfaa23 100644
--- a/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
+++ b/hbase-protocol/src/main/java/org/apache/hadoop/hbase/protobuf/generated/ClusterStatusProtos.java
@@ -3613,6 +3613,24 @@ public final class ClusterStatusProtos {
*/
org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceIdOrBuilder getStoreCompleteSequenceIdOrBuilder(
int index);
+
+ // optional uint64 filtered_read_requests_count = 19;
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ boolean hasFilteredReadRequestsCount();
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ long getFilteredReadRequestsCount();
}
/**
* Protobuf type {@code hbase.pb.RegionLoad}
@@ -3766,6 +3784,11 @@ public final class ClusterStatusProtos {
storeCompleteSequenceId_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.StoreSequenceId.PARSER, extensionRegistry));
break;
}
+ case 152: {
+ bitField0_ |= 0x00020000;
+ filteredReadRequestsCount_ = input.readUInt64();
+ break;
+ }
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -4283,6 +4306,30 @@ public final class ClusterStatusProtos {
return storeCompleteSequenceId_.get(index);
}
+ // optional uint64 filtered_read_requests_count = 19;
+ public static final int FILTERED_READ_REQUESTS_COUNT_FIELD_NUMBER = 19;
+ private long filteredReadRequestsCount_;
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ public boolean hasFilteredReadRequestsCount() {
+ return ((bitField0_ & 0x00020000) == 0x00020000);
+ }
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ public long getFilteredReadRequestsCount() {
+ return filteredReadRequestsCount_;
+ }
+
private void initFields() {
regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
stores_ = 0;
@@ -4302,6 +4349,7 @@ public final class ClusterStatusProtos {
dataLocality_ = 0F;
lastMajorCompactionTs_ = 0L;
storeCompleteSequenceId_ = java.util.Collections.emptyList();
+ filteredReadRequestsCount_ = 0L;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -4383,6 +4431,9 @@ public final class ClusterStatusProtos {
for (int i = 0; i < storeCompleteSequenceId_.size(); i++) {
output.writeMessage(18, storeCompleteSequenceId_.get(i));
}
+ if (((bitField0_ & 0x00020000) == 0x00020000)) {
+ output.writeUInt64(19, filteredReadRequestsCount_);
+ }
getUnknownFields().writeTo(output);
}
@@ -4464,6 +4515,10 @@ public final class ClusterStatusProtos {
size += com.google.protobuf.CodedOutputStream
.computeMessageSize(18, storeCompleteSequenceId_.get(i));
}
+ if (((bitField0_ & 0x00020000) == 0x00020000)) {
+ size += com.google.protobuf.CodedOutputStream
+ .computeUInt64Size(19, filteredReadRequestsCount_);
+ }
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -4573,6 +4628,11 @@ public final class ClusterStatusProtos {
}
result = result && getStoreCompleteSequenceIdList()
.equals(other.getStoreCompleteSequenceIdList());
+ result = result && (hasFilteredReadRequestsCount() == other.hasFilteredReadRequestsCount());
+ if (hasFilteredReadRequestsCount()) {
+ result = result && (getFilteredReadRequestsCount()
+ == other.getFilteredReadRequestsCount());
+ }
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -4659,6 +4719,10 @@ public final class ClusterStatusProtos {
hash = (37 * hash) + STORE_COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
hash = (53 * hash) + getStoreCompleteSequenceIdList().hashCode();
}
+ if (hasFilteredReadRequestsCount()) {
+ hash = (37 * hash) + FILTERED_READ_REQUESTS_COUNT_FIELD_NUMBER;
+ hash = (53 * hash) + hashLong(getFilteredReadRequestsCount());
+ }
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -4814,6 +4878,8 @@ public final class ClusterStatusProtos {
} else {
storeCompleteSequenceIdBuilder_.clear();
}
+ filteredReadRequestsCount_ = 0L;
+ bitField0_ = (bitField0_ & ~0x00040000);
return this;
}
@@ -4923,6 +4989,10 @@ public final class ClusterStatusProtos {
} else {
result.storeCompleteSequenceId_ = storeCompleteSequenceIdBuilder_.build();
}
+ if (((from_bitField0_ & 0x00040000) == 0x00040000)) {
+ to_bitField0_ |= 0x00020000;
+ }
+ result.filteredReadRequestsCount_ = filteredReadRequestsCount_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -5016,6 +5086,9 @@ public final class ClusterStatusProtos {
}
}
}
+ if (other.hasFilteredReadRequestsCount()) {
+ setFilteredReadRequestsCount(other.getFilteredReadRequestsCount());
+ }
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -6306,6 +6379,55 @@ public final class ClusterStatusProtos {
return storeCompleteSequenceIdBuilder_;
}
+ // optional uint64 filtered_read_requests_count = 19;
+ private long filteredReadRequestsCount_ ;
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ public boolean hasFilteredReadRequestsCount() {
+ return ((bitField0_ & 0x00040000) == 0x00040000);
+ }
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ public long getFilteredReadRequestsCount() {
+ return filteredReadRequestsCount_;
+ }
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ public Builder setFilteredReadRequestsCount(long value) {
+ bitField0_ |= 0x00040000;
+ filteredReadRequestsCount_ = value;
+ onChanged();
+ return this;
+ }
+ /**
+ * <code>optional uint64 filtered_read_requests_count = 19;</code>
+ *
+ * <pre>
+ ** the current total filtered read requests made to region
+ * </pre>
+ */
+ public Builder clearFilteredReadRequestsCount() {
+ bitField0_ = (bitField0_ & ~0x00040000);
+ filteredReadRequestsCount_ = 0L;
+ onChanged();
+ return this;
+ }
+
// @@protoc_insertion_point(builder_scope:hbase.pb.RegionLoad)
}
@@ -14723,7 +14845,7 @@ public final class ClusterStatusProtos {
"e\030\001 \002(\014\022\023\n\013sequence_id\030\002 \002(\004\"p\n\026RegionSt" +
"oreSequenceIds\022 \n\030last_flushed_sequence_" +
"id\030\001 \002(\004\0224\n\021store_sequence_id\030\002 \003(\0132\031.hb" +
- "ase.pb.StoreSequenceId\"\324\004\n\nRegionLoad\0223\n" +
+ "ase.pb.StoreSequenceId\"\372\004\n\nRegionLoad\0223\n" +
"\020region_specifier\030\001 \002(\0132\031.hbase.pb.Regio" +
"nSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\nstorefiles",
"\030\003 \001(\r\022\"\n\032store_uncompressed_size_MB\030\004 \001" +
@@ -14738,38 +14860,39 @@ public final class ClusterStatusProtos {
"_sequence_id\030\017 \001(\004\022\025\n\rdata_locality\030\020 \001(",
"\002\022#\n\030last_major_compaction_ts\030\021 \001(\004:\0010\022=" +
"\n\032store_complete_sequence_id\030\022 \003(\0132\031.hba" +
- "se.pb.StoreSequenceId\"T\n\023ReplicationLoad" +
- "Sink\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022!\n\031time" +
- "StampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025Replicat" +
- "ionLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022ageOfLa" +
- "stShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQueue\030\003 \002(" +
- "\r\022 \n\030timeStampOfLastShippedOp\030\004 \002(\004\022\026\n\016r" +
- "eplicationLag\030\005 \002(\004\"\212\003\n\nServerLoad\022\032\n\022nu" +
- "mber_of_requests\030\001 \001(\004\022 \n\030total_number_o",
- "f_requests\030\002 \001(\004\022\024\n\014used_heap_MB\030\003 \001(\r\022\023" +
- "\n\013max_heap_MB\030\004 \001(\r\022*\n\014region_loads\030\005 \003(" +
- "\0132\024.hbase.pb.RegionLoad\022+\n\014coprocessors\030" +
- "\006 \003(\0132\025.hbase.pb.Coprocessor\022\031\n\021report_s" +
- "tart_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004" +
- "\022\030\n\020info_server_port\030\t \001(\r\0227\n\016replLoadSo" +
- "urce\030\n \003(\0132\037.hbase.pb.ReplicationLoadSou" +
- "rce\0223\n\014replLoadSink\030\013 \001(\0132\035.hbase.pb.Rep" +
- "licationLoadSink\"a\n\016LiveServerInfo\022$\n\006se" +
- "rver\030\001 \002(\0132\024.hbase.pb.ServerName\022)\n\013serv",
- "er_load\030\002 \002(\0132\024.hbase.pb.ServerLoad\"\250\003\n\r" +
- "ClusterStatus\0228\n\rhbase_version\030\001 \001(\0132!.h" +
- "base.pb.HBaseVersionFileContent\022.\n\014live_" +
- "servers\030\002 \003(\0132\030.hbase.pb.LiveServerInfo\022" +
- "*\n\014dead_servers\030\003 \003(\0132\024.hbase.pb.ServerN" +
- "ame\022;\n\025regions_in_transition\030\004 \003(\0132\034.hba" +
- "se.pb.RegionInTransition\022\'\n\ncluster_id\030\005" +
- " \001(\0132\023.hbase.pb.ClusterId\0222\n\023master_copr" +
- "ocessors\030\006 \003(\0132\025.hbase.pb.Coprocessor\022$\n" +
- "\006master\030\007 \001(\0132\024.hbase.pb.ServerName\022,\n\016b",
- "ackup_masters\030\010 \003(\0132\024.hbase.pb.ServerNam" +
- "e\022\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.had" +
- "oop.hbase.protobuf.generatedB\023ClusterSta" +
- "tusProtosH\001\240\001\001"
+ "se.pb.StoreSequenceId\022$\n\034filtered_read_r" +
+ "equests_count\030\023 \001(\004\"T\n\023ReplicationLoadSi" +
+ "nk\022\032\n\022ageOfLastAppliedOp\030\001 \002(\004\022!\n\031timeSt" +
+ "ampsOfLastAppliedOp\030\002 \002(\004\"\225\001\n\025Replicatio" +
+ "nLoadSource\022\016\n\006peerID\030\001 \002(\t\022\032\n\022ageOfLast" +
+ "ShippedOp\030\002 \002(\004\022\026\n\016sizeOfLogQueue\030\003 \002(\r\022" +
+ " \n\030timeStampOfLastShippedOp\030\004 \002(\004\022\026\n\016rep" +
+ "licationLag\030\005 \002(\004\"\212\003\n\nServerLoad\022\032\n\022numb",
+ "er_of_requests\030\001 \001(\004\022 \n\030total_number_of_" +
+ "requests\030\002 \001(\004\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013" +
+ "max_heap_MB\030\004 \001(\r\022*\n\014region_loads\030\005 \003(\0132" +
+ "\024.hbase.pb.RegionLoad\022+\n\014coprocessors\030\006 " +
+ "\003(\0132\025.hbase.pb.Coprocessor\022\031\n\021report_sta" +
+ "rt_time\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030" +
+ "\n\020info_server_port\030\t \001(\r\0227\n\016replLoadSour" +
+ "ce\030\n \003(\0132\037.hbase.pb.ReplicationLoadSourc" +
+ "e\0223\n\014replLoadSink\030\013 \001(\0132\035.hbase.pb.Repli" +
+ "cationLoadSink\"a\n\016LiveServerInfo\022$\n\006serv",
+ "er\030\001 \002(\0132\024.hbase.pb.ServerName\022)\n\013server" +
+ "_load\030\002 \002(\0132\024.hbase.pb.ServerLoad\"\250\003\n\rCl" +
+ "usterStatus\0228\n\rhbase_version\030\001 \001(\0132!.hba" +
+ "se.pb.HBaseVersionFileContent\022.\n\014live_se" +
+ "rvers\030\002 \003(\0132\030.hbase.pb.LiveServerInfo\022*\n" +
+ "\014dead_servers\030\003 \003(\0132\024.hbase.pb.ServerNam" +
+ "e\022;\n\025regions_in_transition\030\004 \003(\0132\034.hbase" +
+ ".pb.RegionInTransition\022\'\n\ncluster_id\030\005 \001" +
+ "(\0132\023.hbase.pb.ClusterId\0222\n\023master_coproc" +
+ "essors\030\006 \003(\0132\025.hbase.pb.Coprocessor\022$\n\006m",
+ "aster\030\007 \001(\0132\024.hbase.pb.ServerName\022,\n\016bac" +
+ "kup_masters\030\010 \003(\0132\024.hbase.pb.ServerName\022" +
+ "\023\n\013balancer_on\030\t \001(\010BF\n*org.apache.hadoo" +
+ "p.hbase.protobuf.generatedB\023ClusterStatu" +
+ "sProtosH\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -14805,7 +14928,7 @@ public final class ClusterStatusProtos {
internal_static_hbase_pb_RegionLoad_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RegionLoad_descriptor,
- new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", });
+ new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", "LastMajorCompactionTs", "StoreCompleteSequenceId", "FilteredReadRequestsCount", });
internal_static_hbase_pb_ReplicationLoadSink_descriptor =
getDescriptor().getMessageTypes().get(5);
internal_static_hbase_pb_ReplicationLoadSink_fieldAccessorTable = new
diff --git a/hbase-protocol/src/main/protobuf/ClusterStatus.proto b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
index 228be7ebb55..54bc0c3323f 100644
--- a/hbase-protocol/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol/src/main/protobuf/ClusterStatus.proto
@@ -136,6 +136,9 @@ message RegionLoad {
/** the most recent sequence Id of store from cache flush */
repeated StoreSequenceId store_complete_sequence_id = 18;
+
+ /** the current total filtered read requests made to region */
+ optional uint64 filtered_read_requests_count = 19;
}
/* Server-level protobufs */
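[Note] Because the field is optional and takes the previously unused tag 19, mixed-version clusters stay wire-compatible: a RegionLoad serialized by an older server simply omits the field, and the generated getter falls back to the proto2 default. A small sketch against the generated classes (values hypothetical; imports of ByteString and the generated protos omitted):

    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFromUtf8("r1"))
        .build();
    ClusterStatusProtos.RegionLoad fromOldServer =   // field 19 never set
        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec).build();
    assert !fromOldServer.hasFilteredReadRequestsCount();
    assert fromOldServer.getFilteredReadRequestsCount() == 0L; // proto2 default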
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
index b7c894f86e3..c051743b38e 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/master/RegionServerListTmpl.jamon
@@ -174,6 +174,7 @@ for (ServerName serverName: serverNames) {
<th>ServerName</th>
<th>Request Per Second</th>
<th>Read Request Count</th>
+ <th>Filtered Read Request Count</th>
<th>Write Request Count</th>
</tr>
<%java>
@@ -186,6 +187,7 @@ if (sl != null) {
<td><& serverNameLink; serverName=serverName; serverLoad = sl; &></td>
<td><% String.format("%.0f", sl.getRequestsPerSecond()) %></td>
<td><% sl.getReadRequestsCount() %></td>
+<td><% sl.getFilteredReadRequestsCount() %></td>
<td><% sl.getWriteRequestsCount() %></td>
</tr>
<%java>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
index 7740c53b572..cefd476f875 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/RegionListTmpl.jamon
@@ -118,6 +118,7 @@
<tr>
<th>Region Name</th>
<th>Read Request Count</th>
+ <th>Filtered Read Request Count</th>
<th>Write Request Count</th>
</tr>
@@ -132,6 +133,7 @@
<td><% displayName %></td>
<%if load != null %>
<td><% load.getReadRequestsCount() %></td>
+ <td><% load.getFilteredReadRequestsCount() %></td>
<td><% load.getWriteRequestsCount() %></td>
</%if>
</tr>
diff --git a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
index 4f6a754e547..a55a863fd8e 100644
--- a/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
+++ b/hbase-server/src/main/jamon/org/apache/hadoop/hbase/tmpl/regionserver/ServerMetricsTmpl.jamon
@@ -173,11 +173,13 @@ MetricsRegionServerWrapper mWrap;
<tr>
<th>Request Per Second</th>
<th>Read Request Count</th>
+ <th>Filtered Read Request Count</th>
<th>Write Request Count</th>
</tr>
<tr>
<td><% String.format("%.0f", mWrap.getRequestsPerSecond()) %></td>
<td><% mWrap.getReadRequestsCount() %></td>
+ <td><% mWrap.getFilteredReadRequestsCount() %></td>
<td><% mWrap.getWriteRequestsCount() %></td>
</tr>
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
index 99b571fa859..f03c20527ab 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegion.java
@@ -284,6 +284,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
//Number of requests
final Counter readRequestsCount = new Counter();
+ final Counter filteredReadRequestsCount = new Counter();
final Counter writeRequestsCount = new Counter();
// Number of requests blocked by memstore size.
@@ -1112,6 +1113,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
readRequestsCount.add(i);
}
+ @Override
+ public long getFilteredReadRequestsCount() {
+ return filteredReadRequestsCount.get();
+ }
+
@Override
public long getWriteRequestsCount() {
return writeRequestsCount.get();
@@ -6025,6 +6031,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
}
protected void incrementCountOfRowsFilteredMetric(ScannerContext scannerContext) {
+ filteredReadRequestsCount.increment();
+
if (scannerContext == null || !scannerContext.isTrackingMetrics()) return;
scannerContext.getMetrics().countOfRowsFiltered.incrementAndGet();
@@ -6524,6 +6532,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
HRegion r = HRegion.newHRegion(this.fs.getTableDir(), this.getWAL(), fs.getFileSystem(),
this.getBaseConf(), hri, this.getTableDesc(), rsServices);
r.readRequestsCount.set(this.getReadRequestsCount() / 2);
+ r.filteredReadRequestsCount.set(this.getFilteredReadRequestsCount() / 2);
r.writeRequestsCount.set(this.getWriteRequestsCount() / 2);
return r;
}
@@ -6541,6 +6550,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
this.getTableDesc(), this.rsServices);
r.readRequestsCount.set(this.getReadRequestsCount()
+ region_b.getReadRequestsCount());
+ r.filteredReadRequestsCount.set(this.getFilteredReadRequestsCount()
+ + region_b.getFilteredReadRequestsCount());
r.writeRequestsCount.set(this.getWriteRequestsCount()
+ region_b.getWriteRequestsCount());
@@ -7590,7 +7601,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
public static final long FIXED_OVERHEAD = ClassSize.align(
ClassSize.OBJECT +
ClassSize.ARRAY +
- 44 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT +
+ 45 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT +
(14 * Bytes.SIZEOF_LONG) +
5 * Bytes.SIZEOF_BOOLEAN);
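[Note] The FIXED_OVERHEAD bump (44 -> 45 references) is heap-accounting bookkeeping: the new Counter field is one more object reference inside HRegion, and TestHeapSize compares this constant against reflection. The expression, shown standalone:

    long fixedOverhead = ClassSize.align(
        ClassSize.OBJECT + ClassSize.ARRAY
        + 45 * ClassSize.REFERENCE   // was 44 before filteredReadRequestsCount
        + 3 * Bytes.SIZEOF_INT
        + 14 * Bytes.SIZEOF_LONG
        + 5 * Bytes.SIZEOF_BOOLEAN);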
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 1183f96dc6d..9cb100fe592 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -1518,6 +1518,7 @@ public class HRegionServer extends HasThread implements
.setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
.setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
.setReadRequestsCount(r.getReadRequestsCount())
+ .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
.setWriteRequestsCount(r.getWriteRequestsCount())
.setTotalCompactingKVs(totalCompactingKVs)
.setCurrentCompactedKVs(currentCompactedKVs)
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
index f3e89163cb5..4f9ba5b552c 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperImpl.java
@@ -68,6 +68,7 @@ class MetricsRegionServerWrapperImpl
private volatile long storeFileSize = 0;
private volatile double requestsPerSecond = 0.0;
private volatile long readRequestsCount = 0;
+ private volatile long filteredReadRequestsCount = 0;
private volatile long writeRequestsCount = 0;
private volatile long checkAndMutateChecksFailed = 0;
private volatile long checkAndMutateChecksPassed = 0;
@@ -407,6 +408,11 @@ class MetricsRegionServerWrapperImpl
return readRequestsCount;
}
+ @Override
+ public long getFilteredReadRequestsCount() {
+ return filteredReadRequestsCount;
+ }
+
@Override
public long getWriteRequestsCount() {
return writeRequestsCount;
@@ -588,7 +594,8 @@ class MetricsRegionServerWrapperImpl
new HDFSBlocksDistribution();
long tempNumStores = 0, tempNumStoreFiles = 0, tempMemstoreSize = 0, tempStoreFileSize = 0;
- long tempReadRequestsCount = 0, tempWriteRequestsCount = 0;
+ long tempReadRequestsCount = 0, tempFilteredReadRequestsCount = 0,
+ tempWriteRequestsCount = 0;
long tempCheckAndMutateChecksFailed = 0;
long tempCheckAndMutateChecksPassed = 0;
long tempStorefileIndexSize = 0;
@@ -619,6 +626,7 @@ class MetricsRegionServerWrapperImpl
tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL();
tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL();
tempReadRequestsCount += r.getReadRequestsCount();
+ tempFilteredReadRequestsCount += r.getFilteredReadRequestsCount();
tempWriteRequestsCount += r.getWriteRequestsCount();
tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed();
tempCheckAndMutateChecksPassed += r.getCheckAndMutateChecksPassed();
@@ -696,6 +704,7 @@ class MetricsRegionServerWrapperImpl
memstoreSize = tempMemstoreSize;
storeFileSize = tempStoreFileSize;
readRequestsCount = tempReadRequestsCount;
+ filteredReadRequestsCount = tempFilteredReadRequestsCount;
writeRequestsCount = tempWriteRequestsCount;
checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed;
checkAndMutateChecksPassed = tempCheckAndMutateChecksPassed;
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
index 08865e6aeb0..2c54079c338 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperImpl.java
@@ -111,6 +111,11 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
return this.region.getReadRequestsCount();
}
+ @Override
+ public long getFilteredReadRequestCount() {
+ return this.region.getFilteredReadRequestsCount();
+ }
+
@Override
public long getWriteRequestCount() {
return this.region.getWriteRequestsCount();
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
index 213b41a9f2d..976bddb4b3e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/Region.java
@@ -185,6 +185,9 @@ public interface Region extends ConfigurationObserver {
*/
void updateReadRequestsCount(long i);
+ /** @return filtered read requests count for this region */
+ long getFilteredReadRequestsCount();
+
/** @return write request count for this region */
long getWriteRequestsCount();
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
new file mode 100644
index 00000000000..76e58420f3c
--- /dev/null
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestRegionServerMetrics.java
@@ -0,0 +1,379 @@
+/**
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase;
+
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Append;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Increment;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.BinaryComparator;
+import org.apache.hadoop.hbase.filter.CompareFilter;
+import org.apache.hadoop.hbase.filter.RowFilter;
+import org.apache.hadoop.hbase.filter.SingleColumnValueFilter;
+import org.apache.hadoop.hbase.testclassification.MediumTests;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+@Category(MediumTests.class)
+public class TestRegionServerMetrics {
+ private static final HBaseTestingUtility TEST_UTIL = new HBaseTestingUtility();
+ private static final TableName TABLE_NAME = TableName.valueOf("test");
+ private static final byte[] CF1 = "c1".getBytes();
+ private static final byte[] CF2 = "c2".getBytes();
+
+ private static final byte[] ROW1 = "a".getBytes();
+ private static final byte[] ROW2 = "b".getBytes();
+ private static final byte[] ROW3 = "c".getBytes();
+ private static final byte[] COL1 = "q1".getBytes();
+ private static final byte[] COL2 = "q2".getBytes();
+ private static final byte[] COL3 = "q3".getBytes();
+ private static final byte[] VAL1 = "v1".getBytes();
+ private static final byte[] VAL2 = "v2".getBytes();
+ private static final byte[] VAL3 = Bytes.toBytes(0L);
+
+ private static final int MAX_TRY = 20;
+ private static final int SLEEP_MS = 100;
+ private static final int TTL = 1;
+
+ private static Admin admin;
+ private static Collection<ServerName> serverNames;
+ private static Table table;
+ private static List<HRegionInfo> tableRegions;
+
+ private static Map<Metric, Long> requestsMap = new HashMap<>();
+ private static Map<Metric, Long> requestsMapPrev = new HashMap<>();
+
+ @BeforeClass
+ public static void setUpOnce() throws Exception {
+ TEST_UTIL.startMiniCluster();
+ admin = TEST_UTIL.getAdmin();
+ serverNames = admin.getClusterStatus().getServers();
+ table = createTable();
+ putData();
+ tableRegions = admin.getTableRegions(TABLE_NAME);
+
+ for (Metric metric : Metric.values()) {
+ requestsMap.put(metric, 0L);
+ requestsMapPrev.put(metric, 0L);
+ }
+ }
+
+ private static Table createTable() throws IOException {
+ HTableDescriptor td = new HTableDescriptor(TABLE_NAME);
+ HColumnDescriptor cd1 = new HColumnDescriptor(CF1);
+ td.addFamily(cd1);
+ HColumnDescriptor cd2 = new HColumnDescriptor(CF2);
+ cd2.setTimeToLive(TTL);
+ td.addFamily(cd2);
+
+ admin.createTable(td);
+ return TEST_UTIL.getConnection().getTable(TABLE_NAME);
+ }
+
+ private static void testReadRequests(long resultCount,
+ long expectedReadRequests, long expectedFilteredReadRequests)
+ throws IOException, InterruptedException {
+ updateMetricsMap();
+ System.out.println("requestsMapPrev = " + requestsMapPrev);
+ System.out.println("requestsMap = " + requestsMap);
+
+ assertEquals(expectedReadRequests,
+ requestsMap.get(Metric.REGION_READ) - requestsMapPrev.get(Metric.REGION_READ));
+ assertEquals(expectedReadRequests,
+ requestsMap.get(Metric.SERVER_READ) - requestsMapPrev.get(Metric.SERVER_READ));
+ assertEquals(expectedFilteredReadRequests,
+ requestsMap.get(Metric.FILTERED_REGION_READ)
+ - requestsMapPrev.get(Metric.FILTERED_REGION_READ));
+ assertEquals(expectedFilteredReadRequests,
+ requestsMap.get(Metric.FILTERED_SERVER_READ)
+ - requestsMapPrev.get(Metric.FILTERED_SERVER_READ));
+ assertEquals(expectedReadRequests, resultCount);
+ }
+
+ private static void updateMetricsMap() throws IOException, InterruptedException {
+ for (Metric metric : Metric.values()) {
+ requestsMapPrev.put(metric, requestsMap.get(metric));
+ }
+
+ ServerLoad serverLoad = null;
+ RegionLoad regionLoadOuter = null;
+ boolean metricsUpdated = false;
+ for (int i = 0; i < MAX_TRY; i++) {
+ for (ServerName serverName : serverNames) {
+ serverLoad = admin.getClusterStatus().getLoad(serverName);
+
+ Map<byte[], RegionLoad> regionsLoad = serverLoad.getRegionsLoad();
+ for (HRegionInfo tableRegion : tableRegions) {
+ RegionLoad regionLoad = regionsLoad.get(tableRegion.getRegionName());
+ if (regionLoad != null) {
+ regionLoadOuter = regionLoad;
+ for (Metric metric : Metric.values()) {
+ if (getReadRequest(serverLoad, regionLoad, metric) > requestsMapPrev.get(metric)) {
+ for (Metric metricInner : Metric.values()) {
+ requestsMap.put(metricInner, getReadRequest(serverLoad, regionLoad, metricInner));
+ }
+ metricsUpdated = true;
+ break;
+ }
+ }
+ }
+ }
+ }
+ if (metricsUpdated) {
+ break;
+ }
+ Thread.sleep(SLEEP_MS);
+ }
+ if (!metricsUpdated) {
+ for (Metric metric : Metric.values()) {
+ requestsMap.put(metric, getReadRequest(serverLoad, regionLoadOuter, metric));
+ }
+ }
+ }
+
+ private static long getReadRequest(ServerLoad serverLoad, RegionLoad regionLoad, Metric metric) {
+ switch (metric) {
+ case REGION_READ:
+ return regionLoad.getReadRequestsCount();
+ case SERVER_READ:
+ return serverLoad.getReadRequestsCount();
+ case FILTERED_REGION_READ:
+ return regionLoad.getFilteredReadRequestsCount();
+ case FILTERED_SERVER_READ:
+ return serverLoad.getFilteredReadRequestsCount();
+ default:
+ throw new IllegalStateException();
+ }
+ }
+
+ private static void putData() throws IOException {
+ Put put;
+
+ put = new Put(ROW1);
+ put.addColumn(CF1, COL1, VAL1);
+ put.addColumn(CF1, COL2, VAL2);
+ put.addColumn(CF1, COL3, VAL3);
+ table.put(put);
+ put = new Put(ROW2);
+ put.addColumn(CF1, COL1, VAL2); // put val2 instead of val1
+ put.addColumn(CF1, COL2, VAL2);
+ table.put(put);
+ put = new Put(ROW3);
+ put.addColumn(CF1, COL1, VAL1);
+ put.addColumn(CF1, COL2, VAL2);
+ table.put(put);
+ }
+
+ private static void putTTLExpiredData() throws IOException, InterruptedException {
+ Put put;
+
+ put = new Put(ROW1);
+ put.addColumn(CF2, COL1, VAL1);
+ put.addColumn(CF2, COL2, VAL2);
+ table.put(put);
+
+ Thread.sleep(TTL * 1000);
+
+ put = new Put(ROW2);
+ put.addColumn(CF2, COL1, VAL1);
+ put.addColumn(CF2, COL2, VAL2);
+ table.put(put);
+
+ put = new Put(ROW3);
+ put.addColumn(CF2, COL1, VAL1);
+ put.addColumn(CF2, COL2, VAL2);
+ table.put(put);
+ }
+
+ @AfterClass
+ public static void tearDownOnce() throws Exception {
+ TEST_UTIL.shutdownMiniCluster();
+ }
+
+ @Test
+ public void testReadRequestsCountNotFiltered() throws Exception {
+ int resultCount;
+ Scan scan;
+ Append append;
+ Put put;
+ Increment increment;
+ Get get;
+
+ // test for scan
+ scan = new Scan();
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 3, 0);
+ }
+
+ // test for scan
+ scan = new Scan(ROW2, ROW3);
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 1, 0);
+ }
+
+ // test for get
+ get = new Get(ROW2);
+ Result result = table.get(get);
+ resultCount = result.isEmpty() ? 0 : 1;
+ testReadRequests(resultCount, 1, 0);
+
+ // test for increment
+ increment = new Increment(ROW1);
+ increment.addColumn(CF1, COL3, 1);
+ result = table.increment(increment);
+ resultCount = result.isEmpty() ? 0 : 1;
+ testReadRequests(resultCount, 1, 0);
+
+ // test for checkAndPut
+ put = new Put(ROW1);
+ put.addColumn(CF1, COL2, VAL2);
+ boolean checkAndPut =
+ table.checkAndPut(ROW1, CF1, COL2, CompareFilter.CompareOp.EQUAL, VAL2, put);
+ resultCount = checkAndPut ? 1 : 0;
+ testReadRequests(resultCount, 1, 0);
+
+ // test for append
+ append = new Append(ROW1);
+ append.add(CF1, COL2, VAL2);
+ result = table.append(append);
+ resultCount = result.isEmpty() ? 0 : 1;
+ testReadRequests(resultCount, 1, 0);
+
+ // test for checkAndMutate
+ put = new Put(ROW1);
+ put.addColumn(CF1, COL1, VAL1);
+ RowMutations rm = new RowMutations(ROW1);
+ rm.add(put);
+ boolean checkAndMutate =
+ table.checkAndMutate(ROW1, CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1, rm);
+ resultCount = checkAndMutate ? 1 : 0;
+ testReadRequests(resultCount, 1, 0);
+ }
+
+ @Test
+ public void testReadRequestsCountWithFilter() throws Exception {
+ int resultCount;
+ Scan scan;
+
+ // test for scan
+ scan = new Scan();
+ scan.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 2, 1);
+ }
+
+ // test for scan
+ scan = new Scan();
+ scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 1, 2);
+ }
+
+ // test for scan
+ scan = new Scan(ROW2, ROW3);
+ scan.setFilter(new RowFilter(CompareFilter.CompareOp.EQUAL, new BinaryComparator(ROW1)));
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 0, 1);
+ }
+
+ // fixme filtered get should not increase readRequestsCount
+// Get get = new Get(ROW2);
+// get.setFilter(new SingleColumnValueFilter(CF1, COL1, CompareFilter.CompareOp.EQUAL, VAL1));
+// Result result = table.get(get);
+// resultCount = result.isEmpty() ? 0 : 1;
+// testReadRequests(resultCount, 0, 1);
+ }
+
+ @Test
+ public void testReadRequestsCountWithDeletedRow() throws Exception {
+ try {
+ Delete delete = new Delete(ROW3);
+ table.delete(delete);
+
+ Scan scan = new Scan();
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ int resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 2, 1);
+ }
+ } finally {
+ Put put = new Put(ROW3);
+ put.addColumn(CF1, COL1, VAL1);
+ put.addColumn(CF1, COL2, VAL2);
+ table.put(put);
+ }
+ }
+
+ @Test
+ public void testReadRequestsCountWithTTLExpiration() throws Exception {
+ putTTLExpiredData();
+
+ Scan scan = new Scan();
+ scan.addFamily(CF2);
+ try (ResultScanner scanner = table.getScanner(scan)) {
+ int resultCount = 0;
+ for (Result ignore : scanner) {
+ resultCount++;
+ }
+ testReadRequests(resultCount, 2, 1);
+ }
+ }
+
+ private enum Metric {REGION_READ, SERVER_READ, FILTERED_REGION_READ, FILTERED_SERVER_READ}
+}
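[Note] Tracing the expected numbers: putData() writes VAL1 to CF1:COL1 of ROW1 and ROW3 but VAL2 to ROW2, so the SingleColumnValueFilter scan returns 2 rows and filters 1; the RowFilter(EQUAL, ROW1) full-table scan returns 1 and filters 2; the same RowFilter over [ROW2, ROW3) returns 0 and filters 1. The last two tests rely on rows eliminated by a Delete or by TTL expiry also being counted, which is exactly what the unconditional increment added to incrementCountOfRowsFilteredMetric in HRegion provides.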
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
index 5c56e9ac20e..e6c17a5b53c 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestServerLoad.java
@@ -44,6 +44,7 @@ public class TestServerLoad {
assertEquals(820, sl.getStorefileSizeInMB());
assertEquals(82, sl.getStorefileIndexSizeInMB());
assertEquals(((long)Integer.MAX_VALUE)*2, sl.getReadRequestsCount());
+ assertEquals(300, sl.getFilteredReadRequestsCount());
}
@@ -57,6 +58,7 @@ public class TestServerLoad {
assertTrue(slToString.contains("storefileSizeMB=820"));
assertTrue(slToString.contains("rootIndexSizeKB=504"));
assertTrue(slToString.contains("coprocessors=[]"));
+ assertTrue(slToString.contains("filteredReadRequestsCount=300"));
}
@Test
@@ -80,10 +82,12 @@ public class TestServerLoad {
ClusterStatusProtos.RegionLoad rlOne =
ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecOne).setStores(10)
.setStorefiles(101).setStoreUncompressedSizeMB(106).setStorefileSizeMB(520)
+ .setFilteredReadRequestsCount(100)
.setStorefileIndexSizeMB(42).setRootIndexSizeKB(201).setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
ClusterStatusProtos.RegionLoad rlTwo =
ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(rSpecTwo).setStores(3)
.setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
+ .setFilteredReadRequestsCount(200)
.setStorefileIndexSizeMB(40).setRootIndexSizeKB(303).setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE).build();
ClusterStatusProtos.ServerLoad sl =
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
index 0d932841b2c..4b006324316 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionServerWrapperStub.java
@@ -85,6 +85,11 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
return 997;
}
+ @Override
+ public long getFilteredReadRequestsCount() {
+ return 1997;
+ }
+
@Override
public long getWriteRequestsCount() {
return 707;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
index c43ccc32b7f..8e6dd743573 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/MetricsRegionWrapperStub.java
@@ -75,6 +75,11 @@ public class MetricsRegionWrapperStub implements MetricsRegionWrapper {
return 105;
}
+ @Override
+ public long getFilteredReadRequestCount() {
+ return 107;
+ }
+
@Override
public long getWriteRequestCount() {
return 106;
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
index e7398905cd6..cc09d157d7a 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegion.java
@@ -45,6 +45,10 @@ public class TestMetricsRegion {
HELPER.assertGauge(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
103, agg);
+ HELPER.assertCounter(
+ "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
+ "filteredReadRequestCount",
+ 107, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid",
0, agg);
@@ -62,6 +66,10 @@ public class TestMetricsRegion {
HELPER.assertGauge(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
103, agg);
+ HELPER.assertCounter(
+ "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
+ "filteredReadRequestCount",
+ 107, agg);
HELPER.assertCounter(
"namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_replicaid",
1, agg);
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
index f3ce0bda0d3..77d6a95d6e2 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/regionserver/TestMetricsRegionServer.java
@@ -67,6 +67,7 @@ public class TestMetricsRegionServer {
HELPER.assertGauge("storeFileSize", 1900, serverSource);
HELPER.assertCounter("totalRequestCount", 899, serverSource);
HELPER.assertCounter("readRequestCount", 997, serverSource);
+ HELPER.assertCounter("filteredReadRequestCount", 1997, serverSource);
HELPER.assertCounter("writeRequestCount", 707, serverSource);
HELPER.assertCounter("checkMutateFailedCount", 401, serverSource);
HELPER.assertCounter("checkMutatePassedCount", 405, serverSource);