HBASE-12986 Compaction pressure based client pushback

Signed-off-by: Andrew Purtell <apurtell@apache.org>
Author: chenheng, 2015-10-29 11:54:21 +08:00 (committed by Andrew Purtell)
Parent: e4bf77e2de
Commit: 6871724104
6 changed files with 200 additions and 37 deletions

ExponentialClientBackoffPolicy.java

@@ -70,6 +70,9 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
// Factor in heap occupancy
float heapOccupancy = regionStats.getHeapOccupancyPercent() / 100.0f;
// Factor in compaction pressure, 1.0 means heavy compaction pressure
float compactionPressure = regionStats.getCompactionPressure() / 100.0f;
if (heapOccupancy >= heapOccupancyLowWatermark) {
// If we are higher than the high watermark, we are already applying max
// backoff and cannot scale more (see scale() below)
@@ -80,7 +83,7 @@ public class ExponentialClientBackoffPolicy implements ClientBackoffPolicy {
scale(heapOccupancy, heapOccupancyLowWatermark, heapOccupancyHighWatermark,
0.1, 1.0));
}
percent = Math.max(percent, compactionPressure);
// Raise the percent, a value less than 1, to the 4th power. The closer we move to
// 100 percent, the closer the value moves to 1, and the power gives the exponential curve
double multiplier = Math.pow(percent, 4.0);
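
For context on the change above: the server-reported compaction pressure (0-100) is normalized to [0,1] and takes over whenever it exceeds the memstore/heap-derived percent, before the 4th-power curve is applied. A minimal standalone sketch of that arithmetic (the 5-minute cap and the omission of the scale() helper are illustrative assumptions, not the policy's exact internals):

// Hedged sketch only: shows how a 0-100 compactionPressure value dominates the backoff
// once it exceeds the memstore/heap-derived percent. Not the real policy class.
public class CompactionPressureBackoffSketch {
  static final long MAX_BACKOFF_MS = 5 * 60 * 1000L; // assumed cap, for illustration

  static long backoffMs(int memstoreLoadPercent, int compactionPressurePercent) {
    float percent = memstoreLoadPercent / 100.0f;
    float compactionPressure = compactionPressurePercent / 100.0f;
    percent = Math.max(percent, compactionPressure);
    // Raising to the 4th power keeps backoff negligible for small values and
    // pushes it toward the cap as the percent approaches 1.0.
    double multiplier = Math.pow(percent, 4.0);
    return (long) (multiplier * MAX_BACKOFF_MS);
  }

  public static void main(String[] args) {
    System.out.println(backoffMs(0, 0));   // 0
    System.out.println(backoffMs(0, 50));  // 18750 ms, about 6% of the cap
    System.out.println(backoffMs(0, 100)); // 300000 ms, the full cap
  }
}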

ServerStatistics.java

@@ -57,10 +57,12 @@ public class ServerStatistics {
public static class RegionStatistics {
private int memstoreLoad = 0;
private int heapOccupancy = 0;
private int compactionPressure = 0;
public void update(ClientProtos.RegionLoadStats currentStats) {
this.memstoreLoad = currentStats.getMemstoreLoad();
this.heapOccupancy = currentStats.getHeapOccupancy();
this.compactionPressure = currentStats.getCompactionPressure();
}
public int getMemstoreLoadPercent(){
@@ -70,5 +72,10 @@ public class ServerStatistics {
public int getHeapOccupancyPercent(){
return this.heapOccupancy;
}
public int getCompactionPressure() {
return compactionPressure;
}
}
}
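
A short usage sketch of the new accessor, assuming the existing ServerStatistics.getStatsForRegion(byte[]) lookup and a caller-supplied region name (the numbers are illustrative only):

// Hedged sketch: feed a RegionLoadStats protobuf carrying the new field into
// ServerStatistics, then read it back the way the backoff policy would.
static int trackCompactionPressure(byte[] regionName) {
  ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
      .setMemstoreLoad(10)
      .setHeapOccupancy(20)
      .setCompactionPressure(60) // new field, 0-100
      .build();
  ServerStatistics stats = new ServerStatistics();
  stats.update(regionName, stat);
  return stats.getStatsForRegion(regionName).getCompactionPressure(); // 60
}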

TestClientExponentialBackoff.java

@@ -113,22 +113,46 @@ public class TestClientExponentialBackoff {
ServerStatistics stats = new ServerStatistics();
long backoffTime;
update(stats, 0, 95);
update(stats, 0, 95, 0);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertTrue("Heap occupancy at low watermark had no effect", backoffTime > 0);
long previous = backoffTime;
update(stats, 0, 96);
update(stats, 0, 96, 0);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertTrue("Increase above low watermark should have increased backoff",
backoffTime > previous);
update(stats, 0, 98);
update(stats, 0, 98, 0);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertEquals("We should be using max backoff when at high watermark", backoffTime,
ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
}
@Test
public void testCompactionPressurePolicy() {
Configuration conf = new Configuration(false);
ExponentialClientBackoffPolicy backoff = new ExponentialClientBackoffPolicy(conf);
ServerStatistics stats = new ServerStatistics();
long backoffTime;
update(stats, 0, 0, 0);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertTrue("Compaction pressure has no effect", backoffTime == 0);
long previous = backoffTime;
update(stats, 0, 0, 50);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertTrue("Compaction pressure should be bigger",
backoffTime > previous);
update(stats, 0, 0, 100);
backoffTime = backoff.getBackoffTime(server, regionname, stats);
assertEquals("under heavy compaction pressure", backoffTime,
ExponentialClientBackoffPolicy.DEFAULT_MAX_BACKOFF);
}
private void update(ServerStatistics stats, int load) {
ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
.setMemstoreLoad
@@ -136,10 +160,12 @@ public class TestClientExponentialBackoff {
stats.update(regionname, stat);
}
private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy) {
private void update(ServerStatistics stats, int memstoreLoad, int heapOccupancy,
int compactionPressure) {
ClientProtos.RegionLoadStats stat = ClientProtos.RegionLoadStats.newBuilder()
.setMemstoreLoad(memstoreLoad)
.setHeapOccupancy(heapOccupancy)
.setCompactionPressure(compactionPressure)
.build();
stats.update(regionname, stat);
}

ClientProtos.java

@@ -27489,6 +27489,24 @@ public final class ClientProtos {
* </pre>
*/
int getHeapOccupancy();
// optional int32 compactionPressure = 3 [default = 0];
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
boolean hasCompactionPressure();
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
int getCompactionPressure();
}
/**
* Protobuf type {@code hbase.pb.RegionLoadStats}
@@ -27556,6 +27574,11 @@ public final class ClientProtos {
heapOccupancy_ = input.readInt32();
break;
}
case 24: {
bitField0_ |= 0x00000004;
compactionPressure_ = input.readInt32();
break;
}
}
}
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
@@ -27646,9 +27669,34 @@ public final class ClientProtos {
return heapOccupancy_;
}
// optional int32 compactionPressure = 3 [default = 0];
public static final int COMPACTIONPRESSURE_FIELD_NUMBER = 3;
private int compactionPressure_;
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
public boolean hasCompactionPressure() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
public int getCompactionPressure() {
return compactionPressure_;
}
private void initFields() {
memstoreLoad_ = 0;
heapOccupancy_ = 0;
compactionPressure_ = 0;
}
private byte memoizedIsInitialized = -1;
public final boolean isInitialized() {
@@ -27668,6 +27716,9 @@ public final class ClientProtos {
if (((bitField0_ & 0x00000002) == 0x00000002)) {
output.writeInt32(2, heapOccupancy_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
output.writeInt32(3, compactionPressure_);
}
getUnknownFields().writeTo(output);
}
@@ -27685,6 +27736,10 @@ public final class ClientProtos {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(2, heapOccupancy_);
}
if (((bitField0_ & 0x00000004) == 0x00000004)) {
size += com.google.protobuf.CodedOutputStream
.computeInt32Size(3, compactionPressure_);
}
size += getUnknownFields().getSerializedSize();
memoizedSerializedSize = size;
return size;
@@ -27718,6 +27773,11 @@ public final class ClientProtos {
result = result && (getHeapOccupancy()
== other.getHeapOccupancy());
}
result = result && (hasCompactionPressure() == other.hasCompactionPressure());
if (hasCompactionPressure()) {
result = result && (getCompactionPressure()
== other.getCompactionPressure());
}
result = result &&
getUnknownFields().equals(other.getUnknownFields());
return result;
@@ -27739,6 +27799,10 @@ public final class ClientProtos {
hash = (37 * hash) + HEAPOCCUPANCY_FIELD_NUMBER;
hash = (53 * hash) + getHeapOccupancy();
}
if (hasCompactionPressure()) {
hash = (37 * hash) + COMPACTIONPRESSURE_FIELD_NUMBER;
hash = (53 * hash) + getCompactionPressure();
}
hash = (29 * hash) + getUnknownFields().hashCode();
memoizedHashCode = hash;
return hash;
@@ -27857,6 +27921,8 @@ public final class ClientProtos {
bitField0_ = (bitField0_ & ~0x00000001);
heapOccupancy_ = 0;
bitField0_ = (bitField0_ & ~0x00000002);
compactionPressure_ = 0;
bitField0_ = (bitField0_ & ~0x00000004);
return this;
}
@@ -27893,6 +27959,10 @@ public final class ClientProtos {
to_bitField0_ |= 0x00000002;
}
result.heapOccupancy_ = heapOccupancy_;
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
to_bitField0_ |= 0x00000004;
}
result.compactionPressure_ = compactionPressure_;
result.bitField0_ = to_bitField0_;
onBuilt();
return result;
@@ -27915,6 +27985,9 @@ public final class ClientProtos {
if (other.hasHeapOccupancy()) {
setHeapOccupancy(other.getHeapOccupancy());
}
if (other.hasCompactionPressure()) {
setCompactionPressure(other.getCompactionPressure());
}
this.mergeUnknownFields(other.getUnknownFields());
return this;
}
@@ -28044,6 +28117,55 @@ public final class ClientProtos {
return this;
}
// optional int32 compactionPressure = 3 [default = 0];
private int compactionPressure_ ;
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
public boolean hasCompactionPressure() {
return ((bitField0_ & 0x00000004) == 0x00000004);
}
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
public int getCompactionPressure() {
return compactionPressure_;
}
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
public Builder setCompactionPressure(int value) {
bitField0_ |= 0x00000004;
compactionPressure_ = value;
onChanged();
return this;
}
/**
* <code>optional int32 compactionPressure = 3 [default = 0];</code>
*
* <pre>
* Compaction pressure. Guaranteed to be positive, between 0 and 100.
* </pre>
*/
public Builder clearCompactionPressure() {
bitField0_ = (bitField0_ & ~0x00000004);
compactionPressure_ = 0;
onChanged();
return this;
}
// @@protoc_insertion_point(builder_scope:hbase.pb.RegionLoadStats)
}
@@ -33308,38 +33430,39 @@ public final class ClientProtos {
"oprocessorServiceCall\"k\n\014RegionAction\022)\n",
"\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifier" +
"\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(\0132\020.hbase" +
".pb.Action\"D\n\017RegionLoadStats\022\027\n\014memstor" +
".pb.Action\"c\n\017RegionLoadStats\022\027\n\014memstor" +
"eLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy\030\002 \001(\005:\0010" +
"\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n" +
"\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcep" +
"tion\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016s" +
"ervice_result\030\004 \001(\0132\".hbase.pb.Coprocess" +
"orServiceResult\022,\n\tloadStats\030\005 \001(\0132\031.hba" +
"se.pb.RegionLoadStats\"x\n\022RegionActionRes",
"ult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase.p" +
"b.ResultOrException\022*\n\texception\030\002 \001(\0132\027" +
".hbase.pb.NameBytesPair\"x\n\014MultiRequest\022" +
",\n\014regionAction\030\001 \003(\0132\026.hbase.pb.RegionA" +
"ction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003" +
" \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRespon" +
"se\0228\n\022regionActionResult\030\001 \003(\0132\034.hbase.p" +
"b.RegionActionResult\022\021\n\tprocessed\030\002 \001(\010*" +
"\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" +
"2\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get",
"Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" +
"\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" +
"ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque" +
"st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" +
"ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb" +
"ase.pb.BulkLoadHFileResponse\022X\n\013ExecServ" +
"ice\022#.hbase.pb.CoprocessorServiceRequest" +
"\032$.hbase.pb.CoprocessorServiceResponse\022d" +
"\n\027ExecRegionServerService\022#.hbase.pb.Cop" +
"rocessorServiceRequest\032$.hbase.pb.Coproc",
"essorServiceResponse\0228\n\005Multi\022\026.hbase.pb" +
".MultiRequest\032\027.hbase.pb.MultiResponseBB" +
"\n*org.apache.hadoop.hbase.protobuf.gener" +
"atedB\014ClientProtosH\001\210\001\001\240\001\001"
"\022\035\n\022compactionPressure\030\003 \001(\005:\0010\"\332\001\n\021Resu" +
"ltOrException\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002" +
" \001(\0132\020.hbase.pb.Result\022*\n\texception\030\003 \001(" +
"\0132\027.hbase.pb.NameBytesPair\022:\n\016service_re" +
"sult\030\004 \001(\0132\".hbase.pb.CoprocessorService" +
"Result\022,\n\tloadStats\030\005 \001(\0132\031.hbase.pb.Reg",
"ionLoadStats\"x\n\022RegionActionResult\0226\n\021re" +
"sultOrException\030\001 \003(\0132\033.hbase.pb.ResultO" +
"rException\022*\n\texception\030\002 \001(\0132\027.hbase.pb" +
".NameBytesPair\"x\n\014MultiRequest\022,\n\014region" +
"Action\030\001 \003(\0132\026.hbase.pb.RegionAction\022\022\n\n" +
"nonceGroup\030\002 \001(\004\022&\n\tcondition\030\003 \001(\0132\023.hb" +
"ase.pb.Condition\"\\\n\rMultiResponse\0228\n\022reg" +
"ionActionResult\030\001 \003(\0132\034.hbase.pb.RegionA" +
"ctionResult\022\021\n\tprocessed\030\002 \001(\010*\'\n\013Consis" +
"tency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\0012\203\004\n\rClie",
"ntService\0222\n\003Get\022\024.hbase.pb.GetRequest\032\025" +
".hbase.pb.GetResponse\022;\n\006Mutate\022\027.hbase." +
"pb.MutateRequest\032\030.hbase.pb.MutateRespon" +
"se\0225\n\004Scan\022\025.hbase.pb.ScanRequest\032\026.hbas" +
"e.pb.ScanResponse\022P\n\rBulkLoadHFile\022\036.hba" +
"se.pb.BulkLoadHFileRequest\032\037.hbase.pb.Bu" +
"lkLoadHFileResponse\022X\n\013ExecService\022#.hba" +
"se.pb.CoprocessorServiceRequest\032$.hbase." +
"pb.CoprocessorServiceResponse\022d\n\027ExecReg" +
"ionServerService\022#.hbase.pb.CoprocessorS",
"erviceRequest\032$.hbase.pb.CoprocessorServ" +
"iceResponse\0228\n\005Multi\022\026.hbase.pb.MultiReq" +
"uest\032\027.hbase.pb.MultiResponseBB\n*org.apa" +
"che.hadoop.hbase.protobuf.generatedB\014Cli" +
"entProtosH\001\210\001\001\240\001\001"
};
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -33501,7 +33624,7 @@ public final class ClientProtos {
internal_static_hbase_pb_RegionLoadStats_fieldAccessorTable = new
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
internal_static_hbase_pb_RegionLoadStats_descriptor,
new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", });
new java.lang.String[] { "MemstoreLoad", "HeapOccupancy", "CompactionPressure", });
internal_static_hbase_pb_ResultOrException_descriptor =
getDescriptor().getMessageTypes().get(23);
internal_static_hbase_pb_ResultOrException_fieldAccessorTable = new

Client.proto

@@ -395,6 +395,8 @@ message RegionLoadStats {
// Percent JVM heap occupancy. Guaranteed to be positive, between 0 and 100.
// We can move this to "ServerLoadStats" should we develop them.
optional int32 heapOccupancy = 2 [default = 0];
// Compaction pressure. Guaranteed to be positive, between 0 and 100.
optional int32 compactionPressure = 3 [default = 0];
}
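
Since the new field is number 3 with varint wire type, its tag byte is (3 << 3) | 0 = 24, which matches the "case 24:" branch handled by the regenerated parser above. A small hedged check (class name and value are illustrative only):

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos;

// Hedged sketch: serialize a RegionLoadStats with only compactionPressure set and
// inspect the wire bytes for field 3.
public class CompactionPressureTagSketch {
  public static void main(String[] args) {
    byte[] bytes = ClientProtos.RegionLoadStats.newBuilder()
        .setCompactionPressure(75)
        .build()
        .toByteArray();
    System.out.println(bytes[0]); // 24: tag for field 3, wire type 0 (varint)
    System.out.println(bytes[1]); // 75: the varint-encoded value
  }
}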
/**

HRegion.java

@@ -6706,6 +6706,8 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
stats.setMemstoreLoad((int) (Math.min(100, (this.memstoreSize.get() * 100) / this
.memstoreFlushSize)));
stats.setHeapOccupancy((int)rsServices.getHeapMemoryManager().getHeapOccupancyPercent()*100);
stats.setCompactionPressure((int)rsServices.getCompactionPressure()*100 > 100 ? 100 :
(int)rsServices.getCompactionPressure()*100);
return stats.build();
}
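
For reference, a standalone sketch of the 0-100 conversion the field comment promises, under the assumption that rsServices.getCompactionPressure() returns a double that normally lies in [0.0, 1.0] and can exceed 1.0 when store files pass the blocking limit:

// Hedged sketch: scale a fractional compaction pressure into the 0-100 range documented
// for RegionLoadStats.compactionPressure, clamping anything at or above 1.0 to 100.
public class CompactionPressureClampSketch {
  static int compactionPressurePercent(double compactionPressure) {
    int percent = (int) (compactionPressure * 100); // scale before truncating to int
    return Math.min(100, Math.max(0, percent));
  }

  public static void main(String[] args) {
    System.out.println(compactionPressurePercent(0.5)); // 50
    System.out.println(compactionPressurePercent(2.0)); // 100 (clamped)
  }
}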