HBASE-20740 StochasticLoadBalancer should consider CoprocessorService request factor when computing cost (chenxu)
parent 7b716c964b
commit 98245ca6e4
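This change teaches the StochasticLoadBalancer to weigh coprocessor service request load the same way it already weighs read and write request load, by adding a CPRequestCostFunction driven by the new per-region cpRequestCount metric. As a minimal sketch of how an operator might tune the new multiplier (the property key and its default of 5 come from CPRequestCostFunction in the diff below; the value 20f and the use of a programmatic Configuration are only illustrative assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch only: give coprocessor request imbalance more weight in the balancer's cost.
// The key and its default (5) are defined by CPRequestCostFunction; 20f is an arbitrary example.
Configuration conf = HBaseConfiguration.create();
conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 20f);

In a real deployment this would normally be set in the master's hbase-site.xml rather than in code.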
@@ -93,6 +93,11 @@ public class RegionLoad implements RegionMetrics {
    return metrics.getReadRequestCount();
  }

  @Override
  public long getCpRequestCount() {
    return metrics.getCpRequestCount();
  }

  @Override
  public long getFilteredReadRequestCount() {
    return metrics.getFilteredReadRequestCount();
@@ -66,10 +66,16 @@ public interface RegionMetrics {
  long getWriteRequestCount();

  /**
   * @return the number of write requests and read requests made to region
   * @return the number of coprocessor service requests made to region
   */
  public long getCpRequestCount();

  /**
   * @return the number of write requests and read requests and coprocessor
   *         service requests made to region
   */
  default long getRequestCount() {
    return getReadRequestCount() + getWriteRequestCount();
    return getReadRequestCount() + getWriteRequestCount() + getCpRequestCount();
  }

  /**
@@ -57,6 +57,7 @@ public final class RegionMetricsBuilder {
      .setLastMajorCompactionTimestamp(regionLoadPB.getLastMajorCompactionTs())
      .setMemStoreSize(new Size(regionLoadPB.getMemStoreSizeMB(), Size.Unit.MEGABYTE))
      .setReadRequestCount(regionLoadPB.getReadRequestsCount())
      .setCpRequestCount(regionLoadPB.getCpRequestsCount())
      .setWriteRequestCount(regionLoadPB.getWriteRequestsCount())
      .setStoreFileIndexSize(new Size(regionLoadPB.getStorefileIndexSizeKB(),
        Size.Unit.KILOBYTE))
@@ -102,6 +103,7 @@ public final class RegionMetricsBuilder {
      .setLastMajorCompactionTs(regionMetrics.getLastMajorCompactionTimestamp())
      .setMemStoreSizeMB((int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE))
      .setReadRequestsCount(regionMetrics.getReadRequestCount())
      .setCpRequestsCount(regionMetrics.getCpRequestCount())
      .setWriteRequestsCount(regionMetrics.getWriteRequestCount())
      .setStorefileIndexSizeKB((long) regionMetrics.getStoreFileIndexSize()
        .get(Size.Unit.KILOBYTE))
@@ -134,6 +136,7 @@ public final class RegionMetricsBuilder {
  private Size uncompressedStoreFileSize = Size.ZERO;
  private long writeRequestCount;
  private long readRequestCount;
  private long cpRequestCount;
  private long filteredReadRequestCount;
  private long completedSequenceId;
  private Map<byte[], Long> storeSequenceIds = Collections.emptyMap();
@@ -195,6 +198,10 @@ public final class RegionMetricsBuilder {
    this.readRequestCount = value;
    return this;
  }
  public RegionMetricsBuilder setCpRequestCount(long value) {
    this.cpRequestCount = value;
    return this;
  }
  public RegionMetricsBuilder setFilteredReadRequestCount(long value) {
    this.filteredReadRequestCount = value;
    return this;
@@ -231,6 +238,7 @@ public final class RegionMetricsBuilder {
      uncompressedStoreFileSize,
      writeRequestCount,
      readRequestCount,
      cpRequestCount,
      filteredReadRequestCount,
      completedSequenceId,
      storeSequenceIds,
@@ -253,6 +261,7 @@ public final class RegionMetricsBuilder {
    private final Size uncompressedStoreFileSize;
    private final long writeRequestCount;
    private final long readRequestCount;
    private final long cpRequestCount;
    private final long filteredReadRequestCount;
    private final long completedSequenceId;
    private final Map<byte[], Long> storeSequenceIds;
@@ -272,6 +281,7 @@ public final class RegionMetricsBuilder {
        Size uncompressedStoreFileSize,
        long writeRequestCount,
        long readRequestCount,
        long cpRequestCount,
        long filteredReadRequestCount,
        long completedSequenceId,
        Map<byte[], Long> storeSequenceIds,
@@ -291,6 +301,7 @@ public final class RegionMetricsBuilder {
      this.uncompressedStoreFileSize = Preconditions.checkNotNull(uncompressedStoreFileSize);
      this.writeRequestCount = writeRequestCount;
      this.readRequestCount = readRequestCount;
      this.cpRequestCount = cpRequestCount;
      this.filteredReadRequestCount = filteredReadRequestCount;
      this.completedSequenceId = completedSequenceId;
      this.storeSequenceIds = Preconditions.checkNotNull(storeSequenceIds);
@@ -328,6 +339,11 @@ public final class RegionMetricsBuilder {
      return readRequestCount;
    }

    @Override
    public long getCpRequestCount() {
      return cpRequestCount;
    }

    @Override
    public long getFilteredReadRequestCount() {
      return filteredReadRequestCount;
@@ -415,6 +431,8 @@ public final class RegionMetricsBuilder {
          this.getMemStoreSize());
      Strings.appendKeyValue(sb, "readRequestCount",
          this.getReadRequestCount());
      Strings.appendKeyValue(sb, "cpRequestCount",
          this.getCpRequestCount());
      Strings.appendKeyValue(sb, "writeRequestCount",
          this.getWriteRequestCount());
      Strings.appendKeyValue(sb, "rootLevelIndexSize",
@@ -52,6 +52,7 @@ public class ServerLoad implements ServerMetrics {
  private int memstoreSizeMB = 0;
  private long storefileIndexSizeKB = 0;
  private long readRequestsCount = 0;
  private long cpRequestsCount = 0;
  private long filteredReadRequestsCount = 0;
  private long writeRequestsCount = 0;
  private int rootIndexSizeKB = 0;
@@ -86,6 +87,7 @@ public class ServerLoad implements ServerMetrics {
    storefileSizeMB += rl.getStoreFileSize().get(Size.Unit.MEGABYTE);
    memstoreSizeMB += rl.getMemStoreSize().get(Size.Unit.MEGABYTE);
    readRequestsCount += rl.getReadRequestCount();
    cpRequestsCount += rl.getCpRequestCount();
    filteredReadRequestsCount += rl.getFilteredReadRequestCount();
    writeRequestsCount += rl.getWriteRequestCount();
    storefileIndexSizeKB += rl.getStoreFileIndexSize().get(Size.Unit.KILOBYTE);
@@ -278,6 +280,15 @@ public class ServerLoad implements ServerMetrics {
    return readRequestsCount;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
   * Use {@link #getRegionMetrics} instead.
   */
  @Deprecated
  public long getCpRequestsCount() {
    return cpRequestsCount;
  }

  /**
   * @deprecated As of release 2.0.0, this will be removed in HBase 3.0.0
   * Use {@link #getRegionMetrics} instead.
@@ -501,6 +512,7 @@ public class ServerLoad implements ServerMetrics {
    Strings.appendKeyValue(sb, "storefileIndexSizeKB",
        Long.valueOf(this.storefileIndexSizeKB));
    Strings.appendKeyValue(sb, "readRequestsCount", Long.valueOf(this.readRequestsCount));
    Strings.appendKeyValue(sb, "cpRequestsCount", Long.valueOf(this.cpRequestsCount));
    Strings.appendKeyValue(sb, "filteredReadRequestsCount",
        Long.valueOf(this.filteredReadRequestsCount));
    Strings.appendKeyValue(sb, "writeRequestsCount", Long.valueOf(this.writeRequestsCount));
@@ -547,9 +559,9 @@ public class ServerLoad implements ServerMetrics {
  public int hashCode() {
    return Objects
        .hashCode(stores, storefiles, storeUncompressedSizeMB, storefileSizeMB, memstoreSizeMB,
            storefileIndexSizeKB, readRequestsCount, filteredReadRequestsCount, writeRequestsCount,
            rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB, totalCompactingKVs,
            currentCompactedKVs);
            storefileIndexSizeKB, readRequestsCount, cpRequestsCount, filteredReadRequestsCount,
            writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB, totalStaticBloomSizeKB,
            totalCompactingKVs, currentCompactedKVs);
  }

  @Override
@@ -562,6 +574,7 @@ public class ServerLoad implements ServerMetrics {
        && storefileSizeMB == sl.storefileSizeMB && memstoreSizeMB == sl.memstoreSizeMB
        && storefileIndexSizeKB == sl.storefileIndexSizeKB
        && readRequestsCount == sl.readRequestsCount
        && cpRequestsCount == sl.cpRequestsCount
        && filteredReadRequestsCount == sl.filteredReadRequestsCount
        && writeRequestsCount == sl.writeRequestsCount && rootIndexSizeKB == sl.rootIndexSizeKB
        && totalStaticIndexSizeKB == sl.totalStaticIndexSizeKB
@@ -324,6 +324,7 @@ public final class ServerMetricsBuilder {
    long storefileIndexSizeKB = 0;
    long rootLevelIndexSizeKB = 0;
    long readRequestsCount = 0;
    long cpRequestsCount = 0;
    long writeRequestsCount = 0;
    long filteredReadRequestsCount = 0;
    long bloomFilterSizeMB = 0;
@@ -337,6 +338,7 @@ public final class ServerMetricsBuilder {
      memStoreSizeMB += r.getMemStoreSize().get(Size.Unit.MEGABYTE);
      storefileIndexSizeKB += r.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE);
      readRequestsCount += r.getReadRequestCount();
      cpRequestsCount += r.getCpRequestCount();
      writeRequestsCount += r.getWriteRequestCount();
      filteredReadRequestsCount += r.getFilteredReadRequestCount();
      rootLevelIndexSizeKB += r.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE);
@@ -360,6 +362,7 @@ public final class ServerMetricsBuilder {
    }
    Strings.appendKeyValue(sb, "memstoreSizeMB", memStoreSizeMB);
    Strings.appendKeyValue(sb, "readRequestsCount", readRequestsCount);
    Strings.appendKeyValue(sb, "cpRequestsCount", cpRequestsCount);
    Strings.appendKeyValue(sb, "filteredReadRequestsCount", filteredReadRequestsCount);
    Strings.appendKeyValue(sb, "writeRequestsCount", writeRequestsCount);
    Strings.appendKeyValue(sb, "rootIndexSizeKB", rootLevelIndexSizeKB);
@@ -256,6 +256,9 @@ public interface MetricsRegionServerSource extends BaseSource, JvmPauseMonitorSo
  String READ_REQUEST_COUNT = "readRequestCount";
  String READ_REQUEST_COUNT_DESC =
      "Number of read requests with non-empty Results that this RegionServer has answered.";
  String CP_REQUEST_COUNT = "cpRequestCount";
  String CP_REQUEST_COUNT_DESC =
      "Number of coprocessor service requests this region server has answered.";
  String FILTERED_READ_REQUEST_COUNT = "filteredReadRequestCount";
  String FILTERED_READ_REQUEST_COUNT_DESC =
      "Number of filtered read requests this RegionServer has answered.";
@@ -135,6 +135,11 @@ public interface MetricsRegionServerWrapper {
   */
  long getReadRequestsCount();

  /**
   * Get the number of coprocessor requests to regions hosted on this region server.
   */
  long getCpRequestsCount();

  /**
   * Get the number of filtered read requests to regions hosted on this region server.
   */
@@ -72,6 +72,11 @@ public interface MetricsRegionWrapper {
   */
  long getReadRequestCount();

  /**
   * Get the total number of CoprocessorService requests that have been issued against this region
   */
  long getCpRequestCount();

  /**
   * Get the total number of filtered read requests that have been issued against this region
   */
@@ -29,6 +29,8 @@ public interface MetricsTableSource extends Comparable<MetricsTableSource> {

  String READ_REQUEST_COUNT = "readRequestCount";
  String READ_REQUEST_COUNT_DESC = "Number of read requests";
  String CP_REQUEST_COUNT = "cpRequestCount";
  String CP_REQUEST_COUNT_DESC = "Number of coprocessor service requests";
  String WRITE_REQUEST_COUNT = "writeRequestCount";
  String WRITE_REQUEST_COUNT_DESC = "Number of write requests";
  String TOTAL_REQUEST_COUNT = "totalRequestCount";
@@ -32,6 +32,11 @@ public interface MetricsTableWrapperAggregate {
   */
  long getReadRequestsCount(String table);

  /**
   * Get the number of CoprocessorService requests that have been issued against this table
   */
  long getCpRequestsCount(String table);

  /**
   * Get the number of write requests that have been issued against this table
   */
@@ -340,6 +340,8 @@ public class MetricsRegionServerSourceImpl
        TOTAL_ROW_ACTION_REQUEST_COUNT_DESC), rsWrap.getTotalRowActionRequestCount())
      .addCounter(Interns.info(READ_REQUEST_COUNT, READ_REQUEST_COUNT_DESC),
        rsWrap.getReadRequestsCount())
      .addCounter(Interns.info(CP_REQUEST_COUNT, CP_REQUEST_COUNT_DESC),
        rsWrap.getCpRequestsCount())
      .addCounter(Interns.info(FILTERED_READ_REQUEST_COUNT,
        FILTERED_READ_REQUEST_COUNT_DESC), rsWrap.getFilteredReadRequestsCount())
      .addCounter(Interns.info(WRITE_REQUEST_COUNT, WRITE_REQUEST_COUNT_DESC),
@@ -261,6 +261,10 @@ public class MetricsRegionSourceImpl implements MetricsRegionSource {
          regionNamePrefix + MetricsRegionServerSource.READ_REQUEST_COUNT,
          MetricsRegionServerSource.READ_REQUEST_COUNT_DESC),
          this.regionWrapper.getReadRequestCount());
      mrb.addCounter(Interns.info(
          regionNamePrefix + MetricsRegionServerSource.CP_REQUEST_COUNT,
          MetricsRegionServerSource.CP_REQUEST_COUNT_DESC),
          this.regionWrapper.getCpRequestCount());
      mrb.addCounter(Interns.info(
          regionNamePrefix + MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT,
          MetricsRegionServerSource.FILTERED_READ_REQUEST_COUNT_DESC),
@@ -125,6 +125,9 @@ public class MetricsTableSourceImpl implements MetricsTableSource {
      mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.READ_REQUEST_COUNT,
          MetricsTableSource.READ_REQUEST_COUNT_DESC),
          tableWrapperAgg.getReadRequestsCount(tableName.getNameAsString()));
      mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.CP_REQUEST_COUNT,
          MetricsTableSource.CP_REQUEST_COUNT_DESC),
          tableWrapperAgg.getCpRequestsCount(tableName.getNameAsString()));
      mrb.addCounter(Interns.info(tableNamePrefix + MetricsTableSource.WRITE_REQUEST_COUNT,
          MetricsTableSource.WRITE_REQUEST_COUNT_DESC),
          tableWrapperAgg.getWriteRequestsCount(tableName.getNameAsString()));
@@ -134,6 +134,11 @@ public class TestMetricsRegionSourceImpl {
      return 0;
    }

    @Override
    public long getCpRequestCount() {
      return 0;
    }

    @Override
    public long getWriteRequestCount() {
      return 0;
@@ -86,6 +86,11 @@ public class TestMetricsTableSourceImpl {
      return 10;
    }

    @Override
    public long getCpRequestsCount(String table) {
      return 15;
    }

    @Override
    public long getWriteRequestsCount(String table) {
      return 20;
@@ -139,6 +139,9 @@ message RegionLoad {

  /** the current total filtered read requests made to region */
  optional uint64 filtered_read_requests_count = 19;

  /** the current total coprocessor requests made to region */
  optional uint64 cp_requests_count = 20;
}

/* Server-level protobufs */
@@ -139,6 +139,9 @@ message RegionLoad {

  /** the current total filtered read requests made to region */
  optional uint64 filtered_read_requests_count = 19;

  /** the current total coprocessor requests made to region */
  optional uint64 cp_requests_count = 20;
}

/* Server-level protobufs */
@@ -92,6 +92,7 @@ public class StorageClusterStatusResource extends ResourceBase {
        (int) region.getMemStoreSize().get(Size.Unit.MEGABYTE),
        (long) region.getStoreFileIndexSize().get(Size.Unit.KILOBYTE),
        region.getReadRequestCount(),
        region.getCpRequestCount(),
        region.getWriteRequestCount(),
        (int) region.getStoreFileRootLevelIndexSize().get(Size.Unit.KILOBYTE),
        (int) region.getStoreFileUncompressedDataIndexSize().get(Size.Unit.KILOBYTE),
@@ -85,6 +85,7 @@ import com.fasterxml.jackson.annotation.JsonProperty;
 * <attribute name="memstoreSizeMB" type="int"></attribute>
 * <attribute name="storefileIndexSizeMB" type="int"></attribute>
 * <attribute name="readRequestsCount" type="int"></attribute>
 * <attribute name="cpRequestsCount" type="int"></attribute>
 * <attribute name="writeRequestsCount" type="int"></attribute>
 * <attribute name="rootIndexSizeKB" type="int"></attribute>
 * <attribute name="totalStaticIndexSizeKB" type="int"></attribute>
@@ -119,6 +120,7 @@ public class StorageClusterStatusModel
    private int memstoreSizeMB;
    private long storefileIndexSizeKB;
    private long readRequestsCount;
    private long cpRequestsCount;
    private long writeRequestsCount;
    private int rootIndexSizeKB;
    private int totalStaticIndexSizeKB;
@@ -151,8 +153,8 @@ public class StorageClusterStatusModel
     */
    public Region(byte[] name, int stores, int storefiles,
        int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB,
        long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
        int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
        long readRequestsCount, long cpRequestsCount, long writeRequestsCount,
        int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
        long totalCompactingKVs, long currentCompactedKVs) {
      this.name = name;
      this.stores = stores;
@@ -161,6 +163,7 @@ public class StorageClusterStatusModel
      this.memstoreSizeMB = memstoreSizeMB;
      this.storefileIndexSizeKB = storefileIndexSizeKB;
      this.readRequestsCount = readRequestsCount;
      this.cpRequestsCount = cpRequestsCount;
      this.writeRequestsCount = writeRequestsCount;
      this.rootIndexSizeKB = rootIndexSizeKB;
      this.totalStaticIndexSizeKB = totalStaticIndexSizeKB;
@@ -225,6 +228,14 @@ public class StorageClusterStatusModel
      return readRequestsCount;
    }

    /**
     * @return the current total read requests made to region
     */
    @XmlAttribute
    public long getCpRequestsCount() {
      return cpRequestsCount;
    }

    /**
     * @return the current total write requests made to region
     */
@@ -280,6 +291,13 @@ public class StorageClusterStatusModel
      this.readRequestsCount = readRequestsCount;
    }

    /**
     * @param cpRequestsCount The current total read requests made to region
     */
    public void setCpRequestsCount(long cpRequestsCount) {
      this.cpRequestsCount = cpRequestsCount;
    }

    /**
     * @param rootIndexSizeKB The current total size of root-level indexes
     * for the region, in KB
@@ -383,11 +401,11 @@ public class StorageClusterStatusModel
     */
    public void addRegion(byte[] name, int stores, int storefiles,
        int storefileSizeMB, int memstoreSizeMB, long storefileIndexSizeKB,
        long readRequestsCount, long writeRequestsCount, int rootIndexSizeKB,
        int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
        long readRequestsCount, long cpRequestsCount, long writeRequestsCount,
        int rootIndexSizeKB, int totalStaticIndexSizeKB, int totalStaticBloomSizeKB,
        long totalCompactingKVs, long currentCompactedKVs) {
      regions.add(new Region(name, stores, storefiles, storefileSizeMB,
          memstoreSizeMB, storefileIndexSizeKB, readRequestsCount,
          memstoreSizeMB, storefileIndexSizeKB, readRequestsCount, cpRequestsCount,
          writeRequestsCount, rootIndexSizeKB, totalStaticIndexSizeKB,
          totalStaticBloomSizeKB, totalCompactingKVs, currentCompactedKVs));
    }
@@ -683,6 +701,8 @@ public class StorageClusterStatusModel
        sb.append(region.storefileIndexSizeKB);
        sb.append("\n readRequestsCount=");
        sb.append(region.readRequestsCount);
        sb.append("\n cpRequestsCount=");
        sb.append(region.cpRequestsCount);
        sb.append("\n writeRequestsCount=");
        sb.append(region.writeRequestsCount);
        sb.append("\n rootIndexSizeKB=");
@@ -737,6 +757,7 @@ public class StorageClusterStatusModel
        regionBuilder.setMemStoreSizeMB(region.memstoreSizeMB);
        regionBuilder.setStorefileIndexSizeKB(region.storefileIndexSizeKB);
        regionBuilder.setReadRequestsCount(region.readRequestsCount);
        regionBuilder.setCpRequestsCount(region.cpRequestsCount);
        regionBuilder.setWriteRequestsCount(region.writeRequestsCount);
        regionBuilder.setRootIndexSizeKB(region.rootIndexSizeKB);
        regionBuilder.setTotalStaticIndexSizeKB(region.totalStaticIndexSizeKB);
@@ -783,6 +804,7 @@ public class StorageClusterStatusModel
          region.getMemStoreSizeMB(),
          region.getStorefileIndexSizeKB(),
          region.getReadRequestsCount(),
          region.getCpRequestsCount(),
          region.getWriteRequestsCount(),
          region.getRootIndexSizeKB(),
          region.getTotalStaticIndexSizeKB(),
@@ -32,6 +32,7 @@ message StorageClusterStatus {
    optional int32 totalStaticBloomSizeKB = 11;
    optional int64 totalCompactingKVs = 12;
    optional int64 currentCompactedKVs = 13;
    optional int64 cpRequestsCount = 14;
  }
  message Node {
    required string name = 1; // name:port
@@ -68,16 +68,17 @@ public class TestStorageClusterStatusModel extends TestModelBase<StorageClusterS
      "{\"regions\":2,\"requests\":0,\"averageLoad\":1.0,\"LiveNodes\":[{\"name\":\"test1\"," +
      "\"Region\":[{\"name\":\"aGJhc2U6cm9vdCwsMA==\",\"stores\":1,\"storefiles\":1," +
      "\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," +
      "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
      "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
      "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245219839331," +
      "\"heapSizeMB\":128,\"maxHeapSizeMB\":1024},{\"name\":\"test2\"," +
      "\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\",\"stores\":1," +
      "\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0,\"storefileIndexSizeKB\":0," +
      "\"readRequestsCount\":1,\"writeRequestsCount\":2,\"rootIndexSizeKB\":1," +
      "\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1," +
      "\"currentCompactedKVs\":1}],\"requests\":0,\"startCode\":1245239331198," +
      "\"heapSizeMB\":512,\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}";
      "\"readRequestsCount\":1,\"cpRequestsCount\":1,\"writeRequestsCount\":2," +
      "\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1,\"totalStaticBloomSizeKB\":1," +
      "\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}],\"requests\":0," +
      "\"startCode\":1245219839331,\"heapSizeMB\":128,\"maxHeapSizeMB\":1024}," +
      "{\"name\":\"test2\",\"Region\":[{\"name\":\"aGJhc2U6bWV0YSwsMTI0NjAwMDA0MzcyNA==\"," +
      "\"stores\":1,\"storefiles\":1,\"storefileSizeMB\":0,\"memStoreSizeMB\":0," +
      "\"storefileIndexSizeKB\":0,\"readRequestsCount\":1,\"cpRequestsCount\":1," +
      "\"writeRequestsCount\":2,\"rootIndexSizeKB\":1,\"totalStaticIndexSizeKB\":1," +
      "\"totalStaticBloomSizeKB\":1,\"totalCompactingKVs\":1,\"currentCompactedKVs\":1}]," +
      "\"requests\":0,\"startCode\":1245239331198,\"heapSizeMB\":512," +
      "\"maxHeapSizeMB\":1024}],\"DeadNodes\":[]}";
  }

  @Override
@@ -87,10 +88,10 @@ public class TestStorageClusterStatusModel extends TestModelBase<StorageClusterS
    model.setRequests(0);
    model.setAverageLoad(1.0);
    model.addLiveNode("test1", 1245219839331L, 128, 1024)
        .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 2, 1, 1, 1, 1, 1);
        .addRegion(Bytes.toBytes("hbase:root,,0"), 1, 1, 0, 0, 0, 1, 1, 2, 1, 1, 1, 1, 1);
    model.addLiveNode("test2", 1245239331198L, 512, 1024)
        .addRegion(Bytes.toBytes(TableName.META_TABLE_NAME+",,1246000043724"),1, 1, 0, 0, 0,
            1, 2, 1, 1, 1, 1, 1);
            1, 1, 2, 1, 1, 1, 1, 1);
    return model;
  }
@@ -31,12 +31,14 @@ import org.apache.yetus.audience.InterfaceStability;
@InterfaceStability.Evolving
class BalancerRegionLoad {
  private final long readRequestsCount;
  private final long cpRequestsCount;
  private final long writeRequestsCount;
  private final int memStoreSizeMB;
  private final int storefileSizeMB;

  BalancerRegionLoad(RegionMetrics regionMetrics) {
    readRequestsCount = regionMetrics.getReadRequestCount();
    cpRequestsCount = regionMetrics.getCpRequestCount();
    writeRequestsCount = regionMetrics.getWriteRequestCount();
    memStoreSizeMB = (int) regionMetrics.getMemStoreSize().get(Size.Unit.MEGABYTE);
    storefileSizeMB = (int) regionMetrics.getStoreFileSize().get(Size.Unit.MEGABYTE);
@@ -46,6 +48,10 @@ class BalancerRegionLoad {
    return readRequestsCount;
  }

  public long getCpRequestsCount() {
    return cpRequestsCount;
  }

  public long getWriteRequestsCount() {
    return writeRequestsCount;
  }
@@ -189,6 +189,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    }
    regionLoadFunctions = new CostFromRegionLoadFunction[] {
      new ReadRequestCostFunction(conf),
      new CPRequestCostFunction(conf),
      new WriteRequestCostFunction(conf),
      new MemStoreSizeCostFunction(conf),
      new StoreFileCostFunction(conf)
@@ -208,6 +209,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
      regionLoadFunctions[1],
      regionLoadFunctions[2],
      regionLoadFunctions[3],
      regionLoadFunctions[4]
    };
    curFunctionCosts= new Double[costFunctions.length];
    tempFunctionCosts= new Double[costFunctions.length];
@@ -1475,6 +1477,28 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
    }
  }

  /**
   * Compute the cost of total number of coprocessor requests The more unbalanced the higher the
   * computed cost will be. This uses a rolling average of regionload.
   */

  static class CPRequestCostFunction extends CostFromRegionLoadAsRateFunction {

    private static final String CP_REQUEST_COST_KEY =
        "hbase.master.balancer.stochastic.cpRequestCost";
    private static final float DEFAULT_CP_REQUEST_COST = 5;

    CPRequestCostFunction(Configuration conf) {
      super(conf);
      this.setMultiplier(conf.getFloat(CP_REQUEST_COST_KEY, DEFAULT_CP_REQUEST_COST));
    }

    @Override
    protected double getCostFromRl(BalancerRegionLoad rl) {
      return rl.getCpRequestsCount();
    }
  }

  /**
   * Compute the cost of total number of write requests. The more unbalanced the higher the
   * computed cost will be. This uses a rolling average of regionload.
@@ -1678,6 +1702,7 @@ public class StochasticLoadBalancer extends BaseLoadBalancer {
      return rl.getMemStoreSizeMB();
    }
  }

  /**
   * Compute the cost of total open storefiles size. The more unbalanced the higher the
   * computed cost will be. This uses a rolling average of regionload.
@@ -306,6 +306,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
  // Number of requests
  // Count rows for scan
  final LongAdder readRequestsCount = new LongAdder();
  final LongAdder cpRequestsCount = new LongAdder();
  final LongAdder filteredReadRequestsCount = new LongAdder();
  // Count rows for multi row mutations
  final LongAdder writeRequestsCount = new LongAdder();
@@ -1270,6 +1271,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    return readRequestsCount.sum();
  }

  @Override
  public long getCpRequestsCount() {
    return cpRequestsCount.sum();
  }

  @Override
  public long getFilteredReadRequestsCount() {
    return filteredReadRequestsCount.sum();
@@ -8005,14 +8011,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
      ClassSize.OBJECT +
      ClassSize.ARRAY +
      51 * ClassSize.REFERENCE + 3 * Bytes.SIZEOF_INT +
      (14 * Bytes.SIZEOF_LONG) +
      (15 * Bytes.SIZEOF_LONG) +
      3 * Bytes.SIZEOF_BOOLEAN);

  // woefully out of date - currently missing:
  // 1 x HashMap - coprocessorServiceHandlers
  // 6 x LongAdder - numMutationsWithoutWAL, dataInMemoryWithoutWAL,
  // checkAndMutateChecksPassed, checkAndMutateChecksFailed, readRequestsCount,
  // writeRequestsCount
  // writeRequestsCount, cpRequestsCount
  // 1 x HRegion$WriteState - writestate
  // 1 x RegionCoprocessorHost - coprocessorHost
  // 1 x RegionSplitPolicy - splitPolicy
@@ -8100,6 +8106,7 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
    }
    com.google.protobuf.Descriptors.ServiceDescriptor serviceDesc = service.getDescriptorForType();

    cpRequestsCount.increment();
    String methodName = call.getMethodName();
    com.google.protobuf.Descriptors.MethodDescriptor methodDesc =
        CoprocessorRpcUtils.getMethodDescriptor(methodName, serviceDesc);
@@ -1668,6 +1668,7 @@ public class HRegionServer extends HasThread implements
        .setTotalStaticIndexSizeKB(totalStaticIndexSizeKB)
        .setTotalStaticBloomSizeKB(totalStaticBloomSizeKB)
        .setReadRequestsCount(r.getReadRequestsCount())
        .setCpRequestsCount(r.getCpRequestsCount())
        .setFilteredReadRequestsCount(r.getFilteredReadRequestsCount())
        .setWriteRequestsCount(r.getWriteRequestsCount())
        .setTotalCompactingKVs(totalCompactingKVs)
@@ -74,6 +74,7 @@ class MetricsRegionServerWrapperImpl
  private volatile long numReferenceFiles = 0;
  private volatile double requestsPerSecond = 0.0;
  private volatile long readRequestsCount = 0;
  private volatile long cpRequestsCount = 0;
  private volatile long filteredReadRequestsCount = 0;
  private volatile long writeRequestsCount = 0;
  private volatile long checkAndMutateChecksFailed = 0;
@@ -518,6 +519,11 @@ class MetricsRegionServerWrapperImpl
    return readRequestsCount;
  }

  @Override
  public long getCpRequestsCount() {
    return cpRequestsCount;
  }

  @Override
  public long getFilteredReadRequestsCount() {
    return filteredReadRequestsCount;
@@ -727,7 +733,7 @@ class MetricsRegionServerWrapperImpl
      long avgAgeNumerator = 0, numHFiles = 0;
      long tempMinStoreFileAge = Long.MAX_VALUE;
      long tempReadRequestsCount = 0, tempFilteredReadRequestsCount = 0,
        tempWriteRequestsCount = 0;
        tempWriteRequestsCount = 0, tempCpRequestsCount = 0;
      long tempCheckAndMutateChecksFailed = 0;
      long tempCheckAndMutateChecksPassed = 0;
      long tempStorefileIndexSize = 0;
@@ -758,6 +764,7 @@ class MetricsRegionServerWrapperImpl
        tempNumMutationsWithoutWAL += r.getNumMutationsWithoutWAL();
        tempDataInMemoryWithoutWAL += r.getDataInMemoryWithoutWAL();
        tempReadRequestsCount += r.getReadRequestsCount();
        tempCpRequestsCount += r.getCpRequestsCount();
        tempFilteredReadRequestsCount += r.getFilteredReadRequestsCount();
        tempWriteRequestsCount += r.getWriteRequestsCount();
        tempCheckAndMutateChecksFailed += r.getCheckAndMutateChecksFailed();
@@ -873,6 +880,7 @@ class MetricsRegionServerWrapperImpl

      numReferenceFiles= tempNumReferenceFiles;
      readRequestsCount = tempReadRequestsCount;
      cpRequestsCount = tempCpRequestsCount;
      filteredReadRequestsCount = tempFilteredReadRequestsCount;
      writeRequestsCount = tempWriteRequestsCount;
      checkAndMutateChecksFailed = tempCheckAndMutateChecksFailed;
@@ -124,6 +124,11 @@ public class MetricsRegionWrapperImpl implements MetricsRegionWrapper, Closeable
    return this.region.getReadRequestsCount();
  }

  @Override
  public long getCpRequestCount() {
    return this.region.getCpRequestsCount();
  }

  @Override
  public long getFilteredReadRequestCount() {
    return this.region.getFilteredReadRequestsCount();
@@ -76,8 +76,10 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
        metricsTable.setStoreFilesSize(metricsTable.getStoreFilesSize() + tempStorefilesSize);
        metricsTable.setTableSize(metricsTable.getMemStoresSize() + metricsTable.getStoreFilesSize());
        metricsTable.setReadRequestsCount(metricsTable.getReadRequestsCount() + r.getReadRequestsCount());
        metricsTable.setCpRequestsCount(metricsTable.getCpRequestsCount() + r.getCpRequestsCount());
        metricsTable.setWriteRequestsCount(metricsTable.getWriteRequestsCount() + r.getWriteRequestsCount());
        metricsTable.setTotalRequestsCount(metricsTable.getReadRequestsCount() + metricsTable.getWriteRequestsCount());
        metricsTable.setTotalRequestsCount(metricsTable.getReadRequestsCount()
            + metricsTable.getWriteRequestsCount() + metricsTable.getCpRequestsCount());
      }

      for(Map.Entry<TableName, MetricsTableValues> entry : localMetricsTableMap.entrySet()) {
@@ -108,55 +110,71 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
  @Override
  public long getReadRequestsCount(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null)
    if (metricsTable == null) {
      return 0;
    else
    } else {
      return metricsTable.getReadRequestsCount();
    }
  }

  @Override
  public long getCpRequestsCount(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null) {
      return 0;
    } else {
      return metricsTable.getCpRequestsCount();
    }
  }

  @Override
  public long getWriteRequestsCount(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null)
    if (metricsTable == null) {
      return 0;
    else
    } else {
      return metricsTable.getWriteRequestsCount();
    }
  }

  @Override
  public long getTotalRequestsCount(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null)
    if (metricsTable == null) {
      return 0;
    else
    } else {
      return metricsTable.getTotalRequestsCount();
    }
  }

  @Override
  public long getMemStoresSize(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null)
    if (metricsTable == null) {
      return 0;
    else
    } else {
      return metricsTable.getMemStoresSize();
    }
  }

  @Override
  public long getStoreFilesSize(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null)
    if (metricsTable == null) {
      return 0;
    else
    } else {
      return metricsTable.getStoreFilesSize();
    }
  }

  @Override
  public long getTableSize(String table) {
    MetricsTableValues metricsTable = metricsTableMap.get(TableName.valueOf(table));
    if (metricsTable == null)
    if (metricsTable == null) {
      return 0;
    else
    } else {
      return metricsTable.getTableSize();
    }
  }

  @Override
@@ -168,6 +186,7 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr

    private long totalRequestsCount;
    private long readRequestsCount;
    private long cpRequestsCount;
    private long writeRequestsCount;
    private long memstoresSize;
    private long storeFilesSize;
@@ -189,6 +208,14 @@ public class MetricsTableWrapperAggregateImpl implements MetricsTableWrapperAggr
      this.readRequestsCount = readRequestsCount;
    }

    public long getCpRequestsCount() {
      return cpRequestsCount;
    }

    public void setCpRequestsCount(long cpRequestsCount) {
      this.cpRequestsCount = cpRequestsCount;
    }

    public long getWriteRequestsCount() {
      return writeRequestsCount;
    }
@@ -140,6 +140,9 @@ public interface Region extends ConfigurationObserver {
  /** @return read requests count for this region */
  long getReadRequestsCount();

  /** @return coprocessor requests count for this region */
  long getCpRequestsCount();

  /** @return filtered read requests count for this region */
  long getFilteredReadRequestsCount();
@@ -56,6 +56,8 @@ public class TestServerMetrics {
        .mapToDouble(v -> v.getStoreFileIndexSize().get(Size.Unit.KILOBYTE)).sum(), 0);
    assertEquals(((long) Integer.MAX_VALUE) * 2,
        metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getReadRequestCount()).sum());
    assertEquals(100,
        metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getCpRequestCount()).sum());
    assertEquals(300,
        metrics.getRegionMetrics().values().stream().mapToLong(v -> v.getFilteredReadRequestCount())
            .sum());
@@ -106,6 +108,7 @@ public class TestServerMetrics {
        .setStorefiles(13).setStoreUncompressedSizeMB(23).setStorefileSizeMB(300)
        .setFilteredReadRequestsCount(200).setStorefileIndexSizeKB(40).setRootIndexSizeKB(303)
        .setReadRequestsCount(Integer.MAX_VALUE).setWriteRequestsCount(Integer.MAX_VALUE)
        .setCpRequestsCount(100)
        .build();

    ClusterStatusProtos.ServerLoad sl =
@@ -25,10 +25,14 @@ import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Queue;
import java.util.Set;
import java.util.TreeMap;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseClassTestRule;
@@ -116,6 +120,81 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
    },
  };

  private ServerMetrics mockServerMetricsWithCpRequests(ServerName server,
      List<RegionInfo> regionsOnServer, long cpRequestCount) {
    ServerMetrics serverMetrics = mock(ServerMetrics.class);
    Map<byte[], RegionMetrics> regionLoadMap = new TreeMap<>(Bytes.BYTES_COMPARATOR);
    for(RegionInfo info : regionsOnServer){
      RegionMetrics rl = mock(RegionMetrics.class);
      when(rl.getReadRequestCount()).thenReturn(0L);
      when(rl.getCpRequestCount()).thenReturn(cpRequestCount);
      when(rl.getWriteRequestCount()).thenReturn(0L);
      when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
      when(rl.getStoreFileSize()).thenReturn(Size.ZERO);
      regionLoadMap.put(info.getEncodedNameAsBytes(), rl);
    }
    when(serverMetrics.getRegionMetrics()).thenReturn(regionLoadMap);
    return serverMetrics;
  }

  @Test
  public void testCPRequestCost() {
    // in order to pass needsBalance judgement
    conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 10000f);
    loadBalancer.setConf(conf);
    // mock cluster State
    Map<ServerName, List<RegionInfo>> clusterState = new HashMap<ServerName, List<RegionInfo>>();
    ServerName serverA = randomServer(3).getServerName();
    ServerName serverB = randomServer(3).getServerName();
    ServerName serverC = randomServer(3).getServerName();
    List<RegionInfo> regionsOnServerA = randomRegions(3);
    List<RegionInfo> regionsOnServerB = randomRegions(3);
    List<RegionInfo> regionsOnServerC = randomRegions(3);
    clusterState.put(serverA, regionsOnServerA);
    clusterState.put(serverB, regionsOnServerB);
    clusterState.put(serverC, regionsOnServerC);
    // mock ClusterMetrics
    Map<ServerName, ServerMetrics> serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(serverA, mockServerMetricsWithCpRequests(serverA, regionsOnServerA, 0));
    serverMetricsMap.put(serverB, mockServerMetricsWithCpRequests(serverB, regionsOnServerB, 0));
    serverMetricsMap.put(serverC, mockServerMetricsWithCpRequests(serverC, regionsOnServerC, 0));
    ClusterMetrics clusterStatus = mock(ClusterMetrics.class);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.setClusterMetrics(clusterStatus);

    // CPRequestCostFunction are Rate based, So doing setClusterMetrics again
    // this time, regions on serverA with more cpRequestCount load
    // serverA : 1000,1000,1000
    // serverB : 0,0,0
    // serverC : 0,0,0
    // so should move two regions from serverA to serverB & serverC
    serverMetricsMap = new TreeMap<>();
    serverMetricsMap.put(serverA, mockServerMetricsWithCpRequests(serverA,
        regionsOnServerA, 1000));
    serverMetricsMap.put(serverB, mockServerMetricsWithCpRequests(serverB, regionsOnServerB, 0));
    serverMetricsMap.put(serverC, mockServerMetricsWithCpRequests(serverC, regionsOnServerC, 0));
    clusterStatus = mock(ClusterMetrics.class);
    when(clusterStatus.getLiveServerMetrics()).thenReturn(serverMetricsMap);
    loadBalancer.setClusterMetrics(clusterStatus);

    List<RegionPlan> plans = loadBalancer.balanceCluster(clusterState);
    Set<RegionInfo> regionsMoveFromServerA = new HashSet<>();
    Set<ServerName> targetServers = new HashSet<>();
    for(RegionPlan plan : plans) {
      if(plan.getSource().equals(serverA)) {
        regionsMoveFromServerA.add(plan.getRegionInfo());
        targetServers.add(plan.getDestination());
      }
    }
    // should move 2 regions from serverA, one moves to serverB, the other moves to serverC
    assertEquals(2, regionsMoveFromServerA.size());
    assertEquals(2, targetServers.size());
    assertTrue(regionsOnServerA.containsAll(regionsMoveFromServerA));
    // reset config
    conf.setFloat("hbase.master.balancer.stochastic.cpRequestCost", 5f);
    loadBalancer.setConf(conf);
  }

  @Test
  public void testKeepRegionLoad() throws Exception {

@@ -126,6 +205,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {

      RegionMetrics rl = mock(RegionMetrics.class);
      when(rl.getReadRequestCount()).thenReturn(0L);
      when(rl.getCpRequestCount()).thenReturn(0L);
      when(rl.getWriteRequestCount()).thenReturn(0L);
      when(rl.getMemStoreSize()).thenReturn(Size.ZERO);
      when(rl.getStoreFileSize()).thenReturn(new Size(i, Size.Unit.MEGABYTE));
@@ -291,6 +371,7 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
    for (int i = 1; i < 5; i++) {
      BalancerRegionLoad regionLoad = mock(BalancerRegionLoad.class);
      when(regionLoad.getReadRequestsCount()).thenReturn(new Long(i));
      when(regionLoad.getCpRequestsCount()).thenReturn(new Long(i));
      when(regionLoad.getStorefileSizeMB()).thenReturn(i);
      regionLoads.add(regionLoad);
    }
@@ -302,6 +383,12 @@ public class TestStochasticLoadBalancer extends BalancerTestBase {
    // read requests are treated as a rate so the average rate here is simply 1
    assertEquals(1, rateResult, 0.01);

    StochasticLoadBalancer.CPRequestCostFunction cpCostFunction =
        new StochasticLoadBalancer.CPRequestCostFunction(conf);
    rateResult = cpCostFunction.getRegionLoadCost(regionLoads);
    // coprocessor requests are treated as a rate so the average rate here is simply 1
    assertEquals(1, rateResult, 0.01);

    StochasticLoadBalancer.StoreFileCostFunction storeFileCostFunction =
        new StochasticLoadBalancer.StoreFileCostFunction(conf);
    double result = storeFileCostFunction.getRegionLoadCost(regionLoads);
@@ -110,6 +110,11 @@ public class MetricsRegionServerWrapperStub implements MetricsRegionServerWrappe
    return 997;
  }

  @Override
  public long getCpRequestsCount() {
    return 998;
  }

  @Override
  public long getFilteredReadRequestsCount() {
    return 1997;
@@ -100,6 +100,11 @@ public class MetricsRegionWrapperStub implements MetricsRegionWrapper {
    return 2;
  }

  @Override
  public long getCpRequestCount() {
    return 108;
  }

  @Override
  public long getWriteRequestCount() {
    return 106;
@@ -31,6 +31,11 @@ public class MetricsTableWrapperStub implements MetricsTableWrapperAggregate {
    return 10;
  }

  @Override
  public long getCpRequestsCount(String table) {
    return 15;
  }

  @Override
  public long getWriteRequestsCount(String table) {
    return 20;
@@ -62,6 +62,9 @@ public class TestMetricsRegion {
    HELPER.assertGauge(
      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
      103, agg);
    HELPER.assertCounter(
      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_cpRequestCount",
      108, agg);
    HELPER.assertCounter(
      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
        "filteredReadRequestCount",
@@ -83,6 +86,9 @@ public class TestMetricsRegion {
    HELPER.assertGauge(
      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_memstoreSize",
      103, agg);
    HELPER.assertCounter(
      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_cpRequestCount",
      108, agg);
    HELPER.assertCounter(
      "namespace_TestNS_table_MetricsRegionWrapperStub_region_DEADBEEF001_metric_" +
        "filteredReadRequestCount",
@@ -84,6 +84,7 @@ public class TestMetricsRegionServer {
        + HELPER.getCounter("writeRequestCount", serverSource),
      serverSource);
    HELPER.assertCounter("readRequestCount", 997, serverSource);
    HELPER.assertCounter("cpRequestCount", 998, serverSource);
    HELPER.assertCounter("filteredReadRequestCount", 1997, serverSource);
    HELPER.assertCounter("writeRequestCount", 707, serverSource);
    HELPER.assertCounter("checkMutateFailedCount", 401, serverSource);
@@ -43,11 +43,12 @@ public class TestMetricsTableAggregate {
    String tableName = "testTableMetrics";
    MetricsTableWrapperStub tableWrapper = new MetricsTableWrapperStub(tableName);
    CompatibilitySingletonFactory.getInstance(MetricsRegionServerSourceFactory.class)
      .createTable(tableName, tableWrapper);
        .createTable(tableName, tableWrapper);
    MetricsTableAggregateSource agg = CompatibilitySingletonFactory
        .getInstance(MetricsRegionServerSourceFactory.class).getTableAggregate();

    HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_readRequestCount", 10, agg);
    HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_cpRequestCount", 15, agg);
    HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_writeRequestCount", 20, agg);
    HELPER.assertCounter("Namespace_default_table_testTableMetrics_metric_totalRequestCount", 30, agg);