HBASE-12361 Show data locality of region in table page (Liu Shaohui)
commit a62f543c65
parent 2d1cfc14f2
@@ -160,6 +160,15 @@ public class RegionLoad {
     return regionLoadPB.getStoreUncompressedSizeMB();
   }
 
+  /**
+   * @return the data locality of region in the regionserver.
+   */
+  public float getDataLocality() {
+    if (regionLoadPB.hasDataLocality()) {
+      return regionLoadPB.getDataLocality();
+    }
+    return 0.0f;
+  }
   /**
    * @see java.lang.Object#toString()
    */
@@ -205,6 +214,8 @@ public class RegionLoad {
         compactionProgressPct);
     sb = Strings.appendKeyValue(sb, "completeSequenceId",
         this.getCompleteSequenceId());
+    sb = Strings.appendKeyValue(sb, "dataLocality",
+        this.getDataLocality());
     return sb.toString();
   }
 }
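The new accessor above exposes per-region locality through the existing ClusterStatus/ServerLoad API. A minimal usage sketch, assuming an HBase 1.0-style client; the connection setup is illustrative and only RegionLoad.getDataLocality() comes from this patch:

```java
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterStatus;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionLoad;
import org.apache.hadoop.hbase.ServerLoad;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class LocalityReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterStatus status = admin.getClusterStatus();
      for (ServerName sn : status.getServers()) {
        ServerLoad sl = status.getLoad(sn);
        // Each region reports the fraction of its HFile block bytes stored on
        // the local DataNode: 1.0 means fully local, 0.0 means no local blocks.
        for (Map.Entry<byte[], RegionLoad> e : sl.getRegionsLoad().entrySet()) {
          System.out.println(sn + " " + Bytes.toStringBinary(e.getKey())
              + " locality=" + e.getValue().getDataLocality());
        }
      }
    }
  }
}
```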
@@ -2153,6 +2153,24 @@ public final class ClusterStatusProtos {
      * </pre>
      */
     long getCompleteSequenceId();
+
+    // optional float data_locality = 16;
+    /**
+     * <code>optional float data_locality = 16;</code>
+     *
+     * <pre>
+     ** The current data locality for region in the regionserver
+     * </pre>
+     */
+    boolean hasDataLocality();
+    /**
+     * <code>optional float data_locality = 16;</code>
+     *
+     * <pre>
+     ** The current data locality for region in the regionserver
+     * </pre>
+     */
+    float getDataLocality();
   }
   /**
    * Protobuf type {@code RegionLoad}
@@ -2288,6 +2306,11 @@
               completeSequenceId_ = input.readUInt64();
               break;
             }
+            case 133: {
+              bitField0_ |= 0x00008000;
+              dataLocality_ = input.readFloat();
+              break;
+            }
           }
         }
       } catch (com.google.protobuf.InvalidProtocolBufferException e) {
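The `case 133` above is not arbitrary: a protobuf tag is `(field_number << 3) | wire_type`, and a `float` uses wire type 5 (fixed 32-bit), so field 16 arrives under tag 133. A quick self-contained check:

```java
public class TagCheck {
  public static void main(String[] args) {
    // Protobuf tag = (field_number << 3) | wire_type; float uses wire type 5 (fixed32).
    int tag = (16 << 3) | 5;
    System.out.println(tag);  // prints 133, the new case label in the parser above
  }
}
```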
@@ -2706,6 +2729,30 @@
       return completeSequenceId_;
     }
 
+    // optional float data_locality = 16;
+    public static final int DATA_LOCALITY_FIELD_NUMBER = 16;
+    private float dataLocality_;
+    /**
+     * <code>optional float data_locality = 16;</code>
+     *
+     * <pre>
+     ** The current data locality for region in the regionserver
+     * </pre>
+     */
+    public boolean hasDataLocality() {
+      return ((bitField0_ & 0x00008000) == 0x00008000);
+    }
+    /**
+     * <code>optional float data_locality = 16;</code>
+     *
+     * <pre>
+     ** The current data locality for region in the regionserver
+     * </pre>
+     */
+    public float getDataLocality() {
+      return dataLocality_;
+    }
+
     private void initFields() {
       regionSpecifier_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
       stores_ = 0;
@@ -2722,6 +2769,7 @@
       totalStaticIndexSizeKB_ = 0;
       totalStaticBloomSizeKB_ = 0;
       completeSequenceId_ = 0L;
+      dataLocality_ = 0F;
     }
     private byte memoizedIsInitialized = -1;
     public final boolean isInitialized() {
@@ -2788,6 +2836,9 @@
       if (((bitField0_ & 0x00004000) == 0x00004000)) {
         output.writeUInt64(15, completeSequenceId_);
       }
+      if (((bitField0_ & 0x00008000) == 0x00008000)) {
+        output.writeFloat(16, dataLocality_);
+      }
       getUnknownFields().writeTo(output);
     }
 
@@ -2857,6 +2908,10 @@
         size += com.google.protobuf.CodedOutputStream
           .computeUInt64Size(15, completeSequenceId_);
       }
+      if (((bitField0_ & 0x00008000) == 0x00008000)) {
+        size += com.google.protobuf.CodedOutputStream
+          .computeFloatSize(16, dataLocality_);
+      }
       size += getUnknownFields().getSerializedSize();
       memoizedSerializedSize = size;
       return size;
@@ -2955,6 +3010,10 @@
         result = result && (getCompleteSequenceId()
             == other.getCompleteSequenceId());
       }
+      result = result && (hasDataLocality() == other.hasDataLocality());
+      if (hasDataLocality()) {
+        result = result && (Float.floatToIntBits(getDataLocality()) == Float.floatToIntBits(other.getDataLocality()));
+      }
       result = result &&
           getUnknownFields().equals(other.getUnknownFields());
       return result;
@@ -3028,6 +3087,11 @@
         hash = (37 * hash) + COMPLETE_SEQUENCE_ID_FIELD_NUMBER;
         hash = (53 * hash) + hashLong(getCompleteSequenceId());
       }
+      if (hasDataLocality()) {
+        hash = (37 * hash) + DATA_LOCALITY_FIELD_NUMBER;
+        hash = (53 * hash) + Float.floatToIntBits(
+            getDataLocality());
+      }
       hash = (29 * hash) + getUnknownFields().hashCode();
       memoizedHashCode = hash;
       return hash;
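The generated equals()/hashCode() compare the new float through Float.floatToIntBits, i.e. bit-exact equality rather than `==`. A small illustrative check, assuming the generated classes are on the classpath; the RegionSpecifier value is an arbitrary example:

```java
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class DataLocalityEqualsCheck {
  public static void main(String[] args) {
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFromUtf8("1588230740"))  // example encoded name
        .build();
    ClusterStatusProtos.RegionLoad a = ClusterStatusProtos.RegionLoad.newBuilder()
        .setRegionSpecifier(spec).setDataLocality(0.42f).build();
    ClusterStatusProtos.RegionLoad b = ClusterStatusProtos.RegionLoad.newBuilder()
        .setRegionSpecifier(spec).setDataLocality(0.42f).build();
    // Same field values => equal messages and matching hash codes.
    System.out.println(a.equals(b) + " " + (a.hashCode() == b.hashCode()));
  }
}
```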
@@ -3172,6 +3236,8 @@
         completeSequenceId_ = 0L;
         bitField0_ = (bitField0_ & ~0x00004000);
+        dataLocality_ = 0F;
+        bitField0_ = (bitField0_ & ~0x00008000);
         return this;
       }
 
@@ -3264,6 +3330,10 @@
           to_bitField0_ |= 0x00004000;
         }
         result.completeSequenceId_ = completeSequenceId_;
+        if (((from_bitField0_ & 0x00008000) == 0x00008000)) {
+          to_bitField0_ |= 0x00008000;
+        }
+        result.dataLocality_ = dataLocality_;
         result.bitField0_ = to_bitField0_;
         onBuilt();
         return result;
@@ -3325,6 +3395,9 @@
         if (other.hasCompleteSequenceId()) {
           setCompleteSequenceId(other.getCompleteSequenceId());
         }
+        if (other.hasDataLocality()) {
+          setDataLocality(other.getDataLocality());
+        }
         this.mergeUnknownFields(other.getUnknownFields());
         return this;
       }
@@ -4215,6 +4288,55 @@
         return this;
       }
 
+      // optional float data_locality = 16;
+      private float dataLocality_ ;
+      /**
+       * <code>optional float data_locality = 16;</code>
+       *
+       * <pre>
+       ** The current data locality for region in the regionserver
+       * </pre>
+       */
+      public boolean hasDataLocality() {
+        return ((bitField0_ & 0x00008000) == 0x00008000);
+      }
+      /**
+       * <code>optional float data_locality = 16;</code>
+       *
+       * <pre>
+       ** The current data locality for region in the regionserver
+       * </pre>
+       */
+      public float getDataLocality() {
+        return dataLocality_;
+      }
+      /**
+       * <code>optional float data_locality = 16;</code>
+       *
+       * <pre>
+       ** The current data locality for region in the regionserver
+       * </pre>
+       */
+      public Builder setDataLocality(float value) {
+        bitField0_ |= 0x00008000;
+        dataLocality_ = value;
+        onChanged();
+        return this;
+      }
+      /**
+       * <code>optional float data_locality = 16;</code>
+       *
+       * <pre>
+       ** The current data locality for region in the regionserver
+       * </pre>
+       */
+      public Builder clearDataLocality() {
+        bitField0_ = (bitField0_ & ~0x00008000);
+        dataLocality_ = 0F;
+        onChanged();
+        return this;
+      }
+
       // @@protoc_insertion_point(builder_scope:RegionLoad)
     }
 
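The new Builder methods round-trip through the wire format like any other optional field. A short sketch; the field values are arbitrary examples, not taken from this patch:

```java
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class DataLocalityRoundTrip {
  public static void main(String[] args) throws Exception {
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.REGION_NAME)
        .setValue(ByteString.copyFromUtf8("t1,,1414000000000.abcdef0123456789."))
        .build();
    ClusterStatusProtos.RegionLoad rl = ClusterStatusProtos.RegionLoad.newBuilder()
        .setRegionSpecifier(spec)       // region_specifier is a required field
        .setDataLocality(0.75f)         // new optional field 16
        .build();
    ClusterStatusProtos.RegionLoad parsed =
        ClusterStatusProtos.RegionLoad.parseFrom(rl.toByteArray());
    // The value survives serialize/parse, and hasDataLocality() records presence.
    System.out.println(parsed.hasDataLocality() + " " + parsed.getDataLocality());
  }
}
```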
@@ -10350,7 +10472,7 @@ public final class ClusterStatusProtos {
       "PLITTING_NEW\020\r\022\017\n\013MERGING_NEW\020\016\"X\n\022Regio",
       "nInTransition\022\036\n\004spec\030\001 \002(\0132\020.RegionSpec" +
       "ifier\022\"\n\014region_state\030\002 \002(\0132\014.RegionStat" +
-      "e\"\320\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
+      "e\"\347\003\n\nRegionLoad\022*\n\020region_specifier\030\001 \002" +
       "(\0132\020.RegionSpecifier\022\016\n\006stores\030\002 \001(\r\022\022\n\n" +
       "storefiles\030\003 \001(\r\022\"\n\032store_uncompressed_s" +
       "ize_MB\030\004 \001(\r\022\031\n\021storefile_size_MB\030\005 \001(\r\022" +
@@ -10361,27 +10483,27 @@ public final class ClusterStatusProtos {
       "ompacted_KVs\030\013 \001(\004\022\032\n\022root_index_size_KB" +
       "\030\014 \001(\r\022\"\n\032total_static_index_size_KB\030\r \001" +
       "(\r\022\"\n\032total_static_bloom_size_KB\030\016 \001(\r\022\034" +
-      "\n\024complete_sequence_id\030\017 \001(\004\"\212\002\n\nServerL" +
-      "oad\022\032\n\022number_of_requests\030\001 \001(\r\022 \n\030total" +
-      "_number_of_requests\030\002 \001(\r\022\024\n\014used_heap_M" +
-      "B\030\003 \001(\r\022\023\n\013max_heap_MB\030\004 \001(\r\022!\n\014region_l" +
-      "oads\030\005 \003(\0132\013.RegionLoad\022\"\n\014coprocessors\030" +
-      "\006 \003(\0132\014.Coprocessor\022\031\n\021report_start_time" +
-      "\030\007 \001(\004\022\027\n\017report_end_time\030\010 \001(\004\022\030\n\020info_",
-      "server_port\030\t \001(\r\"O\n\016LiveServerInfo\022\033\n\006s" +
-      "erver\030\001 \002(\0132\013.ServerName\022 \n\013server_load\030" +
-      "\002 \002(\0132\013.ServerLoad\"\340\002\n\rClusterStatus\022/\n\r" +
-      "hbase_version\030\001 \001(\0132\030.HBaseVersionFileCo" +
-      "ntent\022%\n\014live_servers\030\002 \003(\0132\017.LiveServer" +
-      "Info\022!\n\014dead_servers\030\003 \003(\0132\013.ServerName\022" +
-      "2\n\025regions_in_transition\030\004 \003(\0132\023.RegionI" +
-      "nTransition\022\036\n\ncluster_id\030\005 \001(\0132\n.Cluste" +
-      "rId\022)\n\023master_coprocessors\030\006 \003(\0132\014.Copro" +
-      "cessor\022\033\n\006master\030\007 \001(\0132\013.ServerName\022#\n\016b",
-      "ackup_masters\030\010 \003(\0132\013.ServerName\022\023\n\013bala" +
-      "ncer_on\030\t \001(\010BF\n*org.apache.hadoop.hbase" +
-      ".protobuf.generatedB\023ClusterStatusProtos" +
-      "H\001\240\001\001"
+      "\n\024complete_sequence_id\030\017 \001(\004\022\025\n\rdata_loc" +
+      "ality\030\020 \001(\002\"\212\002\n\nServerLoad\022\032\n\022number_of_" +
+      "requests\030\001 \001(\r\022 \n\030total_number_of_reques" +
+      "ts\030\002 \001(\r\022\024\n\014used_heap_MB\030\003 \001(\r\022\023\n\013max_he" +
+      "ap_MB\030\004 \001(\r\022!\n\014region_loads\030\005 \003(\0132\013.Regi" +
+      "onLoad\022\"\n\014coprocessors\030\006 \003(\0132\014.Coprocess" +
+      "or\022\031\n\021report_start_time\030\007 \001(\004\022\027\n\017report_",
+      "end_time\030\010 \001(\004\022\030\n\020info_server_port\030\t \001(\r" +
+      "\"O\n\016LiveServerInfo\022\033\n\006server\030\001 \002(\0132\013.Ser" +
+      "verName\022 \n\013server_load\030\002 \002(\0132\013.ServerLoa" +
+      "d\"\340\002\n\rClusterStatus\022/\n\rhbase_version\030\001 \001" +
+      "(\0132\030.HBaseVersionFileContent\022%\n\014live_ser" +
+      "vers\030\002 \003(\0132\017.LiveServerInfo\022!\n\014dead_serv" +
+      "ers\030\003 \003(\0132\013.ServerName\0222\n\025regions_in_tra" +
+      "nsition\030\004 \003(\0132\023.RegionInTransition\022\036\n\ncl" +
+      "uster_id\030\005 \001(\0132\n.ClusterId\022)\n\023master_cop" +
+      "rocessors\030\006 \003(\0132\014.Coprocessor\022\033\n\006master\030",
+      "\007 \001(\0132\013.ServerName\022#\n\016backup_masters\030\010 \003" +
+      "(\0132\013.ServerName\022\023\n\013balancer_on\030\t \001(\010BF\n*" +
+      "org.apache.hadoop.hbase.protobuf.generat" +
+      "edB\023ClusterStatusProtosH\001\240\001\001"
     };
     com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
       new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
@@ -10405,7 +10527,7 @@ public final class ClusterStatusProtos {
           internal_static_RegionLoad_fieldAccessorTable = new
             com.google.protobuf.GeneratedMessage.FieldAccessorTable(
               internal_static_RegionLoad_descriptor,
-              new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", });
+              new java.lang.String[] { "RegionSpecifier", "Stores", "Storefiles", "StoreUncompressedSizeMB", "StorefileSizeMB", "MemstoreSizeMB", "StorefileIndexSizeMB", "ReadRequestsCount", "WriteRequestsCount", "TotalCompactingKVs", "CurrentCompactedKVs", "RootIndexSizeKB", "TotalStaticIndexSizeKB", "TotalStaticBloomSizeKB", "CompleteSequenceId", "DataLocality", });
           internal_static_ServerLoad_descriptor =
             getDescriptor().getMessageTypes().get(3);
           internal_static_ServerLoad_fieldAccessorTable = new
@@ -110,6 +110,9 @@ message RegionLoad {
 
   /** the most recent sequence Id from cache flush */
   optional uint64 complete_sequence_id = 15;
+
+  /** The current data locality for region in the regionserver */
+  optional float data_locality = 16;
 }
 
 /* Server-level protobufs */
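Because data_locality is optional, a RegionLoad emitted by a RegionServer that predates this change simply omits field 16; hasDataLocality() then returns false on the receiving side and the client wrapper falls back to 0.0f, as the RegionLoad.getDataLocality() hunk at the top shows. A sketch of that path, assuming the pb-wrapping RegionLoad constructor is accessible and using example values:

```java
import com.google.protobuf.ByteString;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

public class OldServerLocalityDefault {
  public static void main(String[] args) throws Exception {
    HBaseProtos.RegionSpecifier spec = HBaseProtos.RegionSpecifier.newBuilder()
        .setType(HBaseProtos.RegionSpecifier.RegionSpecifierType.ENCODED_REGION_NAME)
        .setValue(ByteString.copyFromUtf8("1588230740"))  // example value
        .build();
    // A RegionLoad as an older server would send it: no data_locality at all.
    ClusterStatusProtos.RegionLoad oldStyle =
        ClusterStatusProtos.RegionLoad.newBuilder().setRegionSpecifier(spec).build();
    ClusterStatusProtos.RegionLoad parsed =
        ClusterStatusProtos.RegionLoad.parseFrom(oldStyle.toByteArray());
    System.out.println(parsed.hasDataLocality());  // false
    // The client-side wrapper defaults to 0.0f when the field is absent.
    System.out.println(new org.apache.hadoop.hbase.RegionLoad(parsed).getDataLocality());
  }
}
```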
@@ -154,6 +154,7 @@
         <th>Storefile Size</th>
         <th>Index Size</th>
         <th>Bloom Size</th>
+        <th>Data Locality</th>
     </tr>
 
     <%for HRegionInfo r: onlineRegions %>
@@ -171,6 +172,7 @@
         <td><% load.getStorefileSizeMB() %>m</td>
         <td><% load.getTotalStaticIndexSizeKB() %>k</td>
         <td><% load.getTotalStaticBloomSizeKB() %>k</td>
+        <td><% load.getDataLocality() %></td>
         </%if>
     </tr>
     </%for>
@@ -1357,6 +1357,8 @@ public class HRegionServer extends HasThread implements
           (int) (store.getTotalStaticBloomSize() / 1024);
       }
     }
+    float dataLocality =
+        r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
     if (regionLoadBldr == null) {
       regionLoadBldr = RegionLoad.newBuilder();
     }
@@ -1379,7 +1381,8 @@ public class HRegionServer extends HasThread implements
       .setWriteRequestsCount(r.writeRequestsCount.get())
       .setTotalCompactingKVs(totalCompactingKVs)
       .setCurrentCompactedKVs(currentCompactedKVs)
-      .setCompleteSequenceId(r.lastFlushSeqId);
+      .setCompleteSequenceId(r.lastFlushSeqId)
+      .setDataLocality(dataLocality);
 
     return regionLoadBldr.build();
   }
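getBlockLocalityIndex(host) reports the fraction of the region's HFile block weight (bytes) that has at least one replica on the given host, which is the number stamped onto the RegionLoad above. The sketch below only illustrates that ratio; it is not HBase's HDFSBlocksDistribution implementation and its names are made up:

```java
import java.util.HashMap;
import java.util.Map;

public class LocalityIndexSketch {
  private long totalWeight = 0;                                 // bytes over all blocks
  private final Map<String, Long> hostWeight = new HashMap<>(); // bytes per host

  public void addBlock(long sizeBytes, String... replicaHosts) {
    totalWeight += sizeBytes;
    for (String host : replicaHosts) {
      hostWeight.merge(host, sizeBytes, Long::sum);
    }
  }

  /** Share of block bytes that have a replica on this host, in [0, 1]. */
  public float getBlockLocalityIndex(String host) {
    return totalWeight == 0 ? 0.0f
        : (float) hostWeight.getOrDefault(host, 0L) / totalWeight;
  }

  public static void main(String[] args) {
    LocalityIndexSketch d = new LocalityIndexSketch();
    d.addBlock(128L << 20, "rs1.example.com", "dn2", "dn3"); // replica on the RS host
    d.addBlock(128L << 20, "dn4", "dn5", "dn6");             // no local replica
    System.out.println(d.getBlockLocalityIndex("rs1.example.com")); // 0.5
  }
}
```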
@@ -48,10 +48,10 @@
   String tableHeader;
   boolean withReplica = false;
   if (table.getTableDescriptor().getRegionReplication() > 1) {
-    tableHeader = "<h2>Table Regions</h2><table class=\"table table-striped\"><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th><th>Requests</th><th>ReplicaID</th></tr>";
+    tableHeader = "<h2>Table Regions</h2><table class=\"table table-striped\"><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th><th>Locality</th><th>Requests</th><th>ReplicaID</th></tr>";
     withReplica = true;
   } else {
-    tableHeader = "<h2>Table Regions</h2><table class=\"table table-striped\"><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th><th>Requests</th></tr>";
+    tableHeader = "<h2>Table Regions</h2><table class=\"table table-striped\"><tr><th>Name</th><th>Region Server</th><th>Start Key</th><th>End Key</th><th>Locality</th><th>Requests</th></tr>";
   }
   ServerName rl = metaTableLocator.getMetaRegionLocation(master.getZooKeeper());
   boolean showFragmentation = conf.getBoolean("hbase.master.ui.fragmentation.enabled", false);
@@ -212,9 +212,10 @@
 <tr>
   <td><%= escapeXml(meta.getRegionNameAsString()) %></td>
   <td><a href="<%= url %>"><%= metaLocation.getHostname().toString() + ":" + master.getRegionServerInfoPort(metaLocation) %></a></td>
-  <td>-</td>
   <td><%= escapeXml(Bytes.toString(meta.getStartKey())) %></td>
   <td><%= escapeXml(Bytes.toString(meta.getEndKey())) %></td>
+  <td>-</td>
+  <td>-</td>
 </tr>
 <% } %>
 </table>
@@ -268,7 +269,7 @@
   HRegionInfo regionInfo = hriEntry.getKey();
   ServerName addr = hriEntry.getValue();
   long req = 0;
+  float locality = 0.0f;
   String urlRegionServer = null;
 
   if (addr != null) {
@@ -277,6 +278,7 @@
     Map<byte[], RegionLoad> map = sl.getRegionsLoad();
     if (map.containsKey(regionInfo.getRegionName())) {
       req = map.get(regionInfo.getRegionName()).getRequestsCount();
+      locality = map.get(regionInfo.getRegionName()).getDataLocality();
     }
     Integer i = regDistribution.get(addr);
     if (null == i) i = Integer.valueOf(0);
@@ -305,6 +307,7 @@
       conf))) %></td>
   <td><%= escapeXml(Bytes.toStringBinary(HRegionInfo.getEndKeyForDisplay(regionInfo,
       conf))) %></td>
+  <td><%= locality%></td>
   <td><%= req%></td>
   <%
   if (withReplica) {