HBASE-24038 Add a metric to show the locality of ssd in table.jsp (#1337)
Signed-off-by: Wellington Chevreuil <wchevreuil@apache.org>
Signed-off-by: Viraj Jasani <vjasani@apache.org>
This commit is contained in:
parent d6e9c3164d
commit b83d38fb94
@@ -160,4 +160,10 @@ public interface RegionMetrics {
    * of this region
    */
   int getMaxCompactedStoreFileRefCount();
+
+  /**
+   * Different from dataLocality, this metric's numerator only includes data stored on SSD
+   * @return the data locality for SSD of the region in the regionserver
+   */
+  float getDataLocalityForSsd();
 }

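The new getter rides on the existing ClusterMetrics plumbing, so once a server reports the field a client reads it like any other region metric. A minimal sketch, assuming an already-open Admin instance named `admin` (standard HBase 2.x client API):

// Print plain and SSD locality for every region on every live server.
ClusterMetrics cluster = admin.getClusterMetrics();
for (Map.Entry<ServerName, ServerMetrics> e : cluster.getLiveServerMetrics().entrySet()) {
  for (RegionMetrics rm : e.getValue().getRegionMetrics().values()) {
    System.out.printf("%s locality=%.2f ssdLocality=%.2f%n",
      rm.getNameAsString(), rm.getDataLocality(), rm.getDataLocalityForSsd());
  }
}
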
@@ -51,6 +51,8 @@ public final class RegionMetricsBuilder {
         .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs())
         .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId())
         .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f)
+        .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ?
+          regionLoadPB.getDataLocalityForSsd() : 0.0f)
         .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount())
         .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(),
           Size.Unit.KILOBYTE))

@@ -148,6 +150,7 @@ public final class RegionMetricsBuilder {
   private Map<byte[], Long> storeSequenceIds = Collections.emptyMap();
   private float dataLocality;
   private long lastMajorCompactionTimestamp;
+  private float dataLocalityForSsd;
   private RegionMetricsBuilder(byte[] name) {
     this.name = name;
   }

@@ -236,6 +239,10 @@ public final class RegionMetricsBuilder {
     this.lastMajorCompactionTimestamp = value;
     return this;
   }
+  public RegionMetricsBuilder setDataLocalityForSsd(float value) {
+    this.dataLocalityForSsd = value;
+    return this;
+  }

   public RegionMetrics build() {
     return new RegionMetricsImpl(name,

@@ -259,7 +266,8 @@ public final class RegionMetricsBuilder {
         completedSequenceId,
         storeSequenceIds,
         dataLocality,
-        lastMajorCompactionTimestamp);
+        lastMajorCompactionTimestamp,
+        dataLocalityForSsd);
   }

   private static class RegionMetricsImpl implements RegionMetrics {

@@ -285,6 +293,7 @@ public final class RegionMetricsBuilder {
     private final Map<byte[], Long> storeSequenceIds;
     private final float dataLocality;
     private final long lastMajorCompactionTimestamp;
+    private final float dataLocalityForSsd;
     RegionMetricsImpl(byte[] name,
       int storeCount,
       int storeFileCount,

@@ -306,7 +315,8 @@ public final class RegionMetricsBuilder {
       long completedSequenceId,
       Map<byte[], Long> storeSequenceIds,
       float dataLocality,
-      long lastMajorCompactionTimestamp) {
+      long lastMajorCompactionTimestamp,
+      float dataLocalityForSsd) {
       this.name = Preconditions.checkNotNull(name);
       this.storeCount = storeCount;
       this.storeFileCount = storeFileCount;

@@ -329,6 +339,7 @@ public final class RegionMetricsBuilder {
       this.storeSequenceIds = Preconditions.checkNotNull(storeSequenceIds);
       this.dataLocality = dataLocality;
       this.lastMajorCompactionTimestamp = lastMajorCompactionTimestamp;
+      this.dataLocalityForSsd = dataLocalityForSsd;
     }

     @Override

@@ -441,6 +452,11 @@ public final class RegionMetricsBuilder {
       return lastMajorCompactionTimestamp;
     }

+    @Override
+    public float getDataLocalityForSsd() {
+      return dataLocalityForSsd;
+    }
+
     @Override
     public String toString() {
       StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount",

@@ -492,6 +508,8 @@ public final class RegionMetricsBuilder {
           this.getCompletedSequenceId());
       Strings.appendKeyValue(sb, "dataLocality",
           this.getDataLocality());
+      Strings.appendKeyValue(sb, "dataLocalityForSsd",
+          this.getDataLocalityForSsd());
       return sb.toString();
     }
   }

@@ -155,6 +155,9 @@ message RegionLoad {
    * that belong to given region
    */
   optional int32 max_compacted_store_file_ref_count = 22 [default = 0];
+
+  /** The current data locality for ssd for the region in the regionserver */
+  optional float data_locality_for_ssd = 23;
 }

 message UserLoad {

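Because data_locality_for_ssd is an optional field, a RegionLoad serialized by an older server simply omits it; the client-side RegionMetricsBuilder hunk above therefore guards the read and falls back to 0.0f:

// Read pattern from the builder change above: an absent field reads as 0.0f.
float ssdLocality = regionLoadPB.hasDataLocalityForSsd()
  ? regionLoadPB.getDataLocalityForSsd() : 0.0f;
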
@@ -25,7 +25,7 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.TreeMap;
 import java.util.TreeSet;

+import org.apache.hadoop.fs.StorageType;
 import org.apache.yetus.audience.InterfaceAudience;

@@ -53,23 +53,28 @@ public class HDFSBlocksDistribution {

     private String host;
     private long weight;
+    private long weightForSsd;

     /**
      * Constructor
      * @param host the host name
      * @param weight the weight
+     * @param weightForSsd the weight for ssd
      */
-    public HostAndWeight(String host, long weight) {
+    public HostAndWeight(String host, long weight, long weightForSsd) {
       this.host = host;
       this.weight = weight;
+      this.weightForSsd = weightForSsd;
     }

     /**
      * add weight
      * @param weight the weight
+     * @param weightForSsd the weight for ssd
      */
-    public void addWeight(long weight) {
+    public void addWeight(long weight, long weightForSsd) {
       this.weight += weight;
+      this.weightForSsd += weightForSsd;
     }

     /**

@@ -86,6 +91,13 @@ public class HDFSBlocksDistribution {
       return weight;
     }

+    /**
+     * @return the weight for ssd
+     */
+    public long getWeightForSsd() {
+      return weightForSsd;
+    }
+
     /**
      * comparator used to sort hosts based on weight
      */

@@ -122,14 +134,33 @@ public class HDFSBlocksDistribution {
    * @param weight the weight
    */
   public void addHostsAndBlockWeight(String[] hosts, long weight) {
+    addHostsAndBlockWeight(hosts, weight, null);
+  }
+
+  /**
+   * add some weight to a list of hosts, update the value of unique block weight
+   * @param hosts the list of hosts
+   * @param weight the weight
+   */
+  public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) {
     if (hosts == null || hosts.length == 0) {
       // erroneous data
       return;
     }

     addUniqueWeight(weight);
-    for (String hostname : hosts) {
-      addHostAndBlockWeight(hostname, weight);
+    if (storageTypes != null && storageTypes.length == hosts.length) {
+      for (int i = 0; i < hosts.length; i++) {
+        long weightForSsd = 0;
+        if (storageTypes[i] == StorageType.SSD) {
+          weightForSsd = weight;
+        }
+        addHostAndBlockWeight(hosts[i], weight, weightForSsd);
+      }
+    } else {
+      for (String hostname : hosts) {
+        addHostAndBlockWeight(hostname, weight, 0);
+      }
     }
   }

@@ -141,13 +172,13 @@ public class HDFSBlocksDistribution {
     uniqueBlocksTotalWeight += weight;
   }

-
   /**
    * add some weight to a specific host
    * @param host the host name
    * @param weight the weight
+   * @param weightForSsd the weight for ssd
    */
-  private void addHostAndBlockWeight(String host, long weight) {
+  private void addHostAndBlockWeight(String host, long weight, long weightForSsd) {
     if (host == null) {
       // erroneous data
       return;

@@ -155,10 +186,10 @@ public class HDFSBlocksDistribution {

     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
     if(hostAndWeight == null) {
-      hostAndWeight = new HostAndWeight(host, weight);
+      hostAndWeight = new HostAndWeight(host, weight, weightForSsd);
       this.hostAndWeights.put(host, hostAndWeight);
     } else {
-      hostAndWeight.addWeight(weight);
+      hostAndWeight.addWeight(weight, weightForSsd);
     }
   }

@@ -194,20 +225,43 @@ public class HDFSBlocksDistribution {
   }

   /**
-   * return the locality index of a given host
+   * Implementations 'visit' hostAndWeight.
+   */
+  public interface Visitor {
+    float visit(final HostAndWeight hostAndWeight);
+  }
+
+  /**
    * @param host the host name
    * @return the locality index of the given host
    */
   public float getBlockLocalityIndex(String host) {
+    return getBlockLocalityIndexInternal(host,
+      e -> (float) e.weight / (float) uniqueBlocksTotalWeight);
+  }
+
+  /**
+   * @param host the host name
+   * @return the locality index with ssd of the given host
+   */
+  public float getBlockLocalityIndexForSsd(String host) {
+    return getBlockLocalityIndexInternal(host,
+      e -> (float) e.weightForSsd / (float) uniqueBlocksTotalWeight);
+  }
+
+  /**
+   * @param host the host name
+   * @return the locality index of the given host
+   */
+  private float getBlockLocalityIndexInternal(String host, Visitor visitor) {
     float localityIndex = 0;
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
     if (hostAndWeight != null && uniqueBlocksTotalWeight != 0) {
-      localityIndex=(float)hostAndWeight.weight/(float)uniqueBlocksTotalWeight;
+      localityIndex = visitor.visit(hostAndWeight);
     }
     return localityIndex;
   }

   /**
    * This will add the distribution from input to this object
    * @param otherBlocksDistribution the other hdfs blocks distribution

@@ -218,7 +272,7 @@ public class HDFSBlocksDistribution {
     for (Map.Entry<String, HostAndWeight> otherHostAndWeight:
       otherHostAndWeights.entrySet()) {
       addHostAndBlockWeight(otherHostAndWeight.getValue().host,
-        otherHostAndWeight.getValue().weight);
+        otherHostAndWeight.getValue().weight, otherHostAndWeight.getValue().weightForSsd);
     }
     addUniqueWeight(otherBlocksDistribution.getUniqueBlocksTotalWeight());
   }

@@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.ExecutorStatusChore;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;

@@ -1692,8 +1693,9 @@ public class HRegionServer extends Thread implements
       totalStaticBloomSizeKB += (int) (store.getTotalStaticBloomSize() / 1024);
     }

-    float dataLocality =
-        r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
+    HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
+    float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname());
+    float dataLocalityForSsd = hdfsBd.getBlockLocalityIndexForSsd(serverName.getHostname());
     if (regionLoadBldr == null) {
       regionLoadBldr = RegionLoad.newBuilder();
     }

@@ -1721,6 +1723,7 @@ public class HRegionServer extends Thread implements
         .setTotalCompactingKVs(totalCompactingKVs)
         .setCurrentCompactedKVs(currentCompactedKVs)
         .setDataLocality(dataLocality)
+        .setDataLocalityForSsd(dataLocalityForSsd)
         .setLastMajorCompactionTs(r.getOldestHfileTs(true));
     r.setCompleteSequenceId(regionLoadBldr);

@@ -61,6 +61,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HConstants;

@@ -729,12 +730,7 @@ public final class FSUtils {
     HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
     BlockLocation [] blockLocations =
       fs.getFileBlockLocations(status, start, length);
-    for(BlockLocation bl : blockLocations) {
-      String [] hosts = bl.getHosts();
-      long len = bl.getLength();
-      blocksDistribution.addHostsAndBlockWeight(hosts, len);
-    }
-
+    addToHDFSBlocksDistribution(blocksDistribution, blockLocations);
     return blocksDistribution;
   }

@@ -749,7 +745,8 @@ public final class FSUtils {
     for (BlockLocation bl : blockLocations) {
       String[] hosts = bl.getHosts();
       long len = bl.getLength();
-      blocksDistribution.addHostsAndBlockWeight(hosts, len);
+      StorageType[] storageTypes = bl.getStorageTypes();
+      blocksDistribution.addHostsAndBlockWeight(hosts, len, storageTypes);
     }
   }

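End to end, a caller can now get SSD-aware locality for any HDFS file. A minimal sketch, assuming an open FileSystem `fs` and the FileStatus `status` of an HFile (the hostname is hypothetical):

HDFSBlocksDistribution dist =
  FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
float locality = dist.getBlockLocalityIndex("datanode-1.example.com");
float ssdLocality = dist.getBlockLocalityIndexForSsd("datanode-1.example.com");
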
@@ -227,6 +227,9 @@ if (fqtn != null && master.isInitialized()) {
           <li class="active">
             <a href="#metaTab_baseStats" data-toggle="tab">Base Stats</a>
           </li>
+          <li class="">
+            <a href="#metaTab_localityStats" data-toggle="tab">Localities</a>
+          </li>
           <li class="">
             <a href="#metaTab_compactStats" data-toggle="tab">Compactions</a>
           </li>

@@ -243,7 +246,6 @@ if (fqtn != null && master.isInitialized()) {
           <th>StorefileSize</th>
           <th>Num.Storefiles</th>
           <th>MemSize</th>
-          <th>Locality</th>
           <th>Start Key</th>
           <th>End Key</th>
 <%

@@ -269,7 +271,6 @@ if (fqtn != null && master.isInitialized()) {
     String fileSize = ZEROMB;
     String fileCount = "N/A";
     String memSize = ZEROMB;
-    float locality = 0.0f;

     if (metaLocation != null) {
       ServerMetrics sl = master.getServerManager().getLoad(metaLocation);

@@ -290,7 +291,6 @@ if (fqtn != null && master.isInitialized()) {
         if (mSize > 0) {
           memSize = StringUtils.byteDesc((long)mSize);
         }
-        locality = load.getDataLocality();
       }
     }
   }

@@ -303,7 +303,6 @@ if (fqtn != null && master.isInitialized()) {
     <td><%= fileSize%></td>
     <td><%= fileCount%></td>
     <td><%= memSize%></td>
-    <td><%= locality%></td>
     <td><%= escapeXml(Bytes.toString(meta.getStartKey())) %></td>
     <td><%= escapeXml(Bytes.toString(meta.getEndKey())) %></td>
 <%

@@ -319,6 +318,52 @@ if (fqtn != null && master.isInitialized()) {
       </tbody>
     </table>
   </div>
+  <div class="tab-pane" id="metaTab_localityStats">
+    <table id="tableRegionTable" class="tablesorter table table-striped">
+      <thead>
+        <tr>
+          <th>Name</th>
+          <th>Region Server</th>
+          <th>Locality</th>
+          <th>LocalityForSsd</th>
+        </tr>
+      </thead>
+      <tbody>
+<%
+  // NOTE: Presumes meta with one or more replicas
+  for (int j = 0; j < numMetaReplicas; j++) {
+    RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
+      RegionInfoBuilder.FIRST_META_REGIONINFO, j);
+    ServerName metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
+    for (int i = 0; i < 1; i++) {
+      String hostAndPort = "";
+      float locality = 0.0f;
+      float localityForSsd = 0.0f;
+
+      if (metaLocation != null) {
+        ServerMetrics sl = master.getServerManager().getLoad(metaLocation);
+        hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation);
+        if (sl != null) {
+          Map<byte[], RegionMetrics> map = sl.getRegionMetrics();
+          if (map.containsKey(meta.getRegionName())) {
+            RegionMetrics load = map.get(meta.getRegionName());
+            locality = load.getDataLocality();
+            localityForSsd = load.getDataLocalityForSsd();
+          }
+        }
+      }
+%>
+        <tr>
+          <td><%= escapeXml(meta.getRegionNameAsString()) %></td>
+          <td><a href="http://<%= hostAndPort %>/rs-status"><%= StringEscapeUtils.escapeHtml4(hostAndPort) %></a></td>
+          <td><%= locality%></td>
+          <td><%= localityForSsd%></td>
+        </tr>
+<% } %>
+<%} %>
+      </tbody>
+    </table>
+  </div>
   <div class="tab-pane" id="metaTab_compactStats">
     <table id="metaTableCompactStatsTable" class="tablesorter table table-striped">
       <thead>

@@ -778,6 +823,9 @@ if (fqtn != null && master.isInitialized()) {
       <li class="active">
         <a href="#tab_baseStats" data-toggle="tab">Base Stats</a>
       </li>
+      <li class="">
+        <a href="#tab_localityStats" data-toggle="tab">Localities</a>
+      </li>
       <li class="">
         <a href="#tab_compactStats" data-toggle="tab">Compactions</a>
       </li>

@@ -794,7 +842,6 @@ if (fqtn != null && master.isInitialized()) {
           <th>StorefileSize<br>(<%= totalSizeStr %>)</th>
           <th>Num.Storefiles<br>(<%= String.format("%,1d", totalStoreFileCount)%>)</th>
           <th>MemSize<br>(<%= totalMemSizeStr %>)</th>
-          <th>Locality</th>
           <th>Start Key</th>
           <th>End Key</th>
           <th>Region State</th>

@@ -826,7 +873,6 @@ if (fqtn != null && master.isInitialized()) {
   String regionSize = ZEROMB;
   String fileCount = "N/A";
   String memSize = ZEROMB;
-  float locality = 0.0f;
   String state = "N/A";
   if (load != null) {
     readReq = String.format("%,1d", load.getReadRequestCount());

@@ -840,7 +886,6 @@ if (fqtn != null && master.isInitialized()) {
     if (mSize > 0) {
       memSize = StringUtils.byteDesc((long)mSize);
     }
-    locality = load.getDataLocality();
   }

   if (stateMap.containsKey(regionInfo.getEncodedName())) {

@@ -886,7 +931,6 @@ if (fqtn != null && master.isInitialized()) {
     <td><%= regionSize%></td>
     <td><%= fileCount%></td>
     <td><%= memSize%></td>
-    <td><%= locality%></td>
     <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getStartKey()))%></td>
     <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getEndKey()))%></td>
     <td><%= state%></td>

@@ -910,6 +954,64 @@ if (fqtn != null && master.isInitialized()) {
      here</a> to see all regions.</p>
 <% } %>
 </div>
+<div class="tab-pane" id="tab_localityStats">
+  <table id="regionServerDetailsTable" class="tablesorter table table-striped">
+    <thead>
+      <tr>
+        <th>Name(<%= String.format("%,1d", regions.size())%>)</th>
+        <th>Region Server</th>
+        <th>Locality</th>
+        <th>LocalityForSsd</th>
+      </tr>
+    </thead>
+    <tbody>
+<%
+  numRegionsRendered = 0;
+  for (Map.Entry<RegionInfo, RegionMetrics> hriEntry : entryList) {
+    RegionInfo regionInfo = hriEntry.getKey();
+    ServerName addr = regionsToServer.get(regionInfo);
+    RegionMetrics load = hriEntry.getValue();
+    String urlRegionServer = null;
+    float locality = 0.0f;
+    float localityForSsd = 0.0f;
+    String state = "N/A";
+    if (load != null) {
+      locality = load.getDataLocality();
+      localityForSsd = load.getDataLocalityForSsd();
+    }
+
+    if (addr != null) {
+      // This port might be wrong if RS actually ended up using something else.
+      urlRegionServer =
+        "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/rs-status";
+    }
+
+    if (numRegionsRendered < numRegionsToRender) {
+      numRegionsRendered++;
+%>
+      <tr>
+        <td><%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %></td>
+<%
+      if (urlRegionServer != null) {
+%>
+        <td>
+          <a href="<%= urlRegionServer %>"><%= addr == null? "-": StringEscapeUtils.escapeHtml4(addr.getHostname().toString()) + ":" + master.getRegionServerInfoPort(addr) %></a>
+        </td>
+<%
+      } else {
+%>
+        <td class="undeployed-region">not deployed</td>
+<%
+      }
+%>
+        <td><%= locality%></td>
+        <td><%= localityForSsd%></td>
+      </tr>
+<% } %>
+<% } %>
+    </tbody>
+  </table>
+</div>
 <div class="tab-pane" id="tab_compactStats">
   <table id="tableCompactStatsTable" class="tablesorter table table-striped">
     <thead>

@@ -21,6 +21,8 @@ import static junit.framework.Assert.assertEquals;

 import java.util.HashMap;
 import java.util.Map;
+
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;

@@ -50,13 +52,19 @@ public class TestHDFSBlocksDistribution {
     distribution.addHostsAndBlockWeight(new String[] {"testTwo"}, 222);
     assertEquals("Should be two hosts", 2, distribution.getHostAndWeights().size());
     assertEquals("Total weight should be 525", 525, distribution.getUniqueBlocksTotalWeight());
+    distribution.addHostsAndBlockWeight(new String[] {"test"}, 100,
+      new StorageType[] { StorageType.SSD });
+    assertEquals("test host should have weight 403", 403,
+      distribution.getHostAndWeights().get("test").getWeight());
+    assertEquals("test host should have weight for ssd 100", 100,
+      distribution.getHostAndWeights().get("test").getWeightForSsd());
   }

   public class MockHDFSBlocksDistribution extends HDFSBlocksDistribution {
     @Override
     public Map<String,HostAndWeight> getHostAndWeights() {
       HashMap<String, HostAndWeight> map = new HashMap<>();
-      map.put("test", new HostAndWeight(null, 100));
+      map.put("test", new HostAndWeight(null, 100, 0));
       return map;
     }

@@ -493,6 +493,10 @@ public class TestRegionsRecoveryChore {
       return compactedStoreRefCount;
     }

+    @Override
+    public float getDataLocalityForSsd() {
+      return 0;
+    }
   };
   return regionMetrics;
 }