From b83d38fb943e98a0916ea0c639b1709dd972076a Mon Sep 17 00:00:00 2001
From: bsglz <18031031@qq.com>
Date: Sun, 14 Jun 2020 22:39:48 +0530
Subject: [PATCH] HBASE-24038 Add a metric to show the locality of ssd in table.jsp (#1337)

Signed-off-by: Wellington Chevreuil
Signed-off-by: Viraj Jasani
---
 .../apache/hadoop/hbase/RegionMetrics.java    |   6 +
 .../hadoop/hbase/RegionMetricsBuilder.java    |  22 +++-
 .../main/protobuf/server/ClusterStatus.proto  |   3 +
 .../hadoop/hbase/HDFSBlocksDistribution.java  |  80 ++++++++++--
 .../hbase/regionserver/HRegionServer.java     |   7 +-
 .../org/apache/hadoop/hbase/util/FSUtils.java |  11 +-
 .../resources/hbase-webapps/master/table.jsp  | 118 ++++++++++++++++--
 .../hbase/TestHDFSBlocksDistribution.java     |  10 +-
 .../master/TestRegionsRecoveryChore.java      |   4 +
 9 files changed, 228 insertions(+), 33 deletions(-)

diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
index ddc64c3064f..27ae3e82b47 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -160,4 +160,10 @@ public interface RegionMetrics {
    * of this region
    */
   int getMaxCompactedStoreFileRefCount();
+
+  /**
+   * Different from dataLocality, this metric's numerator only includes data stored on SSD
+   * @return the data locality for SSD of the region on this region server
+   */
+  float getDataLocalityForSsd();
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
index 05e33deae05..89801b39fc7 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
@@ -51,6 +51,8 @@ public final class RegionMetricsBuilder {
       .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs())
       .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId())
       .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f)
+      .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ?
+        regionLoadPB.getDataLocalityForSsd() : 0.0f)
       .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount())
       .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(),
         Size.Unit.KILOBYTE))
@@ -148,6 +150,7 @@ public final class RegionMetricsBuilder {
   private Map<byte[], Long> storeSequenceIds = Collections.emptyMap();
   private float dataLocality;
   private long lastMajorCompactionTimestamp;
+  private float dataLocalityForSsd;
   private RegionMetricsBuilder(byte[] name) {
     this.name = name;
   }
@@ -236,6 +239,10 @@ public final class RegionMetricsBuilder {
     this.lastMajorCompactionTimestamp = value;
     return this;
   }
+  public RegionMetricsBuilder setDataLocalityForSsd(float value) {
+    this.dataLocalityForSsd = value;
+    return this;
+  }
 
   public RegionMetrics build() {
     return new RegionMetricsImpl(name,
@@ -259,7 +266,8 @@
         completedSequenceId,
         storeSequenceIds,
         dataLocality,
-        lastMajorCompactionTimestamp);
+        lastMajorCompactionTimestamp,
+        dataLocalityForSsd);
   }
 
   private static class RegionMetricsImpl implements RegionMetrics {
@@ -285,6 +293,7 @@
     private final Map<byte[], Long> storeSequenceIds;
     private final float dataLocality;
     private final long lastMajorCompactionTimestamp;
+    private final float dataLocalityForSsd;
     RegionMetricsImpl(byte[] name,
         int storeCount,
         int storeFileCount,
@@ -306,7 +315,8 @@
         long completedSequenceId,
         Map<byte[], Long> storeSequenceIds,
         float dataLocality,
-        long lastMajorCompactionTimestamp) {
+        long lastMajorCompactionTimestamp,
+        float dataLocalityForSsd) {
       this.name = Preconditions.checkNotNull(name);
       this.storeCount = storeCount;
       this.storeFileCount = storeFileCount;
@@ -329,6 +339,7 @@
       this.storeSequenceIds = Preconditions.checkNotNull(storeSequenceIds);
       this.dataLocality = dataLocality;
       this.lastMajorCompactionTimestamp = lastMajorCompactionTimestamp;
+      this.dataLocalityForSsd = dataLocalityForSsd;
     }
 
     @Override
@@ -441,6 +452,11 @@
       return lastMajorCompactionTimestamp;
     }
 
+    @Override
+    public float getDataLocalityForSsd() {
+      return dataLocalityForSsd;
+    }
+
     @Override
     public String toString() {
       StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount",
@@ -492,6 +508,8 @@
           this.getCompletedSequenceId());
       Strings.appendKeyValue(sb, "dataLocality",
           this.getDataLocality());
+      Strings.appendKeyValue(sb, "dataLocalityForSsd",
+          this.getDataLocalityForSsd());
       return sb.toString();
     }
   }
diff --git a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
index 874bc88cf7f..c62a7ba7424 100644
--- a/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/server/ClusterStatus.proto
@@ -155,6 +155,9 @@ message RegionLoad {
    * that belong to given region
    */
   optional int32 max_compacted_store_file_ref_count = 22 [default = 0];
+
+  /** The current data locality for ssd for region in the regionserver */
+  optional float data_locality_for_ssd = 23;
 }
 
 message UserLoad {
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
index aa25fef452c..734a27ed60e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
@@ -25,7 +25,7 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
-
+import org.apache.hadoop.fs.StorageType;
 import org.apache.yetus.audience.InterfaceAudience;
 
@@ -53,23 +53,28 @@ public class HDFSBlocksDistribution {
     private String host;
     private long weight;
+    private long weightForSsd;
 
     /**
      * Constructor
      * @param host the host name
      * @param weight the weight
+     * @param weightForSsd the weight for ssd
      */
-    public HostAndWeight(String host, long weight) {
+    public HostAndWeight(String host, long weight, long weightForSsd) {
       this.host = host;
       this.weight = weight;
+      this.weightForSsd = weightForSsd;
     }
 
     /**
      * add weight
      * @param weight the weight
+     * @param weightForSsd the weight for ssd
      */
-    public void addWeight(long weight) {
+    public void addWeight(long weight, long weightForSsd) {
       this.weight += weight;
+      this.weightForSsd += weightForSsd;
     }
 
     /**
@@ -86,6 +91,13 @@
       return weight;
     }
 
+    /**
+     * @return the weight for ssd
+     */
+    public long getWeightForSsd() {
+      return weightForSsd;
+    }
+
     /**
      * comparator used to sort hosts based on weight
      */
@@ -122,14 +134,33 @@
    * @param weight the weight
    */
   public void addHostsAndBlockWeight(String[] hosts, long weight) {
+    addHostsAndBlockWeight(hosts, weight, null);
+  }
+
+  /**
+   * add some weight to a list of hosts, update the value of unique block weight
+   * @param hosts the list of the host
+   * @param weight the weight
+   */
+  public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) {
     if (hosts == null || hosts.length == 0) {
       // erroneous data
       return;
     }
 
     addUniqueWeight(weight);
-    for (String hostname : hosts) {
-      addHostAndBlockWeight(hostname, weight);
+    if (storageTypes != null && storageTypes.length == hosts.length) {
+      for (int i = 0; i < hosts.length; i++) {
+        long weightForSsd = 0;
+        if (storageTypes[i] == StorageType.SSD) {
+          weightForSsd = weight;
+        }
+        addHostAndBlockWeight(hosts[i], weight, weightForSsd);
+      }
+    } else {
+      for (String hostname : hosts) {
+        addHostAndBlockWeight(hostname, weight, 0);
+      }
     }
   }
 
@@ -141,13 +172,13 @@
     uniqueBlocksTotalWeight += weight;
   }
 
-
   /**
    * add some weight to a specific host
    * @param host the host name
    * @param weight the weight
+   * @param weightForSsd the weight for ssd
    */
-  private void addHostAndBlockWeight(String host, long weight) {
+  private void addHostAndBlockWeight(String host, long weight, long weightForSsd) {
     if (host == null) {
       // erroneous data
       return;
@@ -155,10 +186,10 @@
 
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
     if(hostAndWeight == null) {
-      hostAndWeight = new HostAndWeight(host, weight);
+      hostAndWeight = new HostAndWeight(host, weight, weightForSsd);
       this.hostAndWeights.put(host, hostAndWeight);
     } else {
-      hostAndWeight.addWeight(weight);
+      hostAndWeight.addWeight(weight, weightForSsd);
     }
   }
 
@@ -194,20 +225,43 @@
   }
 
   /**
-   * return the locality index of a given host
+   * Implementations 'visit' hostAndWeight.
+   */
+  public interface Visitor {
+    float visit(final HostAndWeight hostAndWeight);
+  }
+
+  /**
    * @param host the host name
    * @return the locality index of the given host
    */
   public float getBlockLocalityIndex(String host) {
+    return getBlockLocalityIndexInternal(host,
+      e -> (float) e.weight / (float) uniqueBlocksTotalWeight);
+  }
+
+  /**
+   * @param host the host name
+   * @return the locality index with ssd of the given host
+   */
+  public float getBlockLocalityIndexForSsd(String host) {
+    return getBlockLocalityIndexInternal(host,
+      e -> (float) e.weightForSsd / (float) uniqueBlocksTotalWeight);
+  }
+
+  /**
+   * @param host the host name
+   * @return the locality index of the given host
+   */
+  private float getBlockLocalityIndexInternal(String host, Visitor visitor) {
     float localityIndex = 0;
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
     if (hostAndWeight != null && uniqueBlocksTotalWeight != 0) {
-      localityIndex=(float)hostAndWeight.weight/(float)uniqueBlocksTotalWeight;
+      localityIndex = visitor.visit(hostAndWeight);
     }
     return localityIndex;
   }
 
-
   /**
    * This will add the distribution from input to this object
    * @param otherBlocksDistribution the other hdfs blocks distribution
@@ -218,7 +272,7 @@
     for (Map.Entry<String, HostAndWeight> otherHostAndWeight:
       otherHostAndWeights.entrySet()) {
       addHostAndBlockWeight(otherHostAndWeight.getValue().host,
-        otherHostAndWeight.getValue().weight);
+        otherHostAndWeight.getValue().weight, otherHostAndWeight.getValue().weightForSsd);
     }
     addUniqueWeight(otherBlocksDistribution.getUniqueBlocksTotalWeight());
   }
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 34052bbfba3..26b3fc25195 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -73,6 +73,7 @@ import org.apache.hadoop.hbase.ExecutorStatusChore;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
@@ -1692,8 +1693,9 @@ public class HRegionServer extends Thread implements
       totalStaticBloomSizeKB += (int) (store.getTotalStaticBloomSize() / 1024);
     }
 
-    float dataLocality =
-        r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
+    HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
+    float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname());
+    float dataLocalityForSsd = hdfsBd.getBlockLocalityIndexForSsd(serverName.getHostname());
     if (regionLoadBldr == null) {
       regionLoadBldr = RegionLoad.newBuilder();
     }
@@ -1721,6 +1723,7 @@
         .setTotalCompactingKVs(totalCompactingKVs)
         .setCurrentCompactedKVs(currentCompactedKVs)
         .setDataLocality(dataLocality)
+        .setDataLocalityForSsd(dataLocalityForSsd)
         .setLastMajorCompactionTs(r.getOldestHfileTs(true));
     r.setCompleteSequenceId(regionLoadBldr);
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 4b6c8af4c53..a75d31cddeb 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HConstants;
@@ -729,12 +730,7 @@ public final class FSUtils {
     HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
     BlockLocation [] blockLocations =
       fs.getFileBlockLocations(status, start, length);
-    for(BlockLocation bl : blockLocations) {
-      String [] hosts = bl.getHosts();
-      long len = bl.getLength();
-      blocksDistribution.addHostsAndBlockWeight(hosts, len);
-    }
-
+    addToHDFSBlocksDistribution(blocksDistribution, blockLocations);
     return blocksDistribution;
   }
 
@@ -749,7 +745,8 @@ public final class FSUtils {
     for (BlockLocation bl : blockLocations) {
       String[] hosts = bl.getHosts();
       long len = bl.getLength();
-      blocksDistribution.addHostsAndBlockWeight(hosts, len);
+      StorageType[] storageTypes = bl.getStorageTypes();
+      blocksDistribution.addHostsAndBlockWeight(hosts, len, storageTypes);
     }
   }
 
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 104535e83b4..ef173158f0c 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -227,6 +227,9 @@ if (fqtn != null && master.isInitialized()) {
  • Base Stats
  • +
  • + Localities +
  • Compactions
  • @@ -243,7 +246,6 @@ if (fqtn != null && master.isInitialized()) { StorefileSize Num.Storefiles MemSize - Locality Start Key End Key <% @@ -269,7 +271,6 @@ if (fqtn != null && master.isInitialized()) { String fileSize = ZEROMB; String fileCount = "N/A"; String memSize = ZEROMB; - float locality = 0.0f; if (metaLocation != null) { ServerMetrics sl = master.getServerManager().getLoad(metaLocation); @@ -290,7 +291,6 @@ if (fqtn != null && master.isInitialized()) { if (mSize > 0) { memSize = StringUtils.byteDesc((long)mSize); } - locality = load.getDataLocality(); } } } @@ -303,7 +303,6 @@ if (fqtn != null && master.isInitialized()) { <%= fileSize%> <%= fileCount%> <%= memSize%> - <%= locality%> <%= escapeXml(Bytes.toString(meta.getStartKey())) %> <%= escapeXml(Bytes.toString(meta.getEndKey())) %> <% @@ -319,6 +318,52 @@ if (fqtn != null && master.isInitialized()) { +
    + + + + + + + + + + + <% + // NOTE: Presumes meta with one or more replicas + for (int j = 0; j < numMetaReplicas; j++) { + RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica( + RegionInfoBuilder.FIRST_META_REGIONINFO, j); + ServerName metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1); + for (int i = 0; i < 1; i++) { + String hostAndPort = ""; + float locality = 0.0f; + float localityForSsd = 0.0f; + + if (metaLocation != null) { + ServerMetrics sl = master.getServerManager().getLoad(metaLocation); + hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" + master.getRegionServerInfoPort(metaLocation); + if (sl != null) { + Map map = sl.getRegionMetrics(); + if (map.containsKey(meta.getRegionName())) { + RegionMetrics load = map.get(meta.getRegionName()); + locality = load.getDataLocality(); + localityForSsd = load.getDataLocalityForSsd(); + } + } + } + %> + + + + + + + <% } %> + <%} %> + +
<tr><th>Name</th><th>Region Server</th><th>Locality</th><th>LocalityForSsd</th></tr>
<tr><td><%= escapeXml(meta.getRegionNameAsString()) %></td><td><%= StringEscapeUtils.escapeHtml4(hostAndPort) %></td><td><%= locality%></td><td><%= localityForSsd%></td></tr>
    +
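Outside the web UI, the numbers rendered in this new Localities tab are also reachable through the plain client API once RegionServers report the field. A minimal sketch, assuming a running cluster and a hypothetical table name `t1`; `Admin#getRegionServers()` and `Admin#getRegionMetrics(ServerName, TableName)` are the standard HBase 2.x client calls used here, but verify them against your client version:

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public final class SsdLocalityReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (ServerName sn : admin.getRegionServers()) {
        // Per-region metrics as reported in that server's load message,
        // including the dataLocalityForSsd field added by this patch.
        List<RegionMetrics> perRegion = admin.getRegionMetrics(sn, TableName.valueOf("t1"));
        for (RegionMetrics rm : perRegion) {
          System.out.printf("%s on %s: locality=%.2f ssdLocality=%.2f%n",
            Bytes.toStringBinary(rm.getRegionName()), sn,
            rm.getDataLocality(), rm.getDataLocalityForSsd());
        }
      }
    }
  }
}
```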
    @@ -778,6 +823,9 @@ if (fqtn != null && master.isInitialized()) {
  • Base Stats
  • +
  • + Localities +
  • Compactions
  • @@ -794,7 +842,6 @@ if (fqtn != null && master.isInitialized()) { - @@ -826,7 +873,6 @@ if (fqtn != null && master.isInitialized()) { String regionSize = ZEROMB; String fileCount = "N/A"; String memSize = ZEROMB; - float locality = 0.0f; String state = "N/A"; if (load != null) { readReq = String.format("%,1d", load.getReadRequestCount()); @@ -840,7 +886,6 @@ if (fqtn != null && master.isInitialized()) { if (mSize > 0) { memSize = StringUtils.byteDesc((long)mSize); } - locality = load.getDataLocality(); } if (stateMap.containsKey(regionInfo.getEncodedName())) { @@ -886,7 +931,6 @@ if (fqtn != null && master.isInitialized()) { - @@ -910,6 +954,64 @@ if (fqtn != null && master.isInitialized()) { here to see all regions.

    <% } %> +
    +
    StorefileSize
    (<%= totalSizeStr %>)
    Num.Storefiles
    (<%= String.format("%,1d", totalStoreFileCount)%>)
    MemSize
    (<%= totalMemSizeStr %>)
    Locality Start Key End Key Region State<%= regionSize%> <%= fileCount%> <%= memSize%><%= locality%> <%= escapeXml(Bytes.toStringBinary(regionInfo.getStartKey()))%> <%= escapeXml(Bytes.toStringBinary(regionInfo.getEndKey()))%> <%= state%>
    + + + + + + + + + + <% + numRegionsRendered = 0; + for (Map.Entry hriEntry : entryList) { + RegionInfo regionInfo = hriEntry.getKey(); + ServerName addr = regionsToServer.get(regionInfo); + RegionMetrics load = hriEntry.getValue(); + String urlRegionServer = null; + float locality = 0.0f; + float localityForSsd = 0.0f; + String state = "N/A"; + if (load != null) { + locality = load.getDataLocality(); + localityForSsd = load.getDataLocalityForSsd(); + } + + if (addr != null) { + // This port might be wrong if RS actually ended up using something else. + urlRegionServer = + "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/rs-status"; + } + + if (numRegionsRendered < numRegionsToRender) { + numRegionsRendered++; + %> + + + <% + if (urlRegionServer != null) { + %> + + <% + } else { + %> + + <% + } + %> + + + + <% } %> + <% } %> + +
<tr><th>Name(<%= String.format("%,1d", regions.size())%>)</th><th>Region Server</th><th>Locality</th><th>LocalityForSsd</th></tr>
    <%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %> + <%= addr == null? "-": StringEscapeUtils.escapeHtml4(addr.getHostname().toString()) + ":" + master.getRegionServerInfoPort(addr) %> + not deployed<%= locality%><%= localityForSsd%>
    +
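Both Localities tabs render ratios computed by the server-side HDFSBlocksDistribution class patched above: the denominator is always the unique-blocks total weight, while the SSD numerator only accumulates bytes whose replica on the given host is SSD-backed. A self-contained sketch of that accounting, with made-up hostnames and one 128 MB block:

```java
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public final class LocalityMath {
  public static void main(String[] args) {
    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    long blockSize = 128L * 1024 * 1024;
    // One block, three replicas; only host-a's replica sits on SSD.
    dist.addHostsAndBlockWeight(
      new String[] { "host-a", "host-b", "host-c" }, blockSize,
      new StorageType[] { StorageType.SSD, StorageType.DISK, StorageType.DISK });
    // Every host holds a local replica, so plain locality is 1.0 for each...
    System.out.println(dist.getBlockLocalityIndex("host-a"));       // 1.0
    // ...but only host-a's bytes count toward the SSD numerator.
    System.out.println(dist.getBlockLocalityIndexForSsd("host-a")); // 1.0
    System.out.println(dist.getBlockLocalityIndexForSsd("host-b")); // 0.0
  }
}
```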
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
index e9ec333e31e..bebc7e357c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
@@ -21,6 +21,8 @@ import static junit.framework.Assert.assertEquals;
 
 import java.util.HashMap;
 import java.util.Map;
+
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
@@ -50,13 +52,19 @@ public class TestHDFSBlocksDistribution {
     distribution.addHostsAndBlockWeight(new String[] {"testTwo"}, 222);
     assertEquals("Should be two hosts", 2, distribution.getHostAndWeights().size());
     assertEquals("Total weight should be 525", 525, distribution.getUniqueBlocksTotalWeight());
+    distribution.addHostsAndBlockWeight(new String[] {"test"}, 100,
+      new StorageType[] { StorageType.SSD });
+    assertEquals("test host should have weight 403", 403,
+      distribution.getHostAndWeights().get("test").getWeight());
+    assertEquals("test host should have weight for ssd 100", 100,
+      distribution.getHostAndWeights().get("test").getWeightForSsd());
   }
 
   public class MockHDFSBlocksDistribution extends HDFSBlocksDistribution {
     @Override
     public Map<String, HostAndWeight> getHostAndWeights() {
       HashMap<String, HostAndWeight> map = new HashMap<>();
-      map.put("test", new HostAndWeight(null, 100));
+      map.put("test", new HostAndWeight(null, 100, 0));
       return map;
     }
 
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
index 1f5802c9864..38d60c481c5 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
@@ -493,6 +493,10 @@ public class TestRegionsRecoveryChore {
       return compactedStoreRefCount;
     }
 
+    @Override
+    public float getDataLocalityForSsd() {
+      return 0;
+    }
   };
   return regionMetrics;
 }
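The expectations in the updated TestHDFSBlocksDistribution follow from the same bookkeeping: the earlier calls leave a unique total weight of 525 with host "test" holding 525 - 222 = 303, so one more 100-byte block whose replica is on SSD yields 403 total and 100 SSD-only. A worked restatement of that arithmetic (hypothetical class name, no new behavior):

```java
public final class SsdLocalityArithmetic {
  public static void main(String[] args) {
    long testWeight = 303 + 100;   // host "test" after the SSD-tagged block: 403
    long testSsdWeight = 0 + 100;  // SSD counter starts at the new block's length: 100
    long uniqueTotal = 525 + 100;  // addUniqueWeight is bumped once per block: 625
    System.out.println(testWeight);                          // 403
    System.out.println(testSsdWeight);                       // 100
    System.out.println((float) testSsdWeight / uniqueTotal); // 0.16
  }
}
```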