diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
index fcaec60d403..faaeba7d485 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionLoad.java
@@ -370,6 +370,11 @@ public class RegionLoad implements RegionMetrics {
     return metrics.getMaxCompactedStoreFileRefCount();
   }
 
+  @Override
+  public float getDataLocalityForSsd() {
+    return metrics.getDataLocalityForSsd();
+  }
+
   /**
    * @see java.lang.Object#toString()
    */
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
index eab57dc2e81..9266691657a 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetrics.java
@@ -154,4 +154,10 @@ public interface RegionMetrics {
    * of this region
    */
   int getMaxCompactedStoreFileRefCount();
+
+  /**
+   * Different from dataLocality, this metric's numerator only includes the data stored on SSD.
+   * @return the data locality for SSD of the region in the regionserver
+   */
+  float getDataLocalityForSsd();
 }
diff --git a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
index da0f4bfa229..1cfe9be6a97 100644
--- a/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
+++ b/hbase-client/src/main/java/org/apache/hadoop/hbase/RegionMetricsBuilder.java
@@ -51,6 +51,8 @@ public final class RegionMetricsBuilder {
       .setCompactingCellCount(regionLoadPB.getTotalCompactingKVs())
       .setCompletedSequenceId(regionLoadPB.getCompleteSequenceId())
       .setDataLocality(regionLoadPB.hasDataLocality() ? regionLoadPB.getDataLocality() : 0.0f)
+      .setDataLocalityForSsd(regionLoadPB.hasDataLocalityForSsd() ?
+        regionLoadPB.getDataLocalityForSsd() : 0.0f)
       .setFilteredReadRequestCount(regionLoadPB.getFilteredReadRequestsCount())
       .setStoreFileUncompressedDataIndexSize(new Size(regionLoadPB.getTotalStaticIndexSizeKB(),
         Size.Unit.KILOBYTE))
@@ -145,6 +147,7 @@ public final class RegionMetricsBuilder {
   private Map<byte[], Long> storeSequenceIds = Collections.emptyMap();
   private float dataLocality;
   private long lastMajorCompactionTimestamp;
+  private float dataLocalityForSsd;
   private RegionMetricsBuilder(byte[] name) {
     this.name = name;
   }
@@ -229,6 +232,10 @@ public final class RegionMetricsBuilder {
     this.lastMajorCompactionTimestamp = value;
     return this;
   }
+  public RegionMetricsBuilder setDataLocalityForSsd(float value) {
+    this.dataLocalityForSsd = value;
+    return this;
+  }
 
   public RegionMetrics build() {
     return new RegionMetricsImpl(name,
@@ -251,7 +258,8 @@
         completedSequenceId,
         storeSequenceIds,
         dataLocality,
-        lastMajorCompactionTimestamp);
+        lastMajorCompactionTimestamp,
+        dataLocalityForSsd);
   }
 
   private static class RegionMetricsImpl implements RegionMetrics {
@@ -276,6 +284,7 @@
     private final Map<byte[], Long> storeSequenceIds;
     private final float dataLocality;
     private final long lastMajorCompactionTimestamp;
+    private final float dataLocalityForSsd;
     RegionMetricsImpl(byte[] name,
       int storeCount,
       int storeFileCount,
@@ -296,7 +305,8 @@
       long completedSequenceId,
       Map<byte[], Long> storeSequenceIds,
       float dataLocality,
-      long lastMajorCompactionTimestamp) {
+      long lastMajorCompactionTimestamp,
+      float dataLocalityForSsd) {
       this.name = Preconditions.checkNotNull(name);
       this.storeCount = storeCount;
       this.storeFileCount = storeFileCount;
@@ -318,6 +328,7 @@
       this.storeSequenceIds = Preconditions.checkNotNull(storeSequenceIds);
       this.dataLocality = dataLocality;
       this.lastMajorCompactionTimestamp = lastMajorCompactionTimestamp;
+      this.dataLocalityForSsd = dataLocalityForSsd;
     }
 
     @Override
@@ -425,6 +436,11 @@
       return lastMajorCompactionTimestamp;
     }
 
+    @Override
+    public float getDataLocalityForSsd() {
+      return dataLocalityForSsd;
+    }
+
     @Override
     public String toString() {
       StringBuilder sb = Strings.appendKeyValue(new StringBuilder(), "storeCount",
@@ -474,6 +490,8 @@
           this.getCompletedSequenceId());
       Strings.appendKeyValue(sb, "dataLocality",
           this.getDataLocality());
+      Strings.appendKeyValue(sb, "dataLocalityForSsd",
+          this.getDataLocalityForSsd());
       return sb.toString();
     }
   }
diff --git a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
index d708674220c..cc8bc887a30 100644
--- a/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
+++ b/hbase-protocol-shaded/src/main/protobuf/ClusterStatus.proto
@@ -155,6 +155,9 @@ message RegionLoad {
    * that belong to given region
    */
   optional int32 max_compacted_store_file_ref_count = 22 [default = 0];
+
+  /** The current data locality for SSD of the region in the regionserver */
+  optional float data_locality_for_ssd = 23;
 }
 
 message UserLoad {
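For orientation, and not part of the patch itself: once regionservers report the new field, a client reads it through the same ClusterMetrics path as the existing dataLocality value. A minimal sketch, assuming a reachable cluster and the stock HBase 2.x client API:

// Sketch only (not part of the patch): print locality and SSD locality for every region.
import java.util.EnumSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class SsdLocalityReport {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics cluster = admin.getClusterMetrics(EnumSet.of(ClusterMetrics.Option.LIVE_SERVERS));
      for (ServerMetrics server : cluster.getLiveServerMetrics().values()) {
        for (RegionMetrics region : server.getRegionMetrics().values()) {
          // getDataLocalityForSsd() is the accessor introduced by this patch.
          System.out.printf("%s locality=%.2f localityForSsd=%.2f%n",
            region.getNameAsString(), region.getDataLocality(), region.getDataLocalityForSsd());
        }
      }
    }
  }
}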
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
index aa25fef452c..734a27ed60e 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/HDFSBlocksDistribution.java
@@ -25,7 +25,7 @@ import java.util.Map;
 import java.util.NavigableSet;
 import java.util.TreeMap;
 import java.util.TreeSet;
-
+import org.apache.hadoop.fs.StorageType;
 import org.apache.yetus.audience.InterfaceAudience;
@@ -53,23 +53,28 @@ public class HDFSBlocksDistribution {
     private String host;
     private long weight;
+    private long weightForSsd;
 
     /**
      * Constructor
      * @param host the host name
      * @param weight the weight
+     * @param weightForSsd the weight for SSD
      */
-    public HostAndWeight(String host, long weight) {
+    public HostAndWeight(String host, long weight, long weightForSsd) {
       this.host = host;
       this.weight = weight;
+      this.weightForSsd = weightForSsd;
     }
 
     /**
      * add weight
      * @param weight the weight
+     * @param weightForSsd the weight for SSD
      */
-    public void addWeight(long weight) {
+    public void addWeight(long weight, long weightForSsd) {
       this.weight += weight;
+      this.weightForSsd += weightForSsd;
     }
 
     /**
@@ -86,6 +91,13 @@ public class HDFSBlocksDistribution {
       return weight;
     }
 
+    /**
+     * @return the weight for SSD
+     */
+    public long getWeightForSsd() {
+      return weightForSsd;
+    }
+
     /**
      * comparator used to sort hosts based on weight
      */
@@ -122,14 +134,34 @@
    * @param weight the weight
    */
   public void addHostsAndBlockWeight(String[] hosts, long weight) {
+    addHostsAndBlockWeight(hosts, weight, null);
+  }
+
+  /**
+   * add some weight to a list of hosts, update the value of unique block weight
+   * @param hosts the list of the host
+   * @param weight the weight
+   * @param storageTypes the storage type of the replica on each corresponding host
+   */
+  public void addHostsAndBlockWeight(String[] hosts, long weight, StorageType[] storageTypes) {
     if (hosts == null || hosts.length == 0) {
       // erroneous data
       return;
     }
 
     addUniqueWeight(weight);
-    for (String hostname : hosts) {
-      addHostAndBlockWeight(hostname, weight);
+    if (storageTypes != null && storageTypes.length == hosts.length) {
+      for (int i = 0; i < hosts.length; i++) {
+        long weightForSsd = 0;
+        if (storageTypes[i] == StorageType.SSD) {
+          weightForSsd = weight;
+        }
+        addHostAndBlockWeight(hosts[i], weight, weightForSsd);
+      }
+    } else {
+      for (String hostname : hosts) {
+        addHostAndBlockWeight(hostname, weight, 0);
+      }
     }
   }
@@ -141,13 +172,13 @@
     uniqueBlocksTotalWeight += weight;
   }
 
-
   /**
    * add some weight to a specific host
    * @param host the host name
   * @param weight the weight
+   * @param weightForSsd the weight for SSD
    */
-  private void addHostAndBlockWeight(String host, long weight) {
+  private void addHostAndBlockWeight(String host, long weight, long weightForSsd) {
     if (host == null) {
       // erroneous data
       return;
@@ -155,10 +186,10 @@
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
 
     if(hostAndWeight == null) {
-      hostAndWeight = new HostAndWeight(host, weight);
+      hostAndWeight = new HostAndWeight(host, weight, weightForSsd);
       this.hostAndWeights.put(host, hostAndWeight);
     } else {
-      hostAndWeight.addWeight(weight);
+      hostAndWeight.addWeight(weight, weightForSsd);
     }
   }
@@ -194,20 +225,43 @@
   }
 
   /**
-   * return the locality index of a given host
+   * Implementations 'visit' hostAndWeight.
+   */
+  public interface Visitor {
+    float visit(final HostAndWeight hostAndWeight);
+  }
+
+  /**
    * @param host the host name
    * @return the locality index of the given host
    */
   public float getBlockLocalityIndex(String host) {
+    return getBlockLocalityIndexInternal(host,
+      e -> (float) e.weight / (float) uniqueBlocksTotalWeight);
+  }
+
+  /**
+   * @param host the host name
+   * @return the data locality index for SSD of the given host
+   */
+  public float getBlockLocalityIndexForSsd(String host) {
+    return getBlockLocalityIndexInternal(host,
+      e -> (float) e.weightForSsd / (float) uniqueBlocksTotalWeight);
+  }
+
+  /**
+   * @param host the host name
+   * @return the locality index of the given host
+   */
+  private float getBlockLocalityIndexInternal(String host, Visitor visitor) {
     float localityIndex = 0;
     HostAndWeight hostAndWeight = this.hostAndWeights.get(host);
     if (hostAndWeight != null && uniqueBlocksTotalWeight != 0) {
-      localityIndex=(float)hostAndWeight.weight/(float)uniqueBlocksTotalWeight;
+      localityIndex = visitor.visit(hostAndWeight);
    }
     return localityIndex;
   }
-
   /**
    * This will add the distribution from input to this object
    * @param otherBlocksDistribution the other hdfs blocks distribution
@@ -218,7 +272,7 @@
     for (Map.Entry<String, HostAndWeight> otherHostAndWeight:
       otherHostAndWeights.entrySet()) {
       addHostAndBlockWeight(otherHostAndWeight.getValue().host,
-        otherHostAndWeight.getValue().weight);
+        otherHostAndWeight.getValue().weight, otherHostAndWeight.getValue().weightForSsd);
     }
     addUniqueWeight(otherBlocksDistribution.getUniqueBlocksTotalWeight());
   }
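To make the relationship between the two indices concrete, here is a small illustrative snippet (again not part of the patch) that drives the new HDFSBlocksDistribution methods directly. Both indices divide by the same uniqueBlocksTotalWeight, so a host's SSD locality can never exceed its overall locality:

// Illustrative only: two blocks of 100 bytes, both replicated on host "rs1";
// only the first replica sits on SSD storage.
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;

public class LocalityIndexExample {
  public static void main(String[] args) {
    HDFSBlocksDistribution dist = new HDFSBlocksDistribution();
    dist.addHostsAndBlockWeight(new String[] { "rs1" }, 100, new StorageType[] { StorageType.SSD });
    dist.addHostsAndBlockWeight(new String[] { "rs1" }, 100, new StorageType[] { StorageType.DISK });

    float locality = dist.getBlockLocalityIndex("rs1");             // 200 / 200 = 1.0
    float localityForSsd = dist.getBlockLocalityIndexForSsd("rs1"); // 100 / 200 = 0.5
    System.out.println(locality + " " + localityForSsd);
  }
}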
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
index 01b866d3bbd..3c4157c3c50 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/regionserver/HRegionServer.java
@@ -72,6 +72,7 @@ import org.apache.hadoop.hbase.FailedCloseWALAfterInitializedErrorException;
 import org.apache.hadoop.hbase.HBaseConfiguration;
 import org.apache.hadoop.hbase.HBaseInterfaceAudience;
 import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.HDFSBlocksDistribution;
 import org.apache.hadoop.hbase.HealthCheckChore;
 import org.apache.hadoop.hbase.MetaTableAccessor;
 import org.apache.hadoop.hbase.NotServingRegionException;
@@ -1704,8 +1705,9 @@ public class HRegionServer extends Thread implements
       totalStaticBloomSizeKB += (int) (store.getTotalStaticBloomSize() / 1024);
     }
 
-    float dataLocality =
-        r.getHDFSBlocksDistribution().getBlockLocalityIndex(serverName.getHostname());
+    HDFSBlocksDistribution hdfsBd = r.getHDFSBlocksDistribution();
+    float dataLocality = hdfsBd.getBlockLocalityIndex(serverName.getHostname());
+    float dataLocalityForSsd = hdfsBd.getBlockLocalityIndexForSsd(serverName.getHostname());
     if (regionLoadBldr == null) {
       regionLoadBldr = RegionLoad.newBuilder();
     }
@@ -1732,6 +1734,7 @@ public class HRegionServer extends Thread implements
       .setTotalCompactingKVs(totalCompactingKVs)
       .setCurrentCompactedKVs(currentCompactedKVs)
       .setDataLocality(dataLocality)
+      .setDataLocalityForSsd(dataLocalityForSsd)
       .setLastMajorCompactionTs(r.getOldestHfileTs(true));
     r.setCompleteSequenceId(regionLoadBldr);
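As background for why the regionserver now reports both numbers: on tables pinned to SSD through an HDFS storage policy (ONE_SSD or ALL_SSD), a region can show perfect overall locality while its SSD replica lives on a different node. Below is a hedged sketch of a monitoring check one could build on the new field; the gap threshold and the policy assumption are illustrative, not anything this patch ships:

// Editorial sketch, not part of the patch: flag regions whose SSD locality lags their
// overall locality. Only meaningful for tables whose data is expected to be on SSD;
// the 0.2 gap threshold is an arbitrary example value.
import org.apache.hadoop.hbase.RegionMetrics;
import org.apache.hadoop.hbase.ServerMetrics;

public class SsdLocalityGapCheck {
  static void report(ServerMetrics serverMetrics) {
    for (RegionMetrics region : serverMetrics.getRegionMetrics().values()) {
      float gap = region.getDataLocality() - region.getDataLocalityForSsd();
      if (gap > 0.2f) {
        System.out.printf("%s: locality=%.2f, localityForSsd=%.2f%n",
          region.getNameAsString(), region.getDataLocality(), region.getDataLocalityForSsd());
      }
    }
  }
}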
diff --git a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
index 420fdb02ca7..f19f864f3b6 100644
--- a/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
+++ b/hbase-server/src/main/java/org/apache/hadoop/hbase/util/FSUtils.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathFilter;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hbase.ClusterId;
 import org.apache.hadoop.hbase.HColumnDescriptor;
@@ -718,12 +719,7 @@ public final class FSUtils {
     HDFSBlocksDistribution blocksDistribution = new HDFSBlocksDistribution();
     BlockLocation [] blockLocations =
       fs.getFileBlockLocations(status, start, length);
-    for(BlockLocation bl : blockLocations) {
-      String [] hosts = bl.getHosts();
-      long len = bl.getLength();
-      blocksDistribution.addHostsAndBlockWeight(hosts, len);
-    }
-
+    addToHDFSBlocksDistribution(blocksDistribution, blockLocations);
     return blocksDistribution;
   }
 
@@ -738,7 +734,8 @@ public final class FSUtils {
     for (BlockLocation bl : blockLocations) {
       String[] hosts = bl.getHosts();
       long len = bl.getLength();
-      blocksDistribution.addHostsAndBlockWeight(hosts, len);
+      StorageType[] storageTypes = bl.getStorageTypes();
+      blocksDistribution.addHostsAndBlockWeight(hosts, len, storageTypes);
     }
   }
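For completeness, the same distribution can be computed for a single store file and inspected directly. A sketch under the assumption that FSUtils.computeHDFSBlocksDistribution keeps the (FileSystem, FileStatus, start, length) signature visible in the hunk above; hfilePath is a hypothetical path to an HFile:

// Editorial sketch, not part of the patch: compute the block distribution of one store
// file and print both indices for the local host.
import java.net.InetAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HDFSBlocksDistribution;
import org.apache.hadoop.hbase.util.FSUtils;

public class StoreFileSsdLocality {
  static void print(Configuration conf, Path hfilePath) throws Exception {
    FileSystem fs = FileSystem.get(conf);
    FileStatus status = fs.getFileStatus(hfilePath);
    HDFSBlocksDistribution dist =
      FSUtils.computeHDFSBlocksDistribution(fs, status, 0, status.getLen());
    String localHost = InetAddress.getLocalHost().getHostName();
    System.out.printf("locality=%.2f localityForSsd=%.2f%n",
      dist.getBlockLocalityIndex(localHost), dist.getBlockLocalityIndexForSsd(localHost));
  }
}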
diff --git a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
index 8f9673cd025..bd986a111a3 100644
--- a/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
+++ b/hbase-server/src/main/resources/hbase-webapps/master/table.jsp
@@ -228,6 +228,9 @@
   • Base Stats
   • + Localities
   • Compactions
@@ -244,7 +247,6 @@
       StorefileSize
       Num.Storefiles
       MemSize
-      Locality
       Start Key
       End Key
 <%
@@ -270,7 +272,6 @@
       String fileSize = ZEROMB;
       String fileCount = "N/A";
       String memSize = ZEROMB;
-      float locality = 0.0f;
       if (metaLocation != null) {
         ServerMetrics sl = master.getServerManager().getLoad(metaLocation);
         // The host name portion should be safe, but I don't know how we handle IDNs so err on the side of failing safely.
@@ -290,7 +291,6 @@
         if (mSize > 0) {
           memSize = StringUtils.byteDesc((long)mSize);
         }
-        locality = load.getDataLocality();
       }
     }
   }
@@ -303,7 +303,6 @@
   <%= fileSize%>
   <%= fileCount%>
   <%= memSize%>
-  <%= locality%>
   <%= escapeXml(Bytes.toString(meta.getStartKey())) %>
   <%= escapeXml(Bytes.toString(meta.getEndKey())) %>
 <%
@@ -319,6 +318,51 @@
+  <%
+    // NOTE: Presumes meta with one or more replicas
+    for (int j = 0; j < numMetaReplicas; j++) {
+      RegionInfo meta = RegionReplicaUtil.getRegionInfoForReplica(
+        RegionInfoBuilder.FIRST_META_REGIONINFO, j);
+      ServerName metaLocation = MetaTableLocator.waitMetaRegionLocation(master.getZooKeeper(), j, 1);
+      for (int i = 0; i < 1; i++) {
+        String hostAndPort = "";
+        float locality = 0.0f;
+        float localityForSsd = 0.0f;
+        if (metaLocation != null) {
+          ServerMetrics sl = master.getServerManager().getLoad(metaLocation);
+          hostAndPort = URLEncoder.encode(metaLocation.getHostname()) + ":" +
+            master.getRegionServerInfoPort(metaLocation);
+          if (sl != null) {
+            Map<byte[], RegionMetrics> map = sl.getRegionMetrics();
+            if (map.containsKey(meta.getRegionName())) {
+              RegionMetrics load = map.get(meta.getRegionName());
+              locality = load.getDataLocality();
+              localityForSsd = load.getDataLocalityForSsd();
+            }
+          }
+        }
+  %>
+  <% } %>
+  <%} %>
+      Name
+      Region Server
+      Locality
+      LocalityForSsd
+      <%= escapeXml(meta.getRegionNameAsString()) %>
+      <%= StringEscapeUtils.escapeHtml4(hostAndPort) %>
+      <%= locality%>
+      <%= localityForSsd%>
@@ -777,6 +821,9 @@
   • Base Stats
   • + Localities
   • Compactions
@@ -793,7 +840,6 @@
-
@@ -824,7 +870,6 @@
       String regionSize = ZEROMB;
       String fileCount = "N/A";
       String memSize = ZEROMB;
-      float locality = 0.0f;
       String state = "N/A";
       if (load != null) {
         readReq = String.format("%,1d", load.getReadRequestCount());
@@ -838,7 +883,6 @@
         if (mSize > 0) {
           memSize = StringUtils.byteDesc((long)mSize);
         }
-        locality = load.getDataLocality();
       }
       if (stateMap.containsKey(regionInfo.getEncodedName())) {
         state = stateMap.get(regionInfo.getEncodedName()).toString();
@@ -882,7 +926,6 @@
-
@@ -906,6 +949,61 @@
 here to see all regions.

 <% } %>
+
       StorefileSize
       (<%= totalSizeStr %>)
       Num.Storefiles
       (<%= String.format("%,1d", totalStoreFileCount)%>)
       MemSize
       (<%= totalMemSizeStr %>)
       Locality
       Start Key
       End Key
       Region State
       <%= regionSize%>
       <%= fileCount%>
       <%= memSize%>
       <%= locality%>
       <%= escapeXml(Bytes.toStringBinary(regionInfo.getStartKey()))%>
       <%= escapeXml(Bytes.toStringBinary(regionInfo.getEndKey()))%>
       <%= state%>
+  <%
+    numRegionsRendered = 0;
+    for (Map.Entry<RegionInfo, RegionMetrics> hriEntry : entryList) {
+      RegionInfo regionInfo = hriEntry.getKey();
+      ServerName addr = regionsToServer.get(regionInfo);
+      RegionMetrics load = hriEntry.getValue();
+      float locality = 0.0f;
+      float localityForSsd = 0.0f;
+      String state = "N/A";
+      if (load != null) {
+        locality = load.getDataLocality();
+        localityForSsd = load.getDataLocalityForSsd();
+      }
+      if (addr != null) {
+        // This port might be wrong if RS actually ended up using something else.
+        urlRegionServer =
+          "//" + URLEncoder.encode(addr.getHostname()) + ":" + master.getRegionServerInfoPort(addr) + "/rs-status";
+      }
+      if (numRegionsRendered < numRegionsToRender) {
+        numRegionsRendered++;
+  %>
+  <%
+    if (urlRegionServer != null) {
+  %>
+  <%
+    } else {
+  %>
+  <%
+    }
+  %>
+  <% } %>
+  <% } %>
+      Name(<%= String.format("%,1d", regions.size())%>)
+      Region Server
+      Locality
+      LocalityForSsd
+      <%= escapeXml(Bytes.toStringBinary(regionInfo.getRegionName())) %>
+      <%= addr == null? "-": StringEscapeUtils.escapeHtml4(addr.getHostname().toString()) + ":" + master.getRegionServerInfoPort(addr) %>
+      not deployed
+      <%= locality%>
+      <%= localityForSsd%>
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
index e9ec333e31e..bebc7e357c9 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/TestHDFSBlocksDistribution.java
@@ -21,6 +21,8 @@ import static junit.framework.Assert.assertEquals;
 
 import java.util.HashMap;
 import java.util.Map;
+
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hbase.testclassification.MiscTests;
 import org.apache.hadoop.hbase.testclassification.SmallTests;
 import org.junit.ClassRule;
@@ -50,13 +52,19 @@ public class TestHDFSBlocksDistribution {
     distribution.addHostsAndBlockWeight(new String[] {"testTwo"}, 222);
     assertEquals("Should be two hosts", 2, distribution.getHostAndWeights().size());
     assertEquals("Total weight should be 525", 525, distribution.getUniqueBlocksTotalWeight());
+    distribution.addHostsAndBlockWeight(new String[] {"test"}, 100,
+      new StorageType[] { StorageType.SSD });
+    assertEquals("test host should have weight 403", 403,
+      distribution.getHostAndWeights().get("test").getWeight());
+    assertEquals("test host should have weight for ssd 100", 100,
+      distribution.getHostAndWeights().get("test").getWeightForSsd());
   }
 
   public class MockHDFSBlocksDistribution extends HDFSBlocksDistribution {
     @Override
     public Map<String, HostAndWeight> getHostAndWeights() {
       HashMap<String, HostAndWeight> map = new HashMap<>();
-      map.put("test", new HostAndWeight(null, 100));
+      map.put("test", new HostAndWeight(null, 100, 0));
       return map;
     }
diff --git a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
index f170d79db8b..cc95f210b9d 100644
--- a/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
+++ b/hbase-server/src/test/java/org/apache/hadoop/hbase/master/TestRegionsRecoveryChore.java
@@ -488,6 +488,10 @@ public class TestRegionsRecoveryChore {
         return compactedStoreRefCount;
       }
 
+      @Override
+      public float getDataLocalityForSsd() {
+        return 0;
+      }
     };
     return regionMetrics;
   }
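A possible follow-up to the new unit test, offered as a suggestion rather than part of the patch: assert the derived indices as well as the raw weights. With the calls above, the unique total weight is 625 and host "test" carries weight 403, of which 100 comes from the SSD replica:

// Suggested additional assertions (editorial, not in the patch), reusing the same
// "distribution" instance and the junit.framework.Assert.assertEquals already imported.
assertEquals("locality of test", 403f / 625f, distribution.getBlockLocalityIndex("test"), 0.0001f);
assertEquals("ssd locality of test", 100f / 625f, distribution.getBlockLocalityIndexForSsd("test"), 0.0001f);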